diff --git a/CouchPotato.py b/CouchPotato.py
index 375a1d41..7049cda5 100755
--- a/CouchPotato.py
+++ b/CouchPotato.py
@@ -19,7 +19,12 @@
 base_path = dirname(os.path.abspath(__file__))
 sys.path.insert(0, os.path.join(base_path, 'libs'))
 
 from couchpotato.environment import Env
-from couchpotato.core.helpers.variable import getDataDir
+from couchpotato.core.helpers.variable import getDataDir, removePyc
+
+
+# Remove pyc files before dynamic load (sees .pyc files as regular .py modules)
+removePyc(base_path)
+
 
 class Loader(object):
 
@@ -29,7 +34,7 @@ class Loader(object):
 
         # Get options via arg
         from couchpotato.runner import getOptions
-        self.options = getOptions(base_path, sys.argv[1:])
+        self.options = getOptions(sys.argv[1:])
 
         # Load settings
         settings = Env.get('settings')
 
@@ -50,7 +55,7 @@ class Loader(object):
         # Create logging dir
         self.log_dir = os.path.join(self.data_dir, 'logs');
         if not os.path.isdir(self.log_dir):
-            os.mkdir(self.log_dir)
+            os.makedirs(self.log_dir)
 
         # Logging
         from couchpotato.core.logger import CPLog
 
@@ -67,10 +72,11 @@ class Loader(object):
         signal.signal(signal.SIGTERM, lambda signum, stack_frame: sys.exit(1))
 
         from couchpotato.core.event import addEvent
-        addEvent('app.after_shutdown', self.afterShutdown)
+        addEvent('app.do_shutdown', self.setRestart)
 
-    def afterShutdown(self, restart):
+    def setRestart(self, restart):
         self.do_restart = restart
+        return True
 
     def onExit(self, signal, frame):
         from couchpotato.core.event import fireEvent
 
@@ -98,7 +104,6 @@ class Loader(object):
 
         # Release log files and shutdown logger
         logging.shutdown()
-        time.sleep(3)
 
         args = [sys.executable] + [os.path.join(base_path, os.path.basename(__file__))] + sys.argv[1:]
         subprocess.Popen(args)
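`removePyc` itself is not shown in this patch; a rough sketch of what such a helper could look like, with the name and signature assumed from the call site above:

```python
# Hypothetical sketch of the removePyc helper imported above -- the real
# implementation lives in couchpotato/core/helpers/variable.py and may differ.
import os

def removePyc(folder):
    # Walk the tree and delete stale .pyc files, so the dynamic plugin
    # loader doesn't pick up byte-code for .py modules that no longer exist.
    for root, dirs, files in os.walk(folder):
        for name in files:
            if name.endswith('.pyc'):
                try:
                    os.remove(os.path.join(root, name))
                except OSError:
                    pass
```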
diff --git a/README.md b/README.md
index e38ea0e8..4dbe75bb 100644
--- a/README.md
+++ b/README.md
@@ -1,4 +1,4 @@
-CouchPotato Server
+CouchPotato
 =====
 
 CouchPotato (CP) is an automatic NZB and torrent downloader. You can keep a "movies I want"-list and it will search for NZBs/torrents of these movies every X hours.
@@ -7,7 +7,7 @@ Once a movie is found, it will send it to SABnzbd or download the torrent to a s
 
 ## Running from Source
 
-CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed also.
+CouchPotatoServer can be run from source. This will use *git* as updater, so make sure that is installed.
 
 Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for more details:
 
@@ -17,9 +17,9 @@ Windows, see [the CP forum](http://couchpota.to/forum/showthread.php?tid=14) for
 * Open up `Git Bash` (or CMD) and go to the folder you want to install CP. Something like Program Files.
 * Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`.
 * You can now start CP via `CouchPotatoServer\CouchPotato.py` to start
-* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
+* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
 
-OSx:
+OS X:
 
 * If you're on Leopard (10.5) install Python 2.6+: [Python 2.6.5](http://www.python.org/download/releases/2.6.5/)
 * Install [GIT](http://git-scm.com/)
@@ -27,20 +27,21 @@ OSx:
 * Go to your App folder `cd /Applications`
 * Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
 * Then do `python CouchPotatoServer/CouchPotato.py`
-* Your browser should open up, but if it doesn't go to: `http://localhost:5050/`
+* Your browser should open up, but if it doesn't go to `http://localhost:5050/`
 
-Linux (ubuntu / debian):
+Linux (Ubuntu / Debian):
 
 * Install [GIT](http://git-scm.com/) with `apt-get install git-core`
 * 'cd' to the folder of your choosing.
 * Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
 * Then do `python CouchPotatoServer/CouchPotato.py` to start
-* To run on boot copy the init script. `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
-* Change the paths inside the init script. `sudo nano /etc/init.d/couchpotato`
-* Make it executable. `sudo chmod +x /etc/init.d/couchpotato`
-* Add it to defaults. `sudo update-rc.d couchpotato defaults`
-* Open your browser and go to: `http://localhost:5050/`
-
+* To run on boot copy the init script `sudo cp CouchPotatoServer/init/ubuntu /etc/init.d/couchpotato`
+* Copy the default paths file `sudo cp CouchPotatoServer/init/ubuntu.default /etc/default/couchpotato`
+* Change the paths inside the default file `sudo nano /etc/default/couchpotato`
+* Make it executable `sudo chmod +x /etc/init.d/couchpotato`
+* Add it to defaults `sudo update-rc.d couchpotato defaults`
+* Open your browser and go to `http://localhost:5050/`
+
 
 FreeBSD :
 
@@ -56,7 +57,7 @@ FreeBSD :
 * Run `git clone https://github.com/RuudBurger/CouchPotatoServer.git`
 * Then run `sudo python CouchPotatoServer/CouchPotato.py` to start for the first time
 * To run on boot copy the init script. `sudo cp CouchPotatoServer/init/freebsd /etc/rc.d/couchpotato`
-* Change the paths inside the init script. `sudo vim /etc/init.d/couchpotato`
+* Change the paths inside the init script. `sudo vim /etc/rc.d/couchpotato`
 * Make init script executable. `sudo chmod +x /etc/rc.d/couchpotato`
 * Add init to startup. `sudo echo 'couchpotato_enable="YES"' >> /etc/rc.conf`
 * Open your browser and go to: `http://server:5050/`
diff --git a/contributing.md b/contributing.md
index d5db0b42..821212c8 100644
--- a/contributing.md
+++ b/contributing.md
@@ -1,25 +1,36 @@
-## Got a issue/feature request or submitting a pull request?
+# Contributing to CouchPotatoServer
 
-Make sure you think of the following things:
+1. [Contributing](#contributing)
+2. [Submitting an Issue](#issues)
+3. [Submitting a Pull Request](#pull-requests)
 
-## Issue
- * Search through the existing (and closed) issues first, see if you can get your answer there.
- * Double check the result manually, because it could be an external issue.
- * Post logs! Without seeing what is going on, I can't reproduce the error.
- * Also check the logs before submitting, obvious errors like permission or http errors are often not related to CP.
- * What is the movie + quality you are searching for?
- * What are you're settings for the specific problem?
- * What providers are you using? (While you're logs include these, scanning through hundred of lines of log isn't our hobby)
- * Post the logs from config directory, please do not copy paste the UI. Use pastebin to store these logs!
+## Contributing
+Thank you for your interest in contributing to CouchPotato. There are several ways to help out, even if you've never worked on an open source project before.
+If you've found a bug or want to request a feature, you can report it by [posting an issue](https://github.com/RuudBurger/CouchPotatoServer/issues/new) - be sure to read the [guidelines](#issues) first!
+If you want to contribute your own work, please read the [guidelines](#pull-requests) for submitting a pull request.
+Lastly, for anything related to CouchPotato, feel free to stop by the [forum](http://couchpota.to/forum/) or the [#couchpotato](http://webchat.freenode.net/?channels=couchpotato) IRC channel at irc.freenode.net.
+
+## Issues
+Issues are intended for reporting bugs and weird behaviour or suggesting improvements to CouchPotatoServer.
+Before you submit an issue, please go through the following checklist:
+ * Search through existing issues (*including closed issues!*) first: you might be able to get your answer there.
+ * Double check your issue manually, because it could be an external issue.
+ * Post logs with your issue: without seeing what is going on, the developers can't reproduce the error.
+ * Check the logs yourself before submitting them. Obvious errors like permission or HTTP errors are often not related to CouchPotato.
+ * What movie and quality are you searching for?
+ * What are your settings for the specific problem?
+ * What providers are you using? (While your logs include these, scanning through hundreds of lines of logs isn't our hobby)
+ * Post the logs from the *config* directory, please do not copy paste the UI. Use pastebin to store these logs!
  * Give a short step by step of how to reproduce the error.
- * What hardware / OS are you using and what are the limits? NAS can be slow and maybe have a different python installed then when you use CP on OSX or Windows for example.
- * I will mark issues with the "can't reproduce" tag. Don't go asking "why closed" if it clearly says the issue in the tag ;)
- * If you're running on a NAS (QNAP, Austor etc..) with pre-made packages, make sure these are setup to use our source repo (RuudBurger/CouchPotatoServer) and nothing else!!
+ * What hardware / OS are you using and what are its limitations? For example: a NAS can be slow and maybe have a different version of Python installed than when you use CP on OS X or Windows.
+ * Your issue might be marked with the "can't reproduce" tag. Don't ask why your issue was closed if it says so in the tag.
+ * If you're running on a NAS (QNAP, Austor, Synology etc.) with pre-made packages, make sure these are set up to use our source repository (RuudBurger/CouchPotatoServer) and nothing else!
 
-## Pull Request
- * Make sure you're pull request is made for develop branch (or relevant feature branch)
+The more relevant information you provide, the more likely that your issue will be resolved.
+
+## Pull Requests
+Pull requests are intended for contributing code or documentation to the project. Before you submit a pull request, consider the following:
+ * Make sure your pull request is made for the *develop* branch (or relevant feature branch).
  * Have you tested your PR? If not, why?
- * Are there any limitations of your PR we should know of?
- Make sure to keep you're PR up-to-date with the branch you're trying to push into.
-
-**If we don't get enough info, the chance of the issue getting closed is a lot bigger ;)**
+ * Does your PR have any limitations we should know of?
+ * Is your PR up-to-date with the branch you're trying to push into?
diff --git a/couchpotato/__init__.py b/couchpotato/__init__.py
index 6b8cfd36..fb6b4dc3 100644
--- a/couchpotato/__init__.py
+++ b/couchpotato/__init__.py
@@ -15,6 +15,7 @@ log = CPLog(__name__)
 views = {}
 template_loader = template.Loader(os.path.join(os.path.dirname(__file__), 'templates'))
 
+
 class BaseHandler(RequestHandler):
 
     def get_current_user(self):
@@ -44,12 +45,12 @@ class WebHandler(BaseHandler):
             self.write({'success': False, 'error': 'Failed returning results'})
 
 
-def addView(route, func, static = False):
+def addView(route, func):
     views[route] = func
 
 
-def get_session():
-    return Env.getSession()
+def get_db():
+    return Env.get('db')
 
 
 # Web view
@@ -71,8 +72,16 @@ def apiDocs():
 addView('docs', apiDocs)
 
 
+# Database debug manager
+def databaseManage():
+    return template_loader.load('database.html').generate(fireEvent = fireEvent, Env = Env)
+
+addView('database', databaseManage)
+
+
 # Make non basic auth option to get api key
 class KeyHandler(RequestHandler):
+
     def get(self, *args, **kwargs):
         api_key = None
diff --git a/couchpotato/api.py b/couchpotato/api.py
index ba7f7b69..99a2c6ad 100644
--- a/couchpotato/api.py
+++ b/couchpotato/api.py
@@ -1,15 +1,15 @@
-from couchpotato.core.helpers.request import getParams
-from couchpotato.core.logger import CPLog
 from functools import wraps
 from threading import Thread
-from tornado.gen import coroutine
-from tornado.web import RequestHandler, asynchronous
 import json
 import threading
-import tornado
 import traceback
 import urllib
 
+from couchpotato.core.helpers.request import getParams
+from couchpotato.core.logger import CPLog
+from tornado.web import RequestHandler, asynchronous
+
+
 log = CPLog(__name__)
 
@@ -26,10 +26,18 @@ def run_async(func):
     def async_func(*args, **kwargs):
         func_hl = Thread(target = func, args = args, kwargs = kwargs)
         func_hl.start()
-        return func_hl
 
     return async_func
 
+
+@run_async
+def run_handler(route, kwargs, callback = None):
+    try:
+        res = api[route](**kwargs)
+        callback(res, route)
+    except:
+        log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
+        callback({'success': False, 'error': 'Failed returning results'}, route)
+
 
 # NonBlock API handler
 class NonBlockHandler(RequestHandler):
@@ -76,13 +84,17 @@ def addNonBlockApiView(route, func_tuple, docs = None, **kwargs):
 
 # Blocking API handler
 class ApiHandler(RequestHandler):
 
-    @coroutine
+    @asynchronous
     def get(self, route, *args, **kwargs):
         route = route.strip('/')
         if not api.get(route):
             self.write('API call doesn\'t seem to exist')
             return
 
+        # Create lock if it doesn't exist
+        if route in api_locks and not api_locks.get(route):
+            api_locks[route] = threading.Lock()
+
         api_locks[route].acquire()
 
         try:
@@ -93,37 +105,47 @@ class ApiHandler(RequestHandler):
 
             # Split array arguments
             kwargs = getParams(kwargs)
+            kwargs['_request'] = self
 
             # Remove t random string
             try: del kwargs['t']
             except: pass
 
             # Add async callback handler
-            @run_async
-            def run_handler(callback):
-                try:
-                    res = api[route](**kwargs)
-                    callback(res)
-                except:
-                    log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
-                    callback({'success': False, 'error': 'Failed returning results'})
-
-            result = yield tornado.gen.Task(run_handler)
-
-            # Check JSONP callback
-            jsonp_callback = self.get_argument('callback_func', default = None)
-
-            if jsonp_callback:
-                self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
-                self.set_header("Content-Type", "text/javascript")
-            elif isinstance(result, tuple) and result[0] == 'redirect':
-                self.redirect(result[1])
-            else:
-                self.write(result)
+            run_handler(route, kwargs, callback = self.taskFinished)
 
         except:
             log.error('Failed doing api request "%s": %s', (route, traceback.format_exc()))
-            self.write({'success': False, 'error': 'Failed returning results'})
+            try:
+                self.write({'success': False, 'error': 'Failed returning results'})
+                self.finish()
+            except:
+                log.error('Failed write error "%s": %s', (route, traceback.format_exc()))
+
+            api_locks[route].release()
+
+    post = get
+
+    def taskFinished(self, result, route):
+
+        if not self.request.connection.stream.closed():
+            try:
+                # Check JSONP callback
+                jsonp_callback = self.get_argument('callback_func', default = None)
+
+                if jsonp_callback:
+                    self.write(str(jsonp_callback) + '(' + json.dumps(result) + ')')
+                    self.set_header("Content-Type", "text/javascript")
+                    self.finish()
+                elif isinstance(result, tuple) and result[0] == 'redirect':
+                    self.redirect(result[1])
+                else:
+                    self.write(result)
+                    self.finish()
+            except:
+                log.debug('Failed doing request, probably already closed: %s', (traceback.format_exc()))
+                try: self.finish({'success': False, 'error': 'Failed returning results'})
+                except: pass
+
+        api_locks[route].release()
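The rewrite above drops the `tornado.gen` coroutine: `run_handler` is wrapped by `run_async`, so the request thread returns immediately and `taskFinished` writes the response once the worker thread calls back. The pattern in isolation (standalone sketch in the Python 2 style of the surrounding code; names outside `run_async` are invented):

```python
# Standalone illustration of the run_async fire-and-forget pattern
# used by couchpotato/api.py above.
from threading import Thread

def run_async(func):
    def async_func(*args, **kwargs):
        # Start the wrapped function on a worker thread and return at once
        Thread(target = func, args = args, kwargs = kwargs).start()
    return async_func

@run_async
def handle(route, callback = None):
    callback({'success': True}, route)

def task_finished(result, route):
    print 'finished %s: %s' % (route, result)

handle('app.available', callback = task_finished)  # returns immediately
```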
diff --git a/couchpotato/core/_base/_core/main.py b/couchpotato/core/_base/_core.py
similarity index 57%
rename from couchpotato/core/_base/_core/main.py
rename to couchpotato/core/_base/_core.py
index 02e21f2d..852c42c2 100644
--- a/couchpotato/core/_base/_core/main.py
+++ b/couchpotato/core/_base/_core.py
@@ -1,10 +1,3 @@
-from couchpotato.api import addApiView
-from couchpotato.core.event import fireEvent, addEvent
-from couchpotato.core.helpers.variable import cleanHost, md5
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.environment import Env
-from tornado.ioloop import IOLoop
 from uuid import uuid4
 import os
 import platform
@@ -13,8 +6,19 @@
 import time
 import traceback
 import webbrowser
 
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.helpers.variable import cleanHost, md5, isSubFolder
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+from tornado.ioloop import IOLoop
+
+
 log = CPLog(__name__)
 
+autoload = 'Core'
+
 
 class Core(Plugin):
 
@@ -47,6 +51,7 @@ class Core(Plugin):
         addEvent('app.api_url', self.createApiUrl)
         addEvent('app.version', self.version)
         addEvent('app.load', self.checkDataDir)
+        addEvent('app.load', self.cleanUpFolders)
 
         addEvent('setting.save.core.password', self.md5Password)
         addEvent('setting.save.core.api_key', self.checkApikey)
@@ -66,11 +71,15 @@
         return value if value and len(value) > 3 else uuid4().hex
 
     def checkDataDir(self):
-        if Env.get('app_dir') in Env.get('data_dir'):
+        if isSubFolder(Env.get('data_dir'), Env.get('app_dir')):
             log.error('You should NOT use your CouchPotato directory to save your settings in. Files will get overwritten or be deleted.')
 
         return True
 
+    def cleanUpFolders(self):
+        only_clean = ['couchpotato', 'libs', 'init']
+        self.deleteEmptyFolder(Env.get('app_dir'), show_error = False, only_clean = only_clean)
+
     def available(self, **kwargs):
         return {
             'success': True
@@ -82,7 +91,11 @@
 
         def shutdown():
             self.initShutdown()
-        IOLoop.current().add_callback(shutdown)
+
+        if IOLoop.current()._closing:
+            shutdown()
+        else:
+            IOLoop.current().add_callback(shutdown)
 
         return 'shutdown'
 
@@ -105,7 +118,7 @@
 
         self.shutdown_started = True
 
-        fireEvent('app.do_shutdown')
+        fireEvent('app.do_shutdown', restart = restart)
         log.debug('Every plugin got shutdown event')
 
         loop = True
@@ -130,8 +143,11 @@
 
         log.debug('Safe to shutdown/restart')
 
+        loop = IOLoop.current()
+
         try:
-            IOLoop.current().stop()
+            if not loop._closing:
+                loop.stop()
         except RuntimeError:
             pass
         except:
@@ -181,8 +197,104 @@
     def signalHandler(self):
         if Env.get('daemonized'): return
 
-        def signal_handler(signal, frame):
+        def signal_handler(*args, **kwargs):
             fireEvent('app.shutdown', single = True)
 
         signal.signal(signal.SIGINT, signal_handler)
         signal.signal(signal.SIGTERM, signal_handler)
+
+
+config = [{
+    'name': 'core',
+    'order': 1,
+    'groups': [
+        {
+            'tab': 'general',
+            'name': 'basics',
+            'description': 'Needs restart before changes take effect.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'username',
+                    'default': '',
+                },
+                {
+                    'name': 'password',
+                    'default': '',
+                    'type': 'password',
+                },
+                {
+                    'name': 'port',
+                    'default': 5050,
+                    'type': 'int',
+                    'description': 'The port I should listen to.',
+                },
+                {
+                    'name': 'ssl_cert',
+                    'description': 'Path to SSL server.crt',
+                    'advanced': True,
+                },
+                {
+                    'name': 'ssl_key',
+                    'description': 'Path to SSL server.key',
+                    'advanced': True,
+                },
+                {
+                    'name': 'launch_browser',
+                    'default': True,
+                    'type': 'bool',
+                    'description': 'Launch the browser when I start.',
+                    'wizard': True,
+                },
+            ],
+        },
+        {
+            'tab': 'general',
+            'name': 'advanced',
+            'description': "For those who know what they're doing",
+            'advanced': True,
+            'options': [
+                {
+                    'name': 'api_key',
+                    'default': uuid4().hex,
+                    'readonly': 1,
+                    'description': 'Let 3rd party app do stuff. Docs',
+                },
+                {
+                    'name': 'debug',
+                    'default': 0,
+                    'type': 'bool',
+                    'description': 'Enable debugging.',
+                },
+                {
+                    'name': 'development',
+                    'default': 0,
+                    'type': 'bool',
+                    'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.',
+                },
+                {
+                    'name': 'data_dir',
+                    'type': 'directory',
+                    'description': 'Where cache/logs/etc are stored. Keep empty for defaults.',
+                },
+                {
+                    'name': 'url_base',
+                    'default': '',
+                    'description': 'When using mod_proxy use this to append the url with this.',
+                },
+                {
+                    'name': 'permission_folder',
+                    'default': '0755',
+                    'label': 'Folder CHMOD',
+                    'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
+                },
+                {
+                    'name': 'permission_file',
+                    'default': '0755',
+                    'label': 'File CHMOD',
+                    'description': 'Same as Folder CHMOD but for files',
+                },
+            ],
+        },
+    ],
+}]
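The deleted `__init__.py` below illustrates the plugin convention this commit retires: each plugin was a package whose `__init__.py` exported a `start()` factory next to its `config` list, while the flattened modules now declare `autoload` at module level, either as a class name (as in `_core.py` above) or as a factory function (as in `downloader/__init__.py` further down). How the loader consumes these markers is not part of this patch; a hypothetical sketch:

```python
# Hypothetical loader-side handling of the autoload markers, inferred
# from the renames in this patch -- the real loader may differ.
import importlib

def load_plugin(module_name):
    module = importlib.import_module(module_name)
    marker = getattr(module, 'autoload', None)
    if callable(marker):                    # e.g. downloader/__init__.py: def autoload()
        return marker()
    elif isinstance(marker, basestring):    # e.g. _core.py: autoload = 'Core'
        return getattr(module, marker)()
    return None
```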
diff --git a/couchpotato/core/_base/_core/__init__.py b/couchpotato/core/_base/_core/__init__.py
deleted file mode 100644
index 58965bbb..00000000
--- a/couchpotato/core/_base/_core/__init__.py
+++ /dev/null
@@ -1,101 +0,0 @@
-from .main import Core
-from uuid import uuid4
-
-
-def start():
-    return Core()
-
-config = [{
-    'name': 'core',
-    'order': 1,
-    'groups': [
-        {
-            'tab': 'general',
-            'name': 'basics',
-            'description': 'Needs restart before changes take effect.',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'username',
-                    'default': '',
-                },
-                {
-                    'name': 'password',
-                    'default': '',
-                    'type': 'password',
-                },
-                {
-                    'name': 'port',
-                    'default': 5050,
-                    'type': 'int',
-                    'description': 'The port I should listen to.',
-                },
-                {
-                    'name': 'ssl_cert',
-                    'description': 'Path to SSL server.crt',
-                    'advanced': True,
-                },
-                {
-                    'name': 'ssl_key',
-                    'description': 'Path to SSL server.key',
-                    'advanced': True,
-                },
-                {
-                    'name': 'launch_browser',
-                    'default': True,
-                    'type': 'bool',
-                    'description': 'Launch the browser when I start.',
-                    'wizard': True,
-                },
-            ],
-        },
-        {
-            'tab': 'general',
-            'name': 'advanced',
-            'description': "For those who know what they're doing",
-            'advanced': True,
-            'options': [
-                {
-                    'name': 'api_key',
-                    'default': uuid4().hex,
-                    'readonly': 1,
-                    'description': 'Let 3rd party app do stuff. Docs',
-                },
-                {
-                    'name': 'debug',
-                    'default': 0,
-                    'type': 'bool',
-                    'description': 'Enable debugging.',
-                },
-                {
-                    'name': 'development',
-                    'default': 0,
-                    'type': 'bool',
-                    'description': 'Enable this if you\'re developing, and NOT in any other case, thanks.',
-                },
-                {
-                    'name': 'data_dir',
-                    'type': 'directory',
-                    'description': 'Where cache/logs/etc are stored. Keep empty for defaults.',
-                },
-                {
-                    'name': 'url_base',
-                    'default': '',
-                    'description': 'When using mod_proxy use this to append the url with this.',
-                },
-                {
-                    'name': 'permission_folder',
-                    'default': '0755',
-                    'label': 'Folder CHMOD',
-                    'description': 'Can be either decimal (493) or octal (leading zero: 0755)',
-                },
-                {
-                    'name': 'permission_file',
-                    'default': '0755',
-                    'label': 'File CHMOD',
-                    'description': 'Same as Folder CHMOD but for files',
-                },
-            ],
-        },
-    ],
-}]
diff --git a/couchpotato/core/_base/clientscript/main.py b/couchpotato/core/_base/clientscript.py
similarity index 97%
rename from couchpotato/core/_base/clientscript/main.py
rename to couchpotato/core/_base/clientscript.py
index c1be7e73..e5dbd8f7 100644
--- a/couchpotato/core/_base/clientscript/main.py
+++ b/couchpotato/core/_base/clientscript.py
@@ -1,3 +1,7 @@
+import os
+import re
+import traceback
+
 from couchpotato.core.event import addEvent
 from couchpotato.core.helpers.encoding import ss
 from couchpotato.core.helpers.variable import tryInt
@@ -7,12 +11,12 @@
 from couchpotato.environment import Env
 from minify.cssmin import cssmin
 from minify.jsmin import jsmin
 from tornado.web import StaticFileHandler
-import os
-import re
-import traceback
+
 
 log = CPLog(__name__)
 
+autoload = 'ClientScript'
+
 
 class ClientScript(Plugin):
 
@@ -45,21 +49,17 @@
             'scripts/block/footer.js',
             'scripts/block/menu.js',
             'scripts/page/home.js',
-            'scripts/page/wanted.js',
             'scripts/page/settings.js',
             'scripts/page/about.js',
-            'scripts/page/manage.js',
-            'scripts/misc/downloaders.js',
         ],
     }
 
-    urls = {'style': {}, 'script': {}}
     minified = {'style': {}, 'script': {}}
     paths = {'style': {}, 'script': {}}
 
     comment = {
-        'style': '/*** %s:%d ***/\n',
-        'script': '// %s:%d\n'
+        'style': '/*** %s:%d ***/\n',
+        'script': '// %s:%d\n'
     }
 
     html = {
@@ -91,7 +91,6 @@
         else:
             self.registerStyle(core_url, file_path, position = 'front')
 
-
     def minify(self):
 
         # Create cache dir
@@ -125,7 +124,7 @@
                     data = cssmin(data)
                     data = data.replace('../images/', '../static/images/')
                     data = data.replace('../fonts/', '../static/fonts/')
-                    data = data.replace('../../static/', '../static/') # Replace inside plugins
+                    data = data.replace('../../static/', '../static/')  # Replace inside plugins
 
                 raw.append({'file': file_path, 'date': int(os.path.getmtime(file_path)), 'data': data})
 
@@ -188,6 +187,7 @@
     prefix_properties = ['border-radius', 'transform', 'transition', 'box-shadow']
     prefix_tags = ['ms', 'moz', 'webkit']
 
+
     def prefix(self, data):
 
         trimmed_data = re.sub('(\t|\n|\r)+', '', data)
diff --git a/couchpotato/core/_base/clientscript/__init__.py b/couchpotato/core/_base/clientscript/__init__.py
deleted file mode 100644
index 8070044e..00000000
--- a/couchpotato/core/_base/clientscript/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import ClientScript
-
-
-def start():
-    return ClientScript()
-
-config = []
diff --git a/couchpotato/core/_base/desktop/main.py b/couchpotato/core/_base/desktop.py
similarity index 97%
rename from couchpotato/core/_base/desktop/main.py
rename to couchpotato/core/_base/desktop.py
index c3beff17..9a365636 100644
--- a/couchpotato/core/_base/desktop/main.py
+++ b/couchpotato/core/_base/desktop.py
@@ -5,6 +5,9 @@
 from couchpotato.environment import Env
 
 log = CPLog(__name__)
 
+autoload = 'Desktop'
+
+
 if Env.get('desktop'):
 
     class Desktop(Plugin):
diff --git a/couchpotato/core/_base/desktop/__init__.py b/couchpotato/core/_base/desktop/__init__.py
deleted file mode 100644
index e59ca523..00000000
--- a/couchpotato/core/_base/desktop/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Desktop
-
-
-def start():
-    return Desktop()
-
-config = []
diff --git a/couchpotato/core/_base/downloader/__init__.py b/couchpotato/core/_base/downloader/__init__.py
new file mode 100644
index 00000000..0b9201bd
--- /dev/null
+++ b/couchpotato/core/_base/downloader/__init__.py
@@ -0,0 +1,20 @@
+from .main import Downloader
+
+
+def autoload():
+    return Downloader()
+
+
+config = [{
+    'name': 'download_providers',
+    'groups': [
+        {
+            'label': 'Downloaders',
+            'description': 'You can select different downloaders for each type (usenet / torrent)',
+            'type': 'list',
+            'name': 'download_providers',
+            'tab': 'downloaders',
+            'options': [],
+        },
+    ],
+}]
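The rename that follows turns the old `Downloader` base into `DownloaderBase` (a stub `Downloader(Plugin)` is kept only so the static files keep loading). For orientation, a minimal hypothetical downloader against that interface; only `protocol`, `download`, `test` and `downloadReturnId` are taken from the code below, everything else is invented:

```python
# Hypothetical minimal downloader on top of the refactored base class --
# not part of this patch.
from couchpotato.core._base.downloader.main import DownloaderBase

autoload = 'MyDownloader'


class MyDownloader(DownloaderBase):

    protocol = ['nzb']  # release types this downloader accepts

    def download(self, data = None, media = None, filedata = None):
        # Hand the NZB to an external client here; return False on failure.
        return self.downloadReturnId('some-download-id')

    def test(self):
        # Either a bool or a (success, message) tuple, see _test() below.
        return True
```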
diff --git a/couchpotato/core/downloaders/base.py b/couchpotato/core/_base/downloader/main.py
similarity index 91%
rename from couchpotato/core/downloaders/base.py
rename to couchpotato/core/_base/downloader/main.py
index 3bcf1f31..70e5cc9c 100644
--- a/couchpotato/core/downloaders/base.py
+++ b/couchpotato/core/_base/downloader/main.py
@@ -1,16 +1,24 @@
 from base64 import b32decode, b16encode
+import random
+import re
+
 from couchpotato.api import addApiView
 from couchpotato.core.event import addEvent
 from couchpotato.core.helpers.variable import mergeDicts
 from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.base import Provider
-import random
-import re
+from couchpotato.core.media._base.providers.base import Provider
+from couchpotato.core.plugins.base import Plugin
+
 
 log = CPLog(__name__)
 
-class Downloader(Provider):
+
+## This is here to load the static files
+class Downloader(Plugin):
+    pass
+
+
+class DownloaderBase(Provider):
 
     protocol = []
     http_time_between_calls = 0
@@ -22,17 +30,21 @@
     ]
 
     torrent_trackers = [
-        'http://tracker.publicbt.com/announce',
         'udp://tracker.istole.it:80/announce',
-        'udp://fr33domtracker.h33t.com:3310/announce',
         'http://tracker.istole.it/announce',
-        'http://tracker.ccc.de/announce',
+        'udp://fr33domtracker.h33t.com:3310/announce',
+        'http://tracker.publicbt.com/announce',
         'udp://tracker.publicbt.com:80/announce',
+        'http://tracker.ccc.de/announce',
         'udp://tracker.ccc.de:80/announce',
         'http://exodus.desync.com/announce',
         'http://exodus.desync.com:6969/announce',
         'http://tracker.publichd.eu/announce',
+        'udp://tracker.publichd.eu:80/announce',
         'http://tracker.openbittorrent.com/announce',
+        'udp://tracker.openbittorrent.com/announce',
+        'udp://tracker.openbittorrent.com:80/announce',
+        'udp://open.demonii.com:1337/announce',
     ]
 
     def __init__(self):
@@ -60,6 +72,9 @@
             return
         return self.download(data = data, media = media, filedata = filedata)
 
+    def download(self, *args, **kwargs):
+        return False
+
     def _getAllDownloadStatus(self, download_ids):
         if self.isDisabled(manual = True, data = {}):
             return
@@ -156,11 +171,11 @@
         if not data: data = {}
 
         d_manual = self.conf('manual', default = False)
-        return super(Downloader, self).isEnabled() and \
+        return super(DownloaderBase, self).isEnabled() and \
             (d_manual and manual or d_manual is False) and \
             (not data or self.isCorrectProtocol(data.get('protocol')))
 
-    def _test(self):
+    def _test(self, **kwargs):
         t = self.test()
         if isinstance(t, tuple):
             return {'success': t[0], 'msg': t[1]}
@@ -182,6 +197,7 @@
     def pause(self, release_download, pause):
         return
 
+
 class ReleaseDownloadList(list):
 
     provider = None
@@ -208,7 +224,7 @@
             'status': 'busy',
             'downloader': self.provider.getName(),
             'folder': '',
-            'files': '',
+            'files': [],
         }
 
         return mergeDicts(defaults, result)
diff --git a/couchpotato/static/scripts/misc/downloaders.js b/couchpotato/core/_base/downloader/static/downloaders.js
similarity index 87%
rename from couchpotato/static/scripts/misc/downloaders.js
rename to couchpotato/core/_base/downloader/static/downloaders.js
index 5127275c..45215158 100644
--- a/couchpotato/static/scripts/misc/downloaders.js
+++ b/couchpotato/core/_base/downloader/static/downloaders.js
@@ -6,7 +6,7 @@ var DownloadersBase = new Class({
 		var self = this;
 
 		// Add test buttons to settings page
-		App.addEvent('load', self.addTestButtons.bind(self));
+		App.addEvent('loadSettings', self.addTestButtons.bind(self));
 
 	},
 
@@ -40,15 +40,16 @@ var DownloadersBase = new Class({
 
 				button.set('text', button_name);
 
+				var message;
 				if(json.success){
-					var message = new Element('span.success', {
+					message = new Element('span.success', {
 						'text': 'Connection successful'
 					}).inject(button, 'after')
 				}
 				else {
 					var msg_text = 'Connection failed. Check logs for details.';
 					if(json.hasOwnProperty('msg')) msg_text = json.msg;
-					var message = new Element('span.failed', {
+					message = new Element('span.failed', {
 						'text': msg_text
 					}).inject(button, 'after')
 				}
@@ -68,8 +69,8 @@ var DownloadersBase = new Class({
 
 	testButtonName: function(fieldset){
 		var name = String(fieldset.getElement('h2').innerHTML).substring(0,String(fieldset.getElement('h2').innerHTML).indexOf("
26214400 and last_check < time.time()-604800: # 25MB / 7 days
+            self.compact()
+            Env.prop(prop_name, value = int(time.time()))
+
+    def migrate(self):
+
+        from couchpotato import Env
+        old_db = os.path.join(Env.get('data_dir'), 'couchpotato.db')
+        if not os.path.isfile(old_db): return
+
+        log.info('=' * 30)
+        log.info('Migrating database, hold on..')
+        time.sleep(1)
+
+        if os.path.isfile(old_db):
+
+            migrate_start = time.time()
+
+            import sqlite3
+            conn = sqlite3.connect(old_db)
+
+            migrate_list = {
+                'category': ['id', 'label', 'order', 'required', 'preferred', 'ignored', 'destination'],
+                'profile': ['id', 'label', 'order', 'core', 'hide'],
+                'profiletype': ['id', 'order', 'finish', 'wait_for', 'quality_id', 'profile_id'],
+                'quality': ['id', 'identifier', 'order', 'size_min', 'size_max'],
+                'movie': ['id', 'last_edit', 'library_id', 'status_id', 'profile_id', 'category_id'],
+                'library': ['id', 'identifier', 'info'],
+                'librarytitle': ['id', 'title', 'default', 'libraries_id'],
+                'library_files__file_library': ['library_id', 'file_id'],
+                'release': ['id', 'identifier', 'movie_id', 'status_id', 'quality_id', 'last_edit'],
+                'releaseinfo': ['id', 'identifier', 'value', 'release_id'],
+                'release_files__file_release': ['release_id', 'file_id'],
+                'status': ['id', 'identifier'],
+                'properties': ['id', 'identifier', 'value'],
+                'file': ['id', 'path', 'type_id'],
+                'filetype': ['identifier', 'id']
+            }
+
+            migrate_data = {}
+
+            c = conn.cursor()
+
+            for ml in migrate_list:
+                migrate_data[ml] = {}
+                rows = migrate_list[ml]
+
+                try:
+                    c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(rows) + '`', ml))
+                except:
+                    # ignore faulty destination_id database
+                    if ml == 'category':
+                        migrate_data[ml] = {}
+                    else:
+                        raise
+
+                for p in c.fetchall():
+                    columns = {}
+                    for row in migrate_list[ml]:
+                        columns[row] = p[rows.index(row)]
+
+                    if not migrate_data[ml].get(p[0]):
+                        migrate_data[ml][p[0]] = columns
+                    else:
+                        if not isinstance(migrate_data[ml][p[0]], list):
+                            migrate_data[ml][p[0]] = [migrate_data[ml][p[0]]]
+                        migrate_data[ml][p[0]].append(columns)
+
+            conn.close()
+
+            log.info('Getting data took %s', time.time() - migrate_start)
+
+            db = self.getDB()
+
+            # Use properties
+            properties = migrate_data['properties']
+            log.info('Importing %s properties', len(properties))
+            for x in properties:
+                property = properties[x]
+                Env.prop(property.get('identifier'), property.get('value'))
+
+            # Categories
+            categories = migrate_data.get('category', [])
+            log.info('Importing %s categories', len(categories))
+            category_link = {}
+            for x in categories:
+                c = categories[x]
+
+                new_c = db.insert({
+                    '_t': 'category',
+                    'order': c.get('order', 999),
+                    'label': toUnicode(c.get('label', '')),
+                    'ignored': toUnicode(c.get('ignored', '')),
+                    'preferred': toUnicode(c.get('preferred', '')),
+                    'required': toUnicode(c.get('required', '')),
+                    'destination': toUnicode(c.get('destination', '')),
+                })
+
+                category_link[x] = new_c.get('_id')
+
+            # Profiles
+            log.info('Importing profiles')
+            new_profiles = db.all('profile', with_doc = True)
+            new_profiles_by_label = {}
+            for x in new_profiles:
+
+                # Remove default non core profiles
+                if not x['doc'].get('core'):
+                    db.delete(x['doc'])
+                else:
+                    new_profiles_by_label[x['doc']['label']] = x['_id']
+
+            profiles = migrate_data['profile']
+            profile_link = {}
+            for x in profiles:
+                p = profiles[x]
+
+                exists = new_profiles_by_label.get(p.get('label'))
+
+                # Update existing with order only
+                if exists and p.get('core'):
+                    profile = db.get('id', exists)
+                    profile['order'] = tryInt(p.get('order'))
+                    profile['hide'] = p.get('hide') in [1, True, 'true', 'True']
+                    db.update(profile)
+
+                    profile_link[x] = profile.get('_id')
+                else:
+
+                    new_profile = {
+                        '_t': 'profile',
+                        'label': p.get('label'),
+                        'order': int(p.get('order', 999)),
+                        'core': p.get('core', False),
+                        'qualities': [],
+                        'wait_for': [],
+                        'finish': []
+                    }
+
+                    types = migrate_data['profiletype']
+                    for profile_type in types:
+                        p_type = types[profile_type]
+                        if types[profile_type]['profile_id'] == p['id']:
+                            if p_type['quality_id']:
+                                new_profile['finish'].append(p_type['finish'])
+                                new_profile['wait_for'].append(p_type['wait_for'])
+                                new_profile['qualities'].append(migrate_data['quality'][p_type['quality_id']]['identifier'])
+
+                    if len(new_profile['qualities']) > 0:
+                        new_profile.update(db.insert(new_profile))
+                        profile_link[x] = new_profile.get('_id')
+                    else:
+                        log.error('Corrupt profile list for "%s", using default.', p.get('label'))
+
+            # Qualities
+            log.info('Importing quality sizes')
+            new_qualities = db.all('quality', with_doc = True)
+            new_qualities_by_identifier = {}
+            for x in new_qualities:
+                new_qualities_by_identifier[x['doc']['identifier']] = x['_id']
+
+            qualities = migrate_data['quality']
+            quality_link = {}
+            for x in qualities:
+                q = qualities[x]
+                q_id = new_qualities_by_identifier[q.get('identifier')]
+
+                quality = db.get('id', q_id)
+                quality['order'] = q.get('order')
+                quality['size_min'] = tryInt(q.get('size_min'))
+                quality['size_max'] = tryInt(q.get('size_max'))
+                db.update(quality)
+
+                quality_link[x] = quality
+
+            # Titles
+            titles = migrate_data['librarytitle']
+            titles_by_library = {}
+            for x in titles:
+                title = titles[x]
+                if title.get('default'):
+                    titles_by_library[title.get('libraries_id')] = title.get('title')
+
+            # Releases
+            releaseinfos = migrate_data['releaseinfo']
+            for x in releaseinfos:
+                info = releaseinfos[x]
+
+                # Skip if release doesn't exist for this info
+                if not migrate_data['release'].get(info.get('release_id')):
+                    continue
+
+                if not migrate_data['release'][info.get('release_id')].get('info'):
+                    migrate_data['release'][info.get('release_id')]['info'] = {}
+
+                migrate_data['release'][info.get('release_id')]['info'][info.get('identifier')] = info.get('value')
+
+            releases = migrate_data['release']
+            releases_by_media = {}
+            for x in releases:
+                release = releases[x]
+                if not releases_by_media.get(release.get('movie_id')):
+                    releases_by_media[release.get('movie_id')] = []
+
+                releases_by_media[release.get('movie_id')].append(release)
+
+            # Type ids
+            types = migrate_data['filetype']
+            type_by_id = {}
+            for t in types:
+                type = types[t]
+                type_by_id[type.get('id')] = type
+
+            # Media
+            log.info('Importing %s media items', len(migrate_data['movie']))
+            statuses = migrate_data['status']
+            libraries = migrate_data['library']
+            library_files = migrate_data['library_files__file_library']
+            releases_files = migrate_data['release_files__file_release']
+            all_files = migrate_data['file']
+            poster_type = migrate_data['filetype']['poster']
+            medias = migrate_data['movie']
+            for x in medias:
+                m = medias[x]
+
+                status = statuses.get(m['status_id']).get('identifier')
+                l = libraries.get(m['library_id'])
+
+                # Only migrate wanted movies, Skip if no identifier present
+                if not l or not getImdb(l.get('identifier')): continue
+
+                profile_id = profile_link.get(m['profile_id'])
+                category_id = category_link.get(m['category_id'])
+                title = titles_by_library.get(m['library_id'])
+                releases = releases_by_media.get(x, [])
+                info = json.loads(l.get('info', ''))
+
+                files = library_files.get(m['library_id'], [])
+                if not isinstance(files, list):
+                    files = [files]
+
+                added_media = fireEvent('movie.add', {
+                    'info': info,
+                    'identifier': l.get('identifier'),
+                    'profile_id': profile_id,
+                    'category_id': category_id,
+                    'title': title
+                }, force_readd = False, search_after = False, update_after = False, notify_after = False, status = status, single = True)
+
+                if not added_media:
+                    log.error('Failed adding media %s: %s', (l.get('identifier'), info))
+                    continue
+
+                added_media['files'] = added_media.get('files', {})
+                for f in files:
+                    ffile = all_files[f.get('file_id')]
+
+                    # Only migrate posters
+                    if ffile.get('type_id') == poster_type.get('id'):
+                        if ffile.get('path') not in added_media['files'].get('image_poster', []) and os.path.isfile(ffile.get('path')):
+                            added_media['files']['image_poster'] = [ffile.get('path')]
+                            break
+
+                if 'image_poster' in added_media['files']:
+                    db.update(added_media)
+
+                for rel in releases:
+
+                    empty_info = False
+                    if not rel.get('info'):
+                        empty_info = True
+                        rel['info'] = {}
+
+                    quality = quality_link.get(rel.get('quality_id'))
+                    if not quality:
+                        continue
+
+                    release_status = statuses.get(rel.get('status_id')).get('identifier')
+
+                    if rel['info'].get('download_id'):
+                        status_support = rel['info'].get('download_status_support', False) in [True, 'true', 'True']
+                        rel['info']['download_info'] = {
+                            'id': rel['info'].get('download_id'),
+                            'downloader': rel['info'].get('download_downloader'),
+                            'status_support': status_support,
+                        }
+
+                    # Add status to keys
+                    rel['info']['status'] = release_status
+                    if not empty_info:
+                        fireEvent('release.create_from_search', [rel['info']], added_media, quality, single = True)
+                    else:
+                        release = {
+                            '_t': 'release',
+                            'identifier': rel.get('identifier'),
+                            'media_id': added_media.get('_id'),
+                            'quality': quality.get('identifier'),
+                            'status': release_status,
+                            'last_edit': int(time.time()),
+                            'files': {}
+                        }
+
+                        # Add downloader info if provided
+                        try:
+                            release['download_info'] = rel['info']['download_info']
+                            del rel['download_info']
+                        except:
+                            pass
+
+                        # Add files
+                        release_files = releases_files.get(rel.get('id'), [])
+                        if not isinstance(release_files, list):
+                            release_files = [release_files]
+
+                        if len(release_files) == 0:
+                            continue
+
+                        for f in release_files:
+                            rfile = all_files[f.get('file_id')]
+                            file_type = type_by_id.get(rfile.get('type_id')).get('identifier')
+
+                            if not release['files'].get(file_type):
+                                release['files'][file_type] = []
+
+                            release['files'][file_type].append(rfile.get('path'))
+
+                        try:
+                            rls = db.get('release_identifier', rel.get('identifier'), with_doc = True)['doc']
+                            rls.update(release)
+                            db.update(rls)
+                        except:
+                            db.insert(release)
+
+            log.info('Total migration took %s', time.time() - migrate_start)
+            log.info('=' * 30)
+
+            # rename old database
+            log.info('Renaming old database to %s ', old_db + '.old')
+            os.rename(old_db, old_db + '.old')
+
+            if os.path.isfile(old_db + '-wal'):
+                os.rename(old_db + '-wal', old_db + '-wal.old')
+            if os.path.isfile(old_db + '-shm'):
+                os.rename(old_db + '-shm', old_db + '-shm.old')
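The `migrate()` method above dumps every table of the old SQLite database into plain dicts keyed by row id before re-inserting the records into the new database. The read side of that pattern as a standalone sketch (table layout and quoting taken from the code above; the helper name is invented):

```python
# Standalone sketch of the table-dump pattern used by migrate() above.
import sqlite3

def dump_table(db_path, table, columns):
    conn = sqlite3.connect(db_path)
    c = conn.cursor()
    # Backquote the column/table names; 'order' is a reserved word in SQL.
    c.execute('SELECT %s FROM `%s`' % ('`' + '`,`'.join(columns) + '`', table))
    data = {}
    for row in c.fetchall():
        data[row[0]] = dict(zip(columns, row))  # first column is the id
    conn.close()
    return data

# e.g.: dump_table('couchpotato.db', 'profile', ['id', 'label', 'order', 'core', 'hide'])
```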
diff --git a/couchpotato/core/downloaders/__init__.py b/couchpotato/core/downloaders/__init__.py
index a81ce881..e69de29b 100644
--- a/couchpotato/core/downloaders/__init__.py
+++ b/couchpotato/core/downloaders/__init__.py
@@ -1,13 +0,0 @@
-config = [{
-    'name': 'download_providers',
-    'groups': [
-        {
-            'label': 'Downloaders',
-            'description': 'You can select different downloaders for each type (usenet / torrent)',
-            'type': 'list',
-            'name': 'download_providers',
-            'tab': 'downloaders',
-            'options': [],
-        },
-    ],
-}]
diff --git a/couchpotato/core/downloaders/blackhole/main.py b/couchpotato/core/downloaders/blackhole.py
similarity index 65%
rename from couchpotato/core/downloaders/blackhole/main.py
rename to couchpotato/core/downloaders/blackhole.py
index 9a018354..262776a8 100644
--- a/couchpotato/core/downloaders/blackhole/main.py
+++ b/couchpotato/core/downloaders/blackhole.py
@@ -1,15 +1,20 @@
 from __future__ import with_statement
-from couchpotato.core.downloaders.base import Downloader
-from couchpotato.core.helpers.encoding import sp
-from couchpotato.core.logger import CPLog
-from couchpotato.environment import Env
 import os
 import traceback
 
+from couchpotato.core._base.downloader.main import DownloaderBase
+from couchpotato.core.helpers.encoding import sp
+from couchpotato.core.helpers.variable import getDownloadDir
+from couchpotato.core.logger import CPLog
+from couchpotato.environment import Env
+
+
 log = CPLog(__name__)
 
+autoload = 'Blackhole'
 
-class Blackhole(Downloader):
+
+class Blackhole(DownloaderBase):
 
     protocol = ['nzb', 'torrent', 'torrent_magnet']
     status_support = False
@@ -100,3 +105,54 @@
 
         return super(Blackhole, self).isEnabled(manual, data) and \
             ((self.conf('use_for') in for_protocol))
+
+
+config = [{
+    'name': 'blackhole',
+    'order': 30,
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'blackhole',
+            'label': 'Black hole',
+            'description': 'Download the NZB/Torrent to a specific folder. Note: Seeding and copying/linking features do not work with Black hole.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': True,
+                    'type': 'enabler',
+                    'radio_group': 'nzb,torrent',
+                },
+                {
+                    'name': 'directory',
+                    'type': 'directory',
+                    'description': 'Directory where the .nzb (or .torrent) file is saved to.',
+                    'default': getDownloadDir()
+                },
+                {
+                    'name': 'use_for',
+                    'label': 'Use for',
+                    'default': 'both',
+                    'type': 'dropdown',
+                    'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
+                },
+                {
+                    'name': 'create_subdir',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Create a sub directory when saving the .nzb (or .torrent).',
+                },
+                {
+                    'name': 'manual',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+                },
+            ],
+        }
+    ],
+}]
diff --git a/couchpotato/core/downloaders/blackhole/__init__.py b/couchpotato/core/downloaders/blackhole/__init__.py
deleted file mode 100644
index 92d18e7f..00000000
--- a/couchpotato/core/downloaders/blackhole/__init__.py
+++ /dev/null
@@ -1,56 +0,0 @@
-from .main import Blackhole
-from couchpotato.core.helpers.variable import getDownloadDir
-
-
-def start():
-    return Blackhole()
-
-config = [{
-    'name': 'blackhole',
-    'order': 30,
-    'groups': [
-        {
-            'tab': 'downloaders',
-            'list': 'download_providers',
-            'name': 'blackhole',
-            'label': 'Black hole',
-            'description': 'Download the NZB/Torrent to a specific folder. Note: Seeding and copying/linking features do not work with Black hole.',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'default': True,
-                    'type': 'enabler',
-                    'radio_group': 'nzb,torrent',
-                },
-                {
-                    'name': 'directory',
-                    'type': 'directory',
-                    'description': 'Directory where the .nzb (or .torrent) file is saved to.',
-                    'default': getDownloadDir()
-                },
-                {
-                    'name': 'use_for',
-                    'label': 'Use for',
-                    'default': 'both',
-                    'type': 'dropdown',
-                    'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')],
-                },
-                {
-                    'name': 'create_subdir',
-                    'default': 0,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Create a sub directory when saving the .nzb (or .torrent).',
-                },
-                {
-                    'name': 'manual',
-                    'default': 0,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
-                },
-            ],
-        }
-    ],
-}]
diff --git a/couchpotato/core/downloaders/deluge/main.py b/couchpotato/core/downloaders/deluge.py
similarity index 76%
rename from couchpotato/core/downloaders/deluge/main.py
rename to couchpotato/core/downloaders/deluge.py
index 59300958..0c49233d 100644
--- a/couchpotato/core/downloaders/deluge/main.py
+++ b/couchpotato/core/downloaders/deluge.py
@@ -1,20 +1,24 @@
 from base64 import b64encode, b16encode, b32decode
-from bencode import bencode as benc, bdecode
-from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
-from couchpotato.core.helpers.encoding import isInt, sp
-from couchpotato.core.helpers.variable import tryFloat, cleanHost
-from couchpotato.core.logger import CPLog
 from datetime import timedelta
 from hashlib import sha1
-from synchronousdeluge import DelugeClient
 import os.path
 import re
 import traceback
 
+from bencode import bencode as benc, bdecode
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import isInt, sp
+from couchpotato.core.helpers.variable import tryFloat, cleanHost
+from couchpotato.core.logger import CPLog
+from synchronousdeluge import DelugeClient
+
+
 log = CPLog(__name__)
 
+autoload = 'Deluge'
 
-class Deluge(Downloader):
+
+class Deluge(DownloaderBase):
 
     protocol = ['torrent', 'torrent_magnet']
     log = CPLog(__name__)
@@ -143,7 +147,7 @@
                 'seed_ratio': torrent['ratio'],
                 'timeleft': str(timedelta(seconds = torrent['eta'])),
                 'folder': sp(download_dir if len(torrent_files) == 1 else os.path.join(download_dir, torrent['name'])),
-                'files': '|'.join(torrent_files),
+                'files': torrent_files,
             })
 
         return release_downloads
@@ -291,3 +295,90 @@
             return torrent_hash
 
         return False
+
+
+config = [{
+    'name': 'deluge',
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'deluge',
+            'label': 'Deluge',
+            'description': 'Use Deluge to download torrents.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': 0,
+                    'type': 'enabler',
+                    'radio_group': 'torrent',
+                },
+                {
+                    'name': 'host',
+                    'default': 'localhost:58846',
+                    'description': 'Hostname with port. Usually localhost:58846',
+                },
+                {
+                    'name': 'username',
+                },
+                {
+                    'name': 'password',
+                    'type': 'password',
+                },
+                {
+                    'name': 'directory',
+                    'type': 'directory',
+                    'description': 'Download to this directory. Keep empty for default Deluge download directory.',
+                },
+                {
+                    'name': 'completed_directory',
+                    'type': 'directory',
+                    'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
+                    'advanced': True,
+                },
+                {
+                    'name': 'label',
+                    'description': 'Label to add to torrents in the Deluge UI.',
+                },
+                {
+                    'name': 'remove_complete',
+                    'label': 'Remove torrent',
+                    'type': 'bool',
+                    'default': True,
+                    'advanced': True,
+                    'description': 'Remove the torrent from Deluge after it has finished seeding.',
+                },
+                {
+                    'name': 'delete_files',
+                    'label': 'Remove files',
+                    'default': True,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Also remove the leftover files.',
+                },
+                {
+                    'name': 'paused',
+                    'type': 'bool',
+                    'advanced': True,
+                    'default': False,
+                    'description': 'Add the torrent paused.',
+                },
+                {
+                    'name': 'manual',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+                },
+                {
+                    'name': 'delete_failed',
+                    'default': True,
+                    'advanced': True,
+                    'type': 'bool',
+                    'description': 'Delete a release after the download has failed.',
+                },
+            ],
+        }
+    ],
+}]
diff --git a/couchpotato/core/downloaders/deluge/__init__.py b/couchpotato/core/downloaders/deluge/__init__.py
deleted file mode 100644
index 09fae751..00000000
--- a/couchpotato/core/downloaders/deluge/__init__.py
+++ /dev/null
@@ -1,91 +0,0 @@
-from .main import Deluge
-
-
-def start():
-    return Deluge()
-
-config = [{
-    'name': 'deluge',
-    'groups': [
-        {
-            'tab': 'downloaders',
-            'list': 'download_providers',
-            'name': 'deluge',
-            'label': 'Deluge',
-            'description': 'Use Deluge to download torrents.',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'default': 0,
-                    'type': 'enabler',
-                    'radio_group': 'torrent',
-                },
-                {
-                    'name': 'host',
-                    'default': 'localhost:58846',
-                    'description': 'Hostname with port. Usually localhost:58846',
-                },
-                {
-                    'name': 'username',
-                },
-                {
-                    'name': 'password',
-                    'type': 'password',
-                },
-                {
-                    'name': 'directory',
-                    'type': 'directory',
-                    'description': 'Download to this directory. Keep empty for default Deluge download directory.',
-                },
-                {
-                    'name': 'completed_directory',
-                    'type': 'directory',
-                    'description': 'Move completed torrent to this directory. Keep empty for default Deluge options.',
-                    'advanced': True,
-                },
-                {
-                    'name': 'label',
-                    'description': 'Label to add to torrents in the Deluge UI.',
-                },
-                {
-                    'name': 'remove_complete',
-                    'label': 'Remove torrent',
-                    'type': 'bool',
-                    'default': True,
-                    'advanced': True,
-                    'description': 'Remove the torrent from Deluge after it has finished seeding.',
-                },
-                {
-                    'name': 'delete_files',
-                    'label': 'Remove files',
-                    'default': True,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Also remove the leftover files.',
-                },
-                {
-                    'name': 'paused',
-                    'type': 'bool',
-                    'advanced': True,
-                    'default': False,
-                    'description': 'Add the torrent paused.',
-                },
-                {
-                    'name': 'manual',
-                    'default': 0,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
-                },
-                {
-                    'name': 'delete_failed',
-                    'default': True,
-                    'advanced': True,
-                    'type': 'bool',
-                    'description': 'Delete a release after the download has failed.',
-                },
-            ],
-        }
-    ],
-}]
diff --git a/couchpotato/core/downloaders/nzbget/main.py b/couchpotato/core/downloaders/nzbget.py
similarity index 73%
rename from couchpotato/core/downloaders/nzbget/main.py
rename to couchpotato/core/downloaders/nzbget.py
index 3dad8670..b46de778 100644
--- a/couchpotato/core/downloaders/nzbget/main.py
+++ b/couchpotato/core/downloaders/nzbget.py
@@ -1,8 +1,4 @@
 from base64 import standard_b64encode
-from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
-from couchpotato.core.helpers.encoding import ss, sp
-from couchpotato.core.helpers.variable import tryInt, md5, cleanHost
-from couchpotato.core.logger import CPLog
 from datetime import timedelta
 import re
 import shutil
@@ -10,10 +6,18 @@
 import socket
 import traceback
 import xmlrpclib
 
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import ss, sp
+from couchpotato.core.helpers.variable import tryInt, md5, cleanHost
+from couchpotato.core.logger import CPLog
+
+
 log = CPLog(__name__)
 
+autoload = 'NZBGet'
 
-class NZBGet(Downloader):
+
+class NZBGet(DownloaderBase):
 
     protocol = ['nzb']
 
     rpc = 'xmlrpc'
@@ -142,7 +146,7 @@
                     'timeleft': timeleft,
                 })
 
-        for nzb in queue: # 'Parameters' is not passed in rpc.postqueue
+        for nzb in queue:  # 'Parameters' is not passed in rpc.postqueue
             if nzb['NZBID'] in ids:
                 log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
                 release_downloads.append({
@@ -214,3 +218,76 @@
     def getRPC(self):
         url = cleanHost(host = self.conf('host'), ssl = self.conf('ssl'), username = self.conf('username'), password = self.conf('password')) + self.rpc
         return xmlrpclib.ServerProxy(url)
+
+
+config = [{
+    'name': 'nzbget',
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'nzbget',
+            'label': 'NZBGet',
+            'description': 'Use NZBGet to download NZBs.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': 0,
+                    'type': 'enabler',
+                    'radio_group': 'nzb',
+                },
+                {
+                    'name': 'host',
+                    'default': 'localhost:6789',
+                    'description': 'Hostname with port. Usually localhost:6789',
+                },
+                {
+                    'name': 'ssl',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Use HyperText Transfer Protocol Secure, or https',
+                },
+                {
+                    'name': 'username',
+                    'default': 'nzbget',
+                    'advanced': True,
+                    'description': 'Set a different username to connect. Default: nzbget',
+                },
+                {
+                    'name': 'password',
+                    'type': 'password',
+                    'description': 'Default NZBGet password is tegbzn6789',
+                },
+                {
+                    'name': 'category',
+                    'default': 'Movies',
+                    'description': 'The category CP places the nzb in. Like movies or couchpotato',
+                },
+                {
+                    'name': 'priority',
+                    'advanced': True,
+                    'default': '0',
+                    'type': 'dropdown',
+                    'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
+                    'description': 'Only change this if you are using NZBget 9.0 or higher',
+                },
+                {
+                    'name': 'manual',
+                    'default': 0,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
+                },
+                {
+                    'name': 'delete_failed',
+                    'default': True,
+                    'advanced': True,
+                    'type': 'bool',
+                    'description': 'Delete a release after the download has failed.',
+                },
+            ],
+        }
+    ],
+}]
diff --git a/couchpotato/core/downloaders/nzbget/__init__.py b/couchpotato/core/downloaders/nzbget/__init__.py
deleted file mode 100644
index 551eb42c..00000000
--- a/couchpotato/core/downloaders/nzbget/__init__.py
+++ /dev/null
@@ -1,77 +0,0 @@
-from .main import NZBGet
-
-
-def start():
-    return NZBGet()
-
-config = [{
-    'name': 'nzbget',
-    'groups': [
-        {
-            'tab': 'downloaders',
-            'list': 'download_providers',
-            'name': 'nzbget',
-            'label': 'NZBGet',
-            'description': 'Use NZBGet to download NZBs.',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'default': 0,
-                    'type': 'enabler',
-                    'radio_group': 'nzb',
-                },
-                {
-                    'name': 'host',
-                    'default': 'localhost:6789',
-                    'description': 'Hostname with port. Usually localhost:6789',
-                },
-                {
-                    'name': 'ssl',
-                    'default': 0,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Use HyperText Transfer Protocol Secure, or https',
-                },
-                {
-                    'name': 'username',
-                    'default': 'nzbget',
-                    'advanced': True,
-                    'description': 'Set a different username to connect. Default: nzbget',
-                },
-                {
-                    'name': 'password',
-                    'type': 'password',
-                    'description': 'Default NZBGet password is tegbzn6789',
-                },
-                {
-                    'name': 'category',
-                    'default': 'Movies',
-                    'description': 'The category CP places the nzb in. Like movies or couchpotato',
-                },
-                {
-                    'name': 'priority',
-                    'advanced': True,
-                    'default': '0',
-                    'type': 'dropdown',
-                    'values': [('Very Low', -100), ('Low', -50), ('Normal', 0), ('High', 50), ('Very High', 100)],
-                    'description': 'Only change this if you are using NZBget 9.0 or higher',
-                },
-                {
-                    'name': 'manual',
-                    'default': 0,
-                    'type': 'bool',
-                    'advanced': True,
-                    'description': 'Disable this downloader for automated searches, but use it when I manually send a release.',
-                },
-                {
-                    'name': 'delete_failed',
-                    'default': True,
-                    'advanced': True,
-                    'type': 'bool',
-                    'description': 'Delete a release after the download has failed.',
-                },
-            ],
-        }
-    ],
-}]
diff --git a/couchpotato/core/downloaders/nzbvortex/main.py b/couchpotato/core/downloaders/nzbvortex.py
similarity index 74%
rename from couchpotato/core/downloaders/nzbvortex/main.py
rename to couchpotato/core/downloaders/nzbvortex.py
index d1525c89..9094055f 100644
--- a/couchpotato/core/downloaders/nzbvortex/main.py
+++ b/couchpotato/core/downloaders/nzbvortex.py
@@ -1,8 +1,4 @@
 from base64 import b64encode
-from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList
-from couchpotato.core.helpers.encoding import tryUrlencode, sp
-from couchpotato.core.helpers.variable import cleanHost
-from couchpotato.core.logger import CPLog
 from urllib2 import URLError
 from uuid import uuid4
 import hashlib
@@ -16,10 +12,18 @@
 import time
 import traceback
 import urllib2
 
+from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList
+from couchpotato.core.helpers.encoding import tryUrlencode, sp
+from couchpotato.core.helpers.variable import cleanHost
+from couchpotato.core.logger import CPLog
+
+
 log = CPLog(__name__)
 
+autoload = 'NZBVortex'
 
-class NZBVortex(Downloader):
+
+class NZBVortex(DownloaderBase):
 
     protocol = ['nzb']
 
     api_level = None
@@ -186,3 +190,56 @@ class HTTPSConnection(httplib.HTTPSConnection):
 class HTTPSHandler(urllib2.HTTPSHandler):
     def https_open(self, req):
         return self.do_open(HTTPSConnection, req)
+
+
+config = [{
+    'name': 'nzbvortex',
+    'groups': [
+        {
+            'tab': 'downloaders',
+            'list': 'download_providers',
+            'name': 'nzbvortex',
+            'label': 'NZBVortex',
+            'description': 'Use NZBVortex to download NZBs.',
+            'wizard': True,
+            'options': [
+                {
+                    'name': 'enabled',
+                    'default': 0,
+                    'type': 'enabler',
+                    'radio_group': 'nzb',
+                },
+                {
+                    'name': 'host',
+                    'default': 'localhost:4321',
+                    'description': 'Hostname with port. Usually localhost:4321',
Usually localhost:4321', + }, + { + 'name': 'ssl', + 'default': 1, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + }, + { + 'name': 'manual', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/nzbvortex/__init__.py b/couchpotato/core/downloaders/nzbvortex/__init__.py deleted file mode 100644 index 1c2d699e..00000000 --- a/couchpotato/core/downloaders/nzbvortex/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -from .main import NZBVortex - - -def start(): - return NZBVortex() - -config = [{ - 'name': 'nzbvortex', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'nzbvortex', - 'label': 'NZBVortex', - 'description': 'Use NZBVortex to download NZBs.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb', - }, - { - 'name': 'host', - 'default': 'localhost:4321', - 'description': 'Hostname with port. Usually localhost:4321', - }, - { - 'name': 'ssl', - 'default': 1, - 'type': 'bool', - 'advanced': True, - 'description': 'Use HyperText Transfer Protocol Secure, or https', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - }, - { - 'name': 'manual', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'delete_failed', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/pneumatic/main.py b/couchpotato/core/downloaders/pneumatic.py similarity index 67% rename from couchpotato/core/downloaders/pneumatic/main.py rename to couchpotato/core/downloaders/pneumatic.py index bc1f6d04..8cf1aebb 100644 --- a/couchpotato/core/downloaders/pneumatic/main.py +++ b/couchpotato/core/downloaders/pneumatic.py @@ -1,14 +1,18 @@ from __future__ import with_statement -from couchpotato.core.downloaders.base import Downloader -from couchpotato.core.helpers.encoding import sp -from couchpotato.core.logger import CPLog import os import traceback +from couchpotato.core._base.downloader.main import DownloaderBase +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.logger import CPLog + + log = CPLog(__name__) +autoload = 'Pneumatic' -class Pneumatic(Downloader): + +class Pneumatic(DownloaderBase): protocol = ['nzb'] strm_syntax = 'plugin://plugin.program.pneumatic/?mode=strm&type=add_file&nzb=%s&nzbname=%s' @@ -71,3 +75,37 @@ class Pneumatic(Downloader): return True return False + + +config = [{ + 'name': 'pneumatic', + 'order': 30, + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'pneumatic', + 'label': 'Pneumatic', + 'description': 'Use Pneumatic to download .strm files.', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Directory where the .strm file is saved to.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 
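# The 'manual' option recurs in every downloader config in this diff: when
# set, the downloader is skipped during automated searches and only used for
# releases sent by hand. A minimal sketch of that gate, assuming a
# DownloaderBase helper along these lines (name and signature illustrative,
# not necessarily the shipped API):
#
#     def isDisabled(self, manual = False):
#         # usable when enabled, and either this is a manual send or the
#         # downloader is not flagged manual-only
#         return not (self.conf('enabled') and (manual or not self.conf('manual')))
#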
'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/pneumatic/__init__.py b/couchpotato/core/downloaders/pneumatic/__init__.py deleted file mode 100644 index 698643fb..00000000 --- a/couchpotato/core/downloaders/pneumatic/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from .main import Pneumatic - - -def start(): - return Pneumatic() - -config = [{ - 'name': 'pneumatic', - 'order': 30, - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'pneumatic', - 'label': 'Pneumatic', - 'description': 'Use Pneumatic to download .strm files.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Directory where the .strm file is saved to.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/qbittorrent_.py b/couchpotato/core/downloaders/qbittorrent_.py new file mode 100644 index 00000000..d4bfced1 --- /dev/null +++ b/couchpotato/core/downloaders/qbittorrent_.py @@ -0,0 +1,245 @@ +from base64 import b16encode, b32decode +from hashlib import sha1 +import os + +from bencode import bencode, bdecode +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import cleanHost +from couchpotato.core.logger import CPLog +from qbittorrent.client import QBittorrentClient + + +log = CPLog(__name__) + +autoload = 'qBittorrent' + + +class qBittorrent(DownloaderBase): + + protocol = ['torrent', 'torrent_magnet'] + qb = None + + def __init__(self): + super(qBittorrent, self).__init__() + + def connect(self): + if self.qb is not None: + return self.qb + + url = cleanHost(self.conf('host'), protocol = True, ssl = False) + + if self.conf('username') and self.conf('password'): + self.qb = QBittorrentClient( + url, + username = self.conf('username'), + password = self.conf('password') + ) + else: + self.qb = QBittorrentClient(url) + + return self.qb + + def test(self): + if self.connect(): + return True + + return False + + def download(self, data = None, media = None, filedata = None): + if not media: media = {} + if not data: data = {} + + log.debug('Sending "%s" to qBittorrent.', (data.get('name'))) + + if not self.connect(): + return False + + if not filedata and data.get('protocol') == 'torrent': + log.error('Failed sending torrent, no data') + return False + + + if data.get('protocol') == 'torrent_magnet': + filedata = self.magnetToTorrent(data.get('url')) + + if filedata is False: + return False + + data['protocol'] = 'torrent' + + info = bdecode(filedata)["info"] + torrent_hash = sha1(bencode(info)).hexdigest() + + # Convert base 32 to hex + if len(torrent_hash) == 32: + torrent_hash = b16encode(b32decode(torrent_hash)) + + # Send request to qBittorrent + try: + self.qb.add_file(filedata) + + return self.downloadReturnId(torrent_hash) + except Exception as e: + log.error('Failed to send torrent to qBittorrent: %s', e) + return False + + def getTorrentStatus(self, torrent): + + if torrent.state in ('uploading', 'queuedUP', 'stalledUP'): + return 'seeding' + + if torrent.progress == 1: + return 'completed' + + return 'busy' + + def 
getAllDownloadStatus(self, ids): + log.debug('Checking qBittorrent download status.') + + if not self.connect(): + return [] + + try: + torrents = self.qb.get_torrents() + + release_downloads = ReleaseDownloadList(self) + + for torrent in torrents: + if torrent.hash in ids: + torrent.update_general() # get extra info + torrent_filelist = torrent.get_files() + + torrent_files = [] + torrent_dir = os.path.join(torrent.save_path, torrent.name) + + if os.path.isdir(torrent_dir): + torrent.save_path = torrent_dir + + if len(torrent_filelist) > 1 and os.path.isdir(torrent_dir): # multi file torrent, path.isdir check makes sure we're not in the root download folder + for root, _, files in os.walk(torrent.save_path): + for f in files: + torrent_files.append(sp(os.path.join(root, f))) + + else: # multi or single file placed directly in torrent.save_path + for f in torrent_filelist: + file_path = os.path.join(torrent.save_path, f.name) + if os.path.isfile(file_path): + torrent_files.append(sp(file_path)) + + release_downloads.append({ + 'id': torrent.hash, + 'name': torrent.name, + 'status': self.getTorrentStatus(torrent), + 'seed_ratio': torrent.ratio, + 'original_status': torrent.state, + 'timeleft': torrent.progress * 100 if torrent.progress else -1, # percentage + 'folder': sp(torrent.save_path), + 'files': torrent_files + }) + + return release_downloads + + except Exception as e: + log.error('Failed to get status from qBittorrent: %s', e) + return [] + + def pause(self, release_download, pause = True): + if not self.connect(): + return False + + torrent = self.qb.get_torrent(release_download['id']) + if torrent is None: + return False + + if pause: + return torrent.pause() + return torrent.resume() + + def removeFailed(self, release_download): + log.info('%s failed downloading, deleting...', release_download['name']) + return self.processComplete(release_download, delete_files = True) + + def processComplete(self, release_download, delete_files): + log.debug('Requesting qBittorrent to remove the torrent %s%s.', + (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) + + if not self.connect(): + return False + + torrent = self.qb.find_torrent(release_download['id']) + + if torrent is None: + return False + + if delete_files: + torrent.delete() # deletes torrent with data + else: + torrent.remove() # just removes the torrent, doesn't delete data + + return True + + +config = [{ + 'name': 'qbittorrent', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'qbittorrent', + 'label': 'qbittorrent', + 'description': '', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'http://localhost:8080/', + 'description': 'RPC Communication URI. 
Usually http://localhost:8080/' + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': False, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent after it finishes seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/rtorrent/__init__.py b/couchpotato/core/downloaders/rtorrent/__init__.py deleted file mode 100755 index f793cad1..00000000 --- a/couchpotato/core/downloaders/rtorrent/__init__.py +++ /dev/null @@ -1,100 +0,0 @@ -from .main import rTorrent - - -def start(): - return rTorrent() - -config = [{ - 'name': 'rtorrent', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'rtorrent', - 'label': 'rTorrent', - 'description': '', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, -# @RuudBurger: How do I migrate this? -# { -# 'name': 'url', -# 'default': 'http://localhost:80/RPC2', -# 'description': 'XML-RPC Endpoint URI. Usually scgi://localhost:5000 ' -# 'or http://localhost:80/RPC2' -# }, - { - 'name': 'host', - 'default': 'localhost:80', - 'description': 'RPC Communication URI. Usually scgi://localhost:5000, ' - 'httprpc://localhost/rutorrent or localhost:80' - }, - { - 'name': 'ssl', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Use HyperText Transfer Protocol Secure, or https', - }, - { - 'name': 'rpc_url', - 'type': 'string', - 'default': 'RPC2', - 'advanced': True, - 'description': 'Change if your RPC mount is at a different path.', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'label', - 'description': 'Label to apply on added torrents.', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Download to this directory. 
Keep empty for default rTorrent download directory.', - }, - { - 'name': 'remove_complete', - 'label': 'Remove torrent', - 'default': False, - 'advanced': True, - 'type': 'bool', - 'description': 'Remove the torrent after it finishes seeding.', - }, - { - 'name': 'delete_files', - 'label': 'Remove files', - 'default': True, - 'type': 'bool', - 'advanced': True, - 'description': 'Also remove the leftover files.', - }, - { - 'name': 'paused', - 'type': 'bool', - 'advanced': True, - 'default': False, - 'description': 'Add the torrent paused.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/rtorrent/main.py b/couchpotato/core/downloaders/rtorrent_.py old mode 100755 new mode 100644 similarity index 63% rename from couchpotato/core/downloaders/rtorrent/main.py rename to couchpotato/core/downloaders/rtorrent_.py index 08d34213..822501ae --- a/couchpotato/core/downloaders/rtorrent/main.py +++ b/couchpotato/core/downloaders/rtorrent_.py @@ -1,21 +1,25 @@ -from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import sp -from couchpotato.core.helpers.variable import cleanHost, splitString -from couchpotato.core.logger import CPLog from base64 import b16encode, b32decode -from bencode import bencode, bdecode from datetime import timedelta from hashlib import sha1 -from rtorrent import RTorrent -from rtorrent.err import MethodError from urlparse import urlparse import os +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList + +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import cleanHost, splitString +from couchpotato.core.logger import CPLog +from bencode import bencode, bdecode +from rtorrent import RTorrent + + log = CPLog(__name__) +autoload = 'rTorrent' -class rTorrent(Downloader): + +class rTorrent(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] rt = None @@ -54,27 +58,29 @@ class rTorrent(Downloader): return self.rt url = cleanHost(self.conf('host'), protocol = True, ssl = self.conf('ssl')) + + # Automatically add '+https' to 'httprpc' protocol if SSL is enabled + if self.conf('ssl') and url.startswith('httprpc://'): + url = url.replace('httprpc://', 'httprpc+https://') + parsed = urlparse(url) # rpc_url is only used on http/https scgi pass-through if parsed.scheme in ['http', 'https']: url += self.conf('rpc_url') - if self.conf('username') and self.conf('password'): - self.rt = RTorrent( - url, - self.conf('username'), - self.conf('password') - ) - else: - self.rt = RTorrent(url) + self.rt = RTorrent( + url, + self.conf('username'), + self.conf('password') + ) self.error_msg = '' try: - self.rt._verify_conn() + self.rt._verify_conn() except AssertionError as e: - self.error_msg = e.message - self.rt = None + self.error_msg = e.message + self.rt = None return self.rt @@ -87,44 +93,6 @@ class rTorrent(Downloader): return False - def updateProviderGroup(self, name, data): - if data.get('seed_time'): - log.info('seeding time ignored, not supported') - - if not name: - return False - - if not self.connect(): - return False - - views = self.rt.get_views() - - if name not in views: - self.rt.create_group(name) - - group = 
self.rt.get_group(name) - - try: - if data.get('seed_ratio'): - ratio = int(float(data.get('seed_ratio')) * 100) - log.debug('Updating provider ratio to %s, group name: %s', (ratio, name)) - - # Explicitly set all group options to ensure it is setup correctly - group.set_upload('1M') - group.set_min(ratio) - group.set_max(ratio) - group.set_command('d.stop') - group.enable() - else: - # Reset group action and disable it - group.set_command() - group.disable() - except MethodError as err: - log.error('Unable to set group options: %s', err.msg) - return False - - return True - def download(self, data = None, media = None, filedata = None): if not media: media = {} @@ -135,10 +103,6 @@ class rTorrent(Downloader): if not self.connect(): return False - group_name = 'cp_' + data.get('provider').lower() - if not self.updateProviderGroup(group_name, data): - return False - torrent_params = {} if self.conf('label'): torrent_params['label'] = self.conf('label') @@ -179,9 +143,6 @@ class rTorrent(Downloader): if self.conf('directory'): torrent.set_directory(self.conf('directory')) - # Set Ratio Group - torrent.set_visible(group_name) - # Start torrent if not self.conf('paused', default = 0): torrent.start() @@ -192,19 +153,13 @@ class rTorrent(Downloader): return False def getTorrentStatus(self, torrent): - if torrent.hashing or torrent.hash_checking or torrent.message: - return 'busy' - if not torrent.complete: return 'busy' - if not torrent.open: - return 'completed' - - if torrent.state and torrent.active: + if torrent.open: return 'seeding' - return 'busy' + return 'completed' def getAllDownloadStatus(self, ids): log.debug('Checking rTorrent download status.') @@ -238,7 +193,7 @@ class rTorrent(Downloader): 'original_status': torrent.state, 'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1, 'folder': sp(torrent.directory), - 'files': '|'.join(torrent_files) + 'files': torrent_files }) return release_downloads @@ -282,7 +237,7 @@ class rTorrent(Downloader): if torrent.is_multi_file() and torrent.directory.endswith(torrent.name): # Remove empty directories bottom up try: - for path, _, _ in os.walk(torrent.directory, topdown = False): + for path, _, _ in os.walk(sp(torrent.directory), topdown = False): os.rmdir(path) except OSError: log.info('Directory "%s" contains extra files, unable to remove', torrent.directory) @@ -290,3 +245,92 @@ class rTorrent(Downloader): torrent.erase() # just removes the torrent, doesn't delete data return True + + +config = [{ + 'name': 'rtorrent', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'rtorrent', + 'label': 'rTorrent', + 'description': '', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'localhost:80', + 'description': 'RPC Communication URI. 
Usually scgi://localhost:5000, ' + 'httprpc://localhost/rutorrent or localhost:80' + }, + { + 'name': 'ssl', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'rpc_url', + 'type': 'string', + 'default': 'RPC2', + 'advanced': True, + 'description': 'Change if your RPC mount is at a different path.', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'label', + 'description': 'Label to apply on added torrents.', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Download to this directory. Keep empty for default rTorrent download directory.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': False, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent after it finishes seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/sabnzbd/main.py b/couchpotato/core/downloaders/sabnzbd.py similarity index 69% rename from couchpotato/core/downloaders/sabnzbd/main.py rename to couchpotato/core/downloaders/sabnzbd.py index ba58c090..cd51cb87 100644 --- a/couchpotato/core/downloaders/sabnzbd/main.py +++ b/couchpotato/core/downloaders/sabnzbd.py @@ -1,18 +1,22 @@ -from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList -from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp -from couchpotato.core.helpers.variable import cleanHost, mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.environment import Env from datetime import timedelta from urllib2 import URLError import json import os import traceback +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import tryUrlencode, ss, sp +from couchpotato.core.helpers.variable import cleanHost, mergeDicts +from couchpotato.core.logger import CPLog +from couchpotato.environment import Env + + log = CPLog(__name__) +autoload = 'Sabnzbd' -class Sabnzbd(Downloader): + +class Sabnzbd(DownloaderBase): protocol = ['nzb'] @@ -201,3 +205,77 @@ class Sabnzbd(Downloader): else: return data + +config = [{ + 'name': 'sabnzbd', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'sabnzbd', + 'label': 'Sabnzbd', + 'description': 'Use SABnzbd (0.7+) to download NZBs.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb', + }, + { + 'name': 'host', + 'default': 'localhost:8080', + }, + { + 'name': 'ssl', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Use HyperText Transfer Protocol Secure, or https', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + 'description': 'Used for all calls to Sabnzbd.', + }, + { + 'name': 'category', + 'label': 'Category', + 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', + }, + { + 'name': 'priority', + 'label': 'Priority', + 'type': 'dropdown', + 'default': '0', + 'advanced': True, + 'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)], + 'description': 'Add to the queue with this priority.', + }, + { + 'name': 'manual', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'remove_complete', + 'advanced': True, + 'label': 'Remove NZB', + 'default': False, + 'type': 'bool', + 'description': 'Remove the NZB from history after it completed.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/sabnzbd/__init__.py b/couchpotato/core/downloaders/sabnzbd/__init__.py deleted file mode 100644 index 2990078a..00000000 --- a/couchpotato/core/downloaders/sabnzbd/__init__.py +++ /dev/null @@ -1,79 +0,0 @@ -from .main import Sabnzbd - - -def start(): - return Sabnzbd() - -config = [{ - 'name': 'sabnzbd', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'sabnzbd', - 'label': 'Sabnzbd', - 'description': 'Use SABnzbd (0.7+) to download NZBs.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb', - }, - { - 'name': 'host', - 'default': 'localhost:8080', - }, - { - 'name': 'ssl', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Use HyperText Transfer Protocol Secure, or https', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - 'description': 'Used for all calls to Sabnzbd.', - }, - { - 'name': 'category', - 'label': 'Category', - 'description': 'The category CP places the nzb in. 
Like movies or couchpotato', - }, - { - 'name': 'priority', - 'label': 'Priority', - 'type': 'dropdown', - 'default': '0', - 'advanced': True, - 'values': [('Paused', -2), ('Low', -1), ('Normal', 0), ('High', 1), ('Forced', 2)], - 'description': 'Add to the queue with this priority.', - }, - { - 'name': 'manual', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'remove_complete', - 'advanced': True, - 'label': 'Remove NZB', - 'default': False, - 'type': 'bool', - 'description': 'Remove the NZB from history after it completed.', - }, - { - 'name': 'delete_failed', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/synology/main.py b/couchpotato/core/downloaders/synology.py similarity index 71% rename from couchpotato/core/downloaders/synology/main.py rename to couchpotato/core/downloaders/synology.py index 7e5b6098..2c12536f 100644 --- a/couchpotato/core/downloaders/synology/main.py +++ b/couchpotato/core/downloaders/synology.py @@ -1,15 +1,19 @@ -from couchpotato.core.downloaders.base import Downloader +import json +import traceback + +from couchpotato.core._base.downloader.main import DownloaderBase from couchpotato.core.helpers.encoding import isInt from couchpotato.core.helpers.variable import cleanHost from couchpotato.core.logger import CPLog -import json import requests -import traceback + log = CPLog(__name__) +autoload = 'Synology' -class Synology(Downloader): + +class Synology(DownloaderBase): protocol = ['nzb', 'torrent', 'torrent_magnet'] status_support = False @@ -29,7 +33,7 @@ class Synology(Downloader): try: # Send request to Synology - srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password')) + srpc = SynologyRPC(host[0], host[1], self.conf('username'), self.conf('password'), self.conf('destination')) if data['protocol'] == 'torrent_magnet': log.info('Adding torrent URL %s', data['url']) response = srpc.create_task(url = data['url']) @@ -80,14 +84,16 @@ class SynologyRPC(object): """SynologyRPC lite library""" - def __init__(self, host = 'localhost', port = 5000, username = None, password = None): + def __init__(self, host = 'localhost', port = 5000, username = None, password = None, destination = None): super(SynologyRPC, self).__init__() self.download_url = 'http://%s:%s/webapi/DownloadStation/task.cgi' % (host, port) self.auth_url = 'http://%s:%s/webapi/auth.cgi' % (host, port) + self.sid = None self.username = username self.password = password + self.destination = destination self.session_name = 'DownloadStation' def _login(self): @@ -140,6 +146,10 @@ class SynologyRPC(object): 'version': '1', 'method': 'create', '_sid': self.sid} + + if self.destination and len(self.destination) > 0: + args['destination'] = self.destination + if url: log.info('Login success, adding torrent URI') args['uri'] = url @@ -160,3 +170,57 @@ class SynologyRPC(object): def test(self): return bool(self._login()) + + +config = [{ + 'name': 'synology', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'synology', + 'label': 'Synology', + 'description': 'Use Synology Download Station to download.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'nzb,torrent', + }, + { + 'name': 'host', + 'default': 
'localhost:5000', + 'description': 'Hostname with port. Usually localhost:5000', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'destination', + 'description': 'Specify existing destination share to where your files will be downloaded, usually Downloads', + 'advanced': True, + }, + { + 'name': 'use_for', + 'label': 'Use for', + 'default': 'both', + 'type': 'dropdown', + 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/synology/__init__.py b/couchpotato/core/downloaders/synology/__init__.py deleted file mode 100644 index d0c57c2f..00000000 --- a/couchpotato/core/downloaders/synology/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -from .main import Synology - - -def start(): - return Synology() - -config = [{ - 'name': 'synology', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'synology', - 'label': 'Synology', - 'description': 'Use Synology Download Station to download.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'nzb,torrent', - }, - { - 'name': 'host', - 'default': 'localhost:5000', - 'description': 'Hostname with port. Usually localhost:5000', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'use_for', - 'label': 'Use for', - 'default': 'both', - 'type': 'dropdown', - 'values': [('usenet & torrents', 'both'), ('usenet', 'nzb'), ('torrent', 'torrent')], - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/transmission/main.py b/couchpotato/core/downloaders/transmission.py similarity index 74% rename from couchpotato/core/downloaders/transmission/main.py rename to couchpotato/core/downloaders/transmission.py index 4c42bf0f..d941cca6 100644 --- a/couchpotato/core/downloaders/transmission/main.py +++ b/couchpotato/core/downloaders/transmission.py @@ -1,8 +1,4 @@ from base64 import b64encode -from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList -from couchpotato.core.helpers.encoding import isInt, sp -from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost -from couchpotato.core.logger import CPLog from datetime import timedelta import httplib import json @@ -10,10 +6,18 @@ import os.path import re import urllib2 +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, sp +from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost +from couchpotato.core.logger import CPLog + + log = CPLog(__name__) +autoload = 'Transmission' -class Transmission(Downloader): + +class Transmission(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] log = CPLog(__name__) @@ -137,7 +141,7 @@ class Transmission(Downloader): 'seed_ratio': torrent['uploadRatio'], 'timeleft': str(timedelta(seconds = torrent['eta'])), 'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])), - 'files': '|'.join(torrent_files) + 'files': 
torrent_files }) return release_downloads @@ -156,6 +160,7 @@ class Transmission(Downloader): log.debug('Requesting Transmission to remove the torrent %s%s.', (release_download['name'], ' and cleanup the downloaded files' if delete_files else '')) return self.trpc.remove_torrent(release_download['id'], delete_files) + class TransmissionRPC(object): """TransmissionRPC lite library""" @@ -169,8 +174,8 @@ class TransmissionRPC(object): self.session = {} if username and password: password_manager = urllib2.HTTPPasswordMgrWithDefaultRealm() - password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) - opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager), urllib2.HTTPDigestAuthHandler(password_manager)) + password_manager.add_password(realm = 'Transmission', uri = self.url, user = username, passwd = password) + opener = urllib2.build_opener(urllib2.HTTPBasicAuthHandler(password_manager)) opener.addheaders = [('User-agent', 'couchpotato-transmission-client/1.0')] urllib2.install_opener(opener) elif username or password: @@ -251,3 +256,93 @@ class TransmissionRPC(object): post_data = {'arguments': {'ids': torrent_id, 'delete-local-data': delete_local_data}, 'method': 'torrent-remove', 'tag': self.tag} return self._request(post_data) + +config = [{ + 'name': 'transmission', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'transmission', + 'label': 'Transmission', + 'description': 'Use Transmission to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'localhost:9091', + 'description': 'Hostname with port. Usually localhost:9091', + }, + { + 'name': 'rpc_url', + 'type': 'string', + 'default': 'transmission', + 'advanced': True, + 'description': 'Change if you don\'t run Transmission RPC at the default url.', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'directory', + 'type': 'directory', + 'description': 'Download to this directory. 
Keep empty for default Transmission download directory.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent from Transmission after it finished seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'stalled_as_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Consider a stalled torrent as failed', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/transmission/__init__.py b/couchpotato/core/downloaders/transmission/__init__.py deleted file mode 100644 index 4c9b4aad..00000000 --- a/couchpotato/core/downloaders/transmission/__init__.py +++ /dev/null @@ -1,95 +0,0 @@ -from .main import Transmission - - -def start(): - return Transmission() - -config = [{ - 'name': 'transmission', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'transmission', - 'label': 'Transmission', - 'description': 'Use Transmission to download torrents.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, - { - 'name': 'host', - 'default': 'localhost:9091', - 'description': 'Hostname with port. Usually localhost:9091', - }, - { - 'name': 'rpc_url', - 'type': 'string', - 'default': 'transmission', - 'advanced': True, - 'description': 'Change if you don\'t run Transmission RPC at the default url.', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'directory', - 'type': 'directory', - 'description': 'Download to this directory. 
Keep empty for default Transmission download directory.', - }, - { - 'name': 'remove_complete', - 'label': 'Remove torrent', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Remove the torrent from Transmission after it finished seeding.', - }, - { - 'name': 'delete_files', - 'label': 'Remove files', - 'default': True, - 'type': 'bool', - 'advanced': True, - 'description': 'Also remove the leftover files.', - }, - { - 'name': 'paused', - 'type': 'bool', - 'advanced': True, - 'default': False, - 'description': 'Add the torrent paused.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'stalled_as_failed', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Consider a stalled torrent as failed', - }, - { - 'name': 'delete_failed', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/downloaders/utorrent/main.py b/couchpotato/core/downloaders/utorrent.py similarity index 80% rename from couchpotato/core/downloaders/utorrent/main.py rename to couchpotato/core/downloaders/utorrent.py index 6a5e4257..3164681c 100644 --- a/couchpotato/core/downloaders/utorrent/main.py +++ b/couchpotato/core/downloaders/utorrent.py @@ -1,12 +1,6 @@ from base64 import b16encode, b32decode -from bencode import bencode as benc, bdecode -from couchpotato.core.downloaders.base import Downloader, ReleaseDownloadList -from couchpotato.core.helpers.encoding import isInt, ss, sp -from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost -from couchpotato.core.logger import CPLog from datetime import timedelta from hashlib import sha1 -from multipartpost import MultipartPostHandler import cookielib import httplib import json @@ -17,22 +11,32 @@ import time import urllib import urllib2 +from bencode import bencode as benc, bdecode +from couchpotato.core._base.downloader.main import DownloaderBase, ReleaseDownloadList +from couchpotato.core.helpers.encoding import isInt, ss, sp +from couchpotato.core.helpers.variable import tryInt, tryFloat, cleanHost +from couchpotato.core.logger import CPLog +from multipartpost import MultipartPostHandler + + log = CPLog(__name__) +autoload = 'uTorrent' -class uTorrent(Downloader): + +class uTorrent(DownloaderBase): protocol = ['torrent', 'torrent_magnet'] utorrent_api = None status_flags = { - 'STARTED' : 1, - 'CHECKING' : 2, - 'CHECK-START' : 4, - 'CHECKED' : 8, - 'ERROR' : 16, - 'PAUSED' : 32, - 'QUEUED' : 64, - 'LOADED' : 128 + 'STARTED': 1, + 'CHECKING': 2, + 'CHECK-START': 4, + 'CHECKED': 8, + 'ERROR': 16, + 'PAUSED': 32, + 'QUEUED': 64, + 'LOADED': 128 } def connect(self): @@ -164,7 +168,7 @@ class uTorrent(Downloader): status = 'busy' if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000: status = 'seeding' - elif (torrent[1] & self.status_flags['ERROR']): + elif torrent[1] & self.status_flags['ERROR']: status = 'failed' elif torrent[4] == 1000: status = 'completed' @@ -180,7 +184,7 @@ class uTorrent(Downloader): 'original_status': torrent[1], 'timeleft': str(timedelta(seconds = torrent[10])), 'folder': sp(torrent[26]), - 'files': '|'.join(torrent_files) + 'files': torrent_files }) return release_downloads @@ -225,7 +229,6 @@ class uTorrentAPI(object): password_manager = 
urllib2.HTTPPasswordMgrWithDefaultRealm() password_manager.add_password(realm = None, uri = self.url, user = username, passwd = password) self.opener.add_handler(urllib2.HTTPBasicAuthHandler(password_manager)) - self.opener.add_handler(urllib2.HTTPDigestAuthHandler(password_manager)) elif username or password: log.debug('User or password missing, not using authentication.') self.token = self.get_token() @@ -340,3 +343,79 @@ class uTorrentAPI(object): return False response = json.loads(data) return int(response.get('build')) + + +config = [{ + 'name': 'utorrent', + 'groups': [ + { + 'tab': 'downloaders', + 'list': 'download_providers', + 'name': 'utorrent', + 'label': 'uTorrent', + 'description': 'Use uTorrent (3.0+) to download torrents.', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + 'radio_group': 'torrent', + }, + { + 'name': 'host', + 'default': 'localhost:8000', + 'description': 'Port can be found in settings when enabling WebUI.', + }, + { + 'name': 'username', + }, + { + 'name': 'password', + 'type': 'password', + }, + { + 'name': 'label', + 'description': 'Label to add torrent as.', + }, + { + 'name': 'remove_complete', + 'label': 'Remove torrent', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Remove the torrent from uTorrent after it finished seeding.', + }, + { + 'name': 'delete_files', + 'label': 'Remove files', + 'default': True, + 'type': 'bool', + 'advanced': True, + 'description': 'Also remove the leftover files.', + }, + { + 'name': 'paused', + 'type': 'bool', + 'advanced': True, + 'default': False, + 'description': 'Add the torrent paused.', + }, + { + 'name': 'manual', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', + }, + { + 'name': 'delete_failed', + 'default': True, + 'advanced': True, + 'type': 'bool', + 'description': 'Delete a release after the download has failed.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/downloaders/utorrent/__init__.py b/couchpotato/core/downloaders/utorrent/__init__.py deleted file mode 100644 index da160956..00000000 --- a/couchpotato/core/downloaders/utorrent/__init__.py +++ /dev/null @@ -1,80 +0,0 @@ -from .main import uTorrent - - -def start(): - return uTorrent() - -config = [{ - 'name': 'utorrent', - 'groups': [ - { - 'tab': 'downloaders', - 'list': 'download_providers', - 'name': 'utorrent', - 'label': 'uTorrent', - 'description': 'Use uTorrent (3.0+) to download torrents.', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - 'radio_group': 'torrent', - }, - { - 'name': 'host', - 'default': 'localhost:8000', - 'description': 'Port can be found in settings when enabling WebUI.', - }, - { - 'name': 'username', - }, - { - 'name': 'password', - 'type': 'password', - }, - { - 'name': 'label', - 'description': 'Label to add torrent as.', - }, - { - 'name': 'remove_complete', - 'label': 'Remove torrent', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Remove the torrent from uTorrent after it finished seeding.', - }, - { - 'name': 'delete_files', - 'label': 'Remove files', - 'default': True, - 'type': 'bool', - 'advanced': True, - 'description': 'Also remove the leftover files.', - }, - { - 'name': 'paused', - 'type': 'bool', - 'advanced': True, - 'default': False, - 'description': 'Add the torrent paused.', - }, - { - 'name': 'manual', - 'default': 0, - 'type': 
'bool', - 'advanced': True, - 'description': 'Disable this downloader for automated searches, but use it when I manually send a release.', - }, - { - 'name': 'delete_failed', - 'default': True, - 'advanced': True, - 'type': 'bool', - 'description': 'Delete a release after the download has failed.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/event.py b/couchpotato/core/event.py index 8af2967e..7246cde1 100644 --- a/couchpotato/core/event.py +++ b/couchpotato/core/event.py @@ -1,8 +1,10 @@ +import threading +import traceback + from axl.axel import Event from couchpotato.core.helpers.variable import mergeDicts, natsortKey from couchpotato.core.logger import CPLog -import threading -import traceback + log = CPLog(__name__) events = {} diff --git a/couchpotato/core/helpers/encoding.py b/couchpotato/core/helpers/encoding.py index 0b85f64b..41591683 100644 --- a/couchpotato/core/helpers/encoding.py +++ b/couchpotato/core/helpers/encoding.py @@ -1,12 +1,14 @@ -from couchpotato.core.logger import CPLog from string import ascii_letters, digits from urllib import quote_plus import os import re import traceback import unicodedata + +from couchpotato.core.logger import CPLog import six + log = CPLog(__name__) diff --git a/couchpotato/core/helpers/request.py b/couchpotato/core/helpers/request.py index a0baa85f..4c0add18 100644 --- a/couchpotato/core/helpers/request.py +++ b/couchpotato/core/helpers/request.py @@ -1,8 +1,9 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import natsortKey from urllib import unquote import re +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import natsortKey + def getParams(params): @@ -42,6 +43,7 @@ def getParams(params): return dictToList(temp) +non_decimal = re.compile(r'[^\d.]+') def dictToList(params): @@ -52,7 +54,15 @@ def dictToList(params): convert = lambda text: int(text) if text.isdigit() else text.lower() alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)] sorted_keys = sorted(value.keys(), key = alphanum_key) - new_value = [dictToList(value[k]) for k in sorted_keys] + + all_ints = 0 + for pnr in sorted_keys: + all_ints += 1 if non_decimal.sub('', pnr) == pnr else 0 + + if all_ints == len(sorted_keys): + new_value = [dictToList(value[k]) for k in sorted_keys] + else: + new_value = value except: new_value = value diff --git a/couchpotato/core/helpers/rss.py b/couchpotato/core/helpers/rss.py index 1a4d37c2..f455007e 100644 --- a/couchpotato/core/helpers/rss.py +++ b/couchpotato/core/helpers/rss.py @@ -1,6 +1,8 @@ -from couchpotato.core.logger import CPLog import xml.etree.ElementTree as XMLTree +from couchpotato.core.logger import CPLog + + log = CPLog(__name__) diff --git a/couchpotato/core/helpers/variable.py b/couchpotato/core/helpers/variable.py index 8be6f299..66e01f55 100644 --- a/couchpotato/core/helpers/variable.py +++ b/couchpotato/core/helpers/variable.py @@ -1,5 +1,3 @@ -from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss -from couchpotato.core.logger import CPLog import collections import hashlib import os @@ -8,9 +6,14 @@ import random import re import string import sys +import traceback + +from couchpotato.core.helpers.encoding import simplifyString, toSafeString, ss, sp +from couchpotato.core.logger import CPLog import six from six.moves import map, zip, filter + log = CPLog(__name__) @@ -214,6 +217,7 @@ def tryFloat(s): return float(s) except: return 0 + def natsortKey(string_): """See 
http://www.codinghorror.com/blog/archives/001018.html""" return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_)] @@ -225,26 +229,28 @@ def toIterable(value): return [value] -def getTitle(library_dict): +def getIdentifier(media): + return media.get('identifier') or media.get('identifiers', {}).get('imdb') + + +def getTitle(media_dict): try: try: - return library_dict['titles'][0]['title'] + return media_dict['title'] except: try: - for title in library_dict.titles: - if title.default: - return title.title + return media_dict['titles'][0] except: try: - return library_dict['info']['titles'][0] + return media_dict['info']['titles'][0] except: - log.error('Could not get title for %s', library_dict.identifier) - return None - - log.error('Could not get title for %s', library_dict['identifier']) - return None + try: + return media_dict['media']['info']['titles'][0] + except: + log.error('Could not get title for %s', getIdentifier(media_dict)) + return None except: - log.error('Could not get title for library item: %s', library_dict) + log.error('Could not get title for library item: %s', media_dict) return None @@ -289,8 +295,11 @@ def isSubFolder(sub_folder, base_folder): # Returns True if sub_folder is the same as or inside base_folder return base_folder and sub_folder and ss(os.path.normpath(base_folder).rstrip(os.path.sep) + os.path.sep) in ss(os.path.normpath(sub_folder).rstrip(os.path.sep) + os.path.sep) + # From SABNZBD -re_password = [re.compile(r'([^/\\]+)[/\\](.+)'), re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)] +re_password = [re.compile(r'(.+){{([^{}]+)}}$'), re.compile(r'(.+)\s+password\s*=\s*(.+)$', re.I)] + + def scanForPassword(name): m = None for reg in re_password: @@ -299,3 +308,36 @@ def scanForPassword(name): if m: return m.group(1).strip('. 
'), m.group(2).strip() + + +under_pat = re.compile(r'_([a-z])') + +def underscoreToCamel(name): + return under_pat.sub(lambda x: x.group(1).upper(), name) + + +def removePyc(folder, only_excess = True, show_logs = True): + + folder = sp(folder) + + for root, dirs, files in os.walk(folder): + + pyc_files = filter(lambda filename: filename.endswith('.pyc'), files) + py_files = set(filter(lambda filename: filename.endswith('.py'), files)) + excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files + + for excess_pyc_file in excess_pyc_files: + full_path = os.path.join(root, excess_pyc_file) + if show_logs: log.debug('Removing old PYC file: %s', full_path) + try: + os.remove(full_path) + except: + log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc())) + + for dir_name in dirs: + full_path = os.path.join(root, dir_name) + if len(os.listdir(full_path)) == 0: + try: + os.rmdir(full_path) + except: + log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) diff --git a/couchpotato/core/loader.py b/couchpotato/core/loader.py index 6c3a719d..11df37b4 100644 --- a/couchpotato/core/loader.py +++ b/couchpotato/core/loader.py @@ -1,31 +1,33 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.logger import CPLog -from importhelper import import_module import os import sys import traceback + +from couchpotato.core.event import fireEvent +from couchpotato.core.logger import CPLog +from importhelper import import_module import six + log = CPLog(__name__) class Loader(object): - plugins = {} - providers = {} - modules = {} + + def __init__(self): + self.plugins = {} + self.providers = {} + self.modules = {} + self.paths = {} def preload(self, root = ''): core = os.path.join(root, 'couchpotato', 'core') - self.paths = { + self.paths.update({ 'core': (0, 'couchpotato.core._base', os.path.join(core, '_base')), 'plugin': (1, 'couchpotato.core.plugins', os.path.join(core, 'plugins')), 'notifications': (20, 'couchpotato.core.notifications', os.path.join(core, 'notifications')), 'downloaders': (20, 'couchpotato.core.downloaders', os.path.join(core, 'downloaders')), - } - - # Add providers to loader - self.addPath(root, ['couchpotato', 'core', 'providers'], 25, recursive = False) + }) # Add media to loader self.addPath(root, ['couchpotato', 'core', 'media'], 25, recursive = True) @@ -57,12 +59,10 @@ class Loader(object): if m is None: continue - log.info('Loading %s: %s', (plugin['type'], plugin['name'])) - # Save default settings for plugin/provider did_save += self.loadSettings(m, module_name, save = False) - self.loadPlugins(m, plugin.get('name')) + self.loadPlugins(m, plugin.get('type'), plugin.get('name')) except ImportError as e: # todo:: subclass ImportError for missing requirements. 
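# The loadPlugins call above now receives the plugin type as well, and it
# dispatches on the module-level `autoload` attribute this diff introduces in
# place of the old per-package start() factory. Roughly, as a sketch
# mirroring the loadPlugins body further down (not new behaviour):
#
#     if isinstance(module.autoload, (str, unicode)):
#         # single-file plugin: autoload names the class to instantiate,
#         # e.g. `autoload = 'NZBVortex'` in nzbvortex.py
#         getattr(module, module.autoload)()
#     else:
#         # package plugin: autoload is a factory callable,
#         # e.g. `def autoload(): return Matcher()` in its __init__.py
#         module.autoload()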
if e.message.lower().startswith("missing"): @@ -96,14 +96,19 @@ class Loader(object): self.addModule(priority, plugin_type, module, os.path.basename(dir_name)) for name in os.listdir(dir_name): - if os.path.isdir(os.path.join(dir_name, name)) and name != 'static' and os.path.isfile(os.path.join(dir_name, name, '__init__.py')): + path = os.path.join(dir_name, name) + ext = os.path.splitext(path)[1] + ext_length = len(ext) + if name != 'static' and ((os.path.isdir(path) and os.path.isfile(os.path.join(path, '__init__.py'))) + or (os.path.isfile(path) and ext == '.py')): + name = name[:-ext_length] if ext_length > 0 else name module_name = '%s.%s' % (module, name) self.addModule(priority, plugin_type, module_name, name) def loadSettings(self, module, name, save = True): if not hasattr(module, 'config'): - log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__) + #log.debug('Skip loading settings for plugin %s as it has no config section' % module.__file__) return False try: @@ -119,13 +124,20 @@ class Loader(object): log.debug('Failed loading settings for "%s": %s', (name, traceback.format_exc())) return False - def loadPlugins(self, module, name): + def loadPlugins(self, module, type, name): - if not hasattr(module, 'start'): - log.debug('Skip startup for plugin %s as it has no start section' % module.__file__) + if not hasattr(module, 'autoload'): + #log.debug('Skip startup for plugin %s as it has no start section' % module.__file__) return False try: - module.start() + # Load single file plugin + if isinstance(module.autoload, (str, unicode)): + getattr(module, module.autoload)() + # Load folder plugin + else: + module.autoload() + + log.info('Loaded %s: %s', (type, name)) return True except: log.error('Failed loading plugin "%s": %s', (module.__file__, traceback.format_exc())) @@ -137,6 +149,9 @@ class Loader(object): self.modules[priority] = {} module = module.lstrip('.') + if plugin_type.startswith('couchpotato_core'): + plugin_type = plugin_type[17:] + self.modules[priority][module] = { 'priority': priority, 'module': module, diff --git a/couchpotato/core/logger.py b/couchpotato/core/logger.py index 8223f146..fba8d623 100644 --- a/couchpotato/core/logger.py +++ b/couchpotato/core/logger.py @@ -7,6 +7,9 @@ class CPLog(object): context = '' replace_private = ['api', 'apikey', 'api_key', 'password', 'username', 'h', 'uid', 'key', 'passkey'] + Env = None + is_develop = False + def __init__(self, context = ''): if context.endswith('.main'): context = context[:-5] @@ -14,6 +17,20 @@ class CPLog(object): self.context = context self.logger = logging.getLogger() + def setup(self): + + if not self.Env: + from couchpotato.environment import Env + + self.Env = Env + self.is_develop = Env.get('dev') + + from couchpotato.core.event import addEvent + addEvent('app.after_shutdown', self.close) + + def close(self, *args, **kwargs): + logging.shutdown() + def info(self, msg, replace_tuple = ()): self.logger.info(self.addContext(msg, replace_tuple)) @@ -37,7 +54,6 @@ class CPLog(object): def safeMessage(self, msg, replace_tuple = ()): - from couchpotato.environment import Env from couchpotato.core.helpers.encoding import ss, toUnicode msg = ss(msg) @@ -53,7 +69,8 @@ class CPLog(object): except Exception as e: self.logger.error('Failed encoding stuff to log "%s": %s' % (msg, e)) - if not Env.get('dev'): + self.setup() + if not self.is_develop: for replace in self.replace_private: msg = re.sub('(\?%s=)[^\&]+' % replace, '?%s=xxx' % replace, msg) @@ -61,7 +78,7 @@ 
class CPLog(object): # Replace api key try: - api_key = Env.setting('api_key') + api_key = self.Env.setting('api_key') if api_key: msg = msg.replace(api_key, 'API_KEY') except: diff --git a/couchpotato/core/media/__init__.py b/couchpotato/core/media/__init__.py index cf9302b1..4a3eb684 100644 --- a/couchpotato/core/media/__init__.py +++ b/couchpotato/core/media/__init__.py @@ -1,8 +1,12 @@ +import os import traceback -from couchpotato import get_session, CPLog -from couchpotato.core.event import addEvent, fireEventAsync, fireEvent + +from couchpotato import CPLog +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media +import six + log = CPLog(__name__) @@ -11,35 +15,22 @@ class MediaBase(Plugin): _type = None - default_dict = { - 'profile': {'types': {'quality': {}}}, - 'releases': {'status': {}, 'quality': {}, 'files': {}, 'info': {}}, - 'library': {'titles': {}, 'files': {}}, - 'files': {}, - 'status': {}, - 'category': {}, - } - def initType(self): addEvent('media.types', self.getType) def getType(self): return self._type - def createOnComplete(self, id): + def createOnComplete(self, media_id): def onComplete(): try: - db = get_session() - media = db.query(Media).filter_by(id = id).first() - media_dict = media.to_dict(self.default_dict) - event_name = '%s.searcher.single' % media.type + media = fireEvent('media.get', media_id, single = True) + event_name = '%s.searcher.single' % media.get('type') - fireEvent(event_name, media_dict, on_complete = self.createNotifyFront(id)) + fireEventAsync(event_name, media, on_complete = self.createNotifyFront(media_id), manual = True) except: log.error('Failed creating onComplete: %s', traceback.format_exc()) - finally: - db.close() return onComplete @@ -47,15 +38,61 @@ class MediaBase(Plugin): def notifyFront(): try: - db = get_session() - media = db.query(Media).filter_by(id = media_id).first() - media_dict = media.to_dict(self.default_dict) - event_name = '%s.update' % media.type + media = fireEvent('media.get', media_id, single = True) + event_name = '%s.update' % media.get('type') - fireEvent('notify.frontend', type = event_name, data = media_dict) + fireEvent('notify.frontend', type = event_name, data = media) except: log.error('Failed creating onComplete: %s', traceback.format_exc()) - finally: - db.close() return notifyFront + + def getDefaultTitle(self, info, ): + + # Set default title + default_title = toUnicode(info.get('title')) + titles = info.get('titles', []) + counter = 0 + def_title = None + for title in titles: + if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title): + def_title = toUnicode(title) + break + counter += 1 + + if not def_title: + def_title = toUnicode(titles[0]) + + return def_title or 'UNKNOWN' + + def getPoster(self, image_urls, existing_files): + image_type = 'poster' + + # Remove non-existing files + file_type = 'image_%s' % image_type + + # Make existing unique + unique_files = list(set(existing_files.get(file_type, []))) + + # Remove files that can't be found + for ef in unique_files: + if not os.path.isfile(ef): + unique_files.remove(ef) + + # Replace new files list + existing_files[file_type] = unique_files + if len(existing_files) == 0: + del existing_files[file_type] + + # Loop over type + for image 
in image_urls.get(image_type, []): + if not isinstance(image, (str, unicode)): + continue + + if file_type not in existing_files or len(existing_files.get(file_type, [])) == 0: + file_path = fireEvent('file.download', url = image, single = True) + if file_path: + existing_files[file_type] = [file_path] + break + else: + break diff --git a/couchpotato/core/media/_base/library/__init__.py b/couchpotato/core/media/_base/library/__init__.py index 553eff5a..a404f81c 100644 --- a/couchpotato/core/media/_base/library/__init__.py +++ b/couchpotato/core/media/_base/library/__init__.py @@ -1,13 +1,7 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.plugins.base import Plugin +from .main import Library -class LibraryBase(Plugin): +def autoload(): + return Library() - _type = None - - def initType(self): - addEvent('library.types', self.getType) - - def getType(self): - return self._type +config = [] diff --git a/couchpotato/core/media/_base/library/base.py b/couchpotato/core/media/_base/library/base.py new file mode 100644 index 00000000..553eff5a --- /dev/null +++ b/couchpotato/core/media/_base/library/base.py @@ -0,0 +1,13 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.plugins.base import Plugin + + +class LibraryBase(Plugin): + + _type = None + + def initType(self): + addEvent('library.types', self.getType) + + def getType(self): + return self._type diff --git a/couchpotato/core/media/_base/library/main.py b/couchpotato/core/media/_base/library/main.py new file mode 100644 index 00000000..a723de50 --- /dev/null +++ b/couchpotato/core/media/_base/library/main.py @@ -0,0 +1,18 @@ +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.media._base.library.base import LibraryBase + + +class Library(LibraryBase): + def __init__(self): + addEvent('library.title', self.title) + + def title(self, library): + return fireEvent( + 'library.query', + library, + + condense = False, + include_year = False, + include_identifier = False, + single = True + ) diff --git a/couchpotato/core/media/_base/matcher/__init__.py b/couchpotato/core/media/_base/matcher/__init__.py new file mode 100644 index 00000000..c8b1e821 --- /dev/null +++ b/couchpotato/core/media/_base/matcher/__init__.py @@ -0,0 +1,7 @@ +from .main import Matcher + + +def autoload(): + return Matcher() + +config = [] diff --git a/couchpotato/core/media/_base/matcher/base.py b/couchpotato/core/media/_base/matcher/base.py new file mode 100644 index 00000000..86511263 --- /dev/null +++ b/couchpotato/core/media/_base/matcher/base.py @@ -0,0 +1,84 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.encoding import simplifyString +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + +log = CPLog(__name__) + + +class MatcherBase(Plugin): + type = None + + def __init__(self): + if self.type: + addEvent('%s.matcher.correct' % self.type, self.correct) + + def correct(self, chain, release, media, quality): + raise NotImplementedError() + + def flattenInfo(self, info): + # Flatten dictionary of matches (chain info) + if isinstance(info, dict): + return dict([(key, self.flattenInfo(value)) for key, value in info.items()]) + + # Flatten matches + result = None + + for match in info: + if isinstance(match, dict): + if result is None: + result = {} + + for key, value in match.items(): + if key not in result: + result[key] = [] + + result[key].append(value) + else: + if result is None: + result = [] + + result.append(match) + + return 
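Throughout this changeset, direct SQLAlchemy queries give way to events: plugins are built by `autoload()` factories, register handlers with `addEvent`, and callers reach them via `fireEvent(..., single = True)`. For reviewers unfamiliar with the pattern, here is a minimal toy sketch of such a bus; it is a hypothetical simplification, not CouchPotato's actual event implementation (which also supports priorities, merging, and async dispatch).

```python
# Toy event bus illustrating the addEvent/fireEvent pattern used in this patch.
_events = {}

def addEvent(name, handler):
    # Register a handler under an event name.
    _events.setdefault(name, []).append(handler)

def fireEvent(name, *args, **kwargs):
    # Call every registered handler; single=True unwraps the first result.
    single = kwargs.pop('single', False)
    results = [handler(*args, **kwargs) for handler in _events.get(name, [])]
    return results[0] if single and results else results

# A plugin registers a handler once...
addEvent('library.title', lambda library: library.get('title'))

# ...and any module can call it without importing the plugin.
print(fireEvent('library.title', {'title': 'Dark City'}, single = True))
```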
result + + def constructFromRaw(self, match): + if not match: + return None + + parts = [ + ''.join([ + y for y in x[1:] if y + ]) for x in match + ] + + return ''.join(parts)[:-1].strip() + + def simplifyValue(self, value): + if not value: + return value + + if isinstance(value, basestring): + return simplifyString(value) + + if isinstance(value, list): + return [self.simplifyValue(x) for x in value] + + raise ValueError("Unsupported value type") + + def chainMatch(self, chain, group, tags): + info = self.flattenInfo(chain.info[group]) + + found_tags = [] + for tag, accepted in tags.items(): + values = [self.simplifyValue(x) for x in info.get(tag, [None])] + + if any([val in accepted for val in values]): + found_tags.append(tag) + + log.debug('tags found: %s, required: %s' % (found_tags, tags.keys())) + + if set(tags.keys()) == set(found_tags): + return True + + return all([key in found_tags for key, value in tags.items()]) diff --git a/couchpotato/core/media/_base/matcher/main.py b/couchpotato/core/media/_base/matcher/main.py new file mode 100644 index 00000000..2034249b --- /dev/null +++ b/couchpotato/core/media/_base/matcher/main.py @@ -0,0 +1,89 @@ +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.variable import possibleTitles +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.matcher.base import MatcherBase +from caper import Caper + +log = CPLog(__name__) + + +class Matcher(MatcherBase): + + def __init__(self): + super(Matcher, self).__init__() + + self.caper = Caper() + + addEvent('matcher.parse', self.parse) + addEvent('matcher.match', self.match) + + addEvent('matcher.flatten_info', self.flattenInfo) + addEvent('matcher.construct_from_raw', self.constructFromRaw) + + addEvent('matcher.correct_title', self.correctTitle) + addEvent('matcher.correct_quality', self.correctQuality) + + def parse(self, name, parser='scene'): + return self.caper.parse(name, parser) + + def match(self, release, media, quality): + match = fireEvent('matcher.parse', release['name'], single = True) + + if len(match.chains) < 1: + log.info2('Wrong: %s, unable to parse release name (no chains)', release['name']) + return False + + for chain in match.chains: + if fireEvent('%s.matcher.correct' % media['type'], chain, release, media, quality, single = True): + return chain + + return False + + def correctTitle(self, chain, media): + root_library = media['library']['root_library'] + + if 'show_name' not in chain.info or not len(chain.info['show_name']): + log.info('Wrong: missing show name in parsed result') + return False + + # Get the lower-case parsed show name from the chain + chain_words = [x.lower() for x in chain.info['show_name']] + + # Build a list of possible titles of the media we are searching for + titles = root_library['info']['titles'] + + # Add year suffix titles (will result in ['', ' ', '', ...]) + suffixes = [None, root_library['info']['year']] + + titles = [ + title + ((' %s' % suffix) if suffix else '') + for title in titles + for suffix in suffixes + ] + + # Check show titles match + # TODO check xem names + for title in titles: + for valid_words in [x.split(' ') for x in possibleTitles(title)]: + + if valid_words == chain_words: + return True + + return False + + def correctQuality(self, chain, quality, quality_map): + if quality['identifier'] not in quality_map: + log.info2('Wrong: unknown preferred quality %s', quality['identifier']) + return False + + if 'video' not in chain.info: + log.info2('Wrong: no video tags found') 
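`flattenInfo` collapses Caper's per-fragment match dictionaries into a single dict of lists, which is what `chainMatch` consumes. A standalone copy with sample data may help verify the behaviour; the input shape below is an assumption based on how the method is used.

```python
def flatten_info(info):
    # Nested dicts are flattened per key; lists of match dicts collapse
    # into one dict whose values are lists (mirrors MatcherBase.flattenInfo).
    if isinstance(info, dict):
        return dict((key, flatten_info(value)) for key, value in info.items())

    result = None
    for match in info:
        if isinstance(match, dict):
            if result is None:
                result = {}
            for key, value in match.items():
                result.setdefault(key, []).append(value)
        else:
            if result is None:
                result = []
            result.append(match)
    return result

chain_info = {'video': [{'codec': 'x264'}, {'resolution': '720p'}]}
print(flatten_info(chain_info))
# {'video': {'codec': ['x264'], 'resolution': ['720p']}}
```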
+ return False + + video_tags = quality_map[quality['identifier']] + + if not self.chainMatch(chain, 'video', video_tags): + log.info2('Wrong: %s tags not in chain', video_tags) + return False + + return True diff --git a/couchpotato/core/media/_base/media/__init__.py b/couchpotato/core/media/_base/media/__init__.py index e5f5a0ec..b1cde097 100644 --- a/couchpotato/core/media/_base/media/__init__.py +++ b/couchpotato/core/media/_base/media/__init__.py @@ -1,7 +1,5 @@ from .main import MediaPlugin -def start(): +def autoload(): return MediaPlugin() - -config = [] diff --git a/couchpotato/core/media/_base/media/index.py b/couchpotato/core/media/_base/media/index.py new file mode 100644 index 00000000..b40e162b --- /dev/null +++ b/couchpotato/core/media/_base/media/index.py @@ -0,0 +1,199 @@ +from string import ascii_letters +from hashlib import md5 + +from CodernityDB.tree_index import MultiTreeBasedIndex, TreeBasedIndex +from couchpotato.core.helpers.encoding import toUnicode, simplifyString + + +class MediaIndex(MultiTreeBasedIndex): + _version = 3 + + custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(MediaIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'media' and (data.get('identifier') or data.get('identifiers')): + + identifiers = data.get('identifiers', {}) + if data.get('identifier') and 'imdb' not in identifiers: + identifiers['imdb'] = data.get('identifier') + + ids = [] + for x in identifiers: + ids.append(md5('%s-%s' % (x, identifiers[x])).hexdigest()) + + return ids, None + + +class MediaStatusIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(MediaStatusIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('status'): + return md5(data.get('status')).hexdigest(), None + + +class MediaTypeIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(MediaTypeIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('type'): + return md5(data.get('type')).hexdigest(), None + + +class TitleSearchIndex(MultiTreeBasedIndex): + _version = 1 + + custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex +from itertools import izip +from couchpotato.core.helpers.encoding import simplifyString""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(TitleSearchIndex, self).__init__(*args, **kwargs) + self.__l = kwargs.get('w_len', 2) + + def make_key_value(self, data): + + if data.get('_t') == 'media' and len(data.get('title', '')) > 0: + + out = set() + title = str(simplifyString(data.get('title').lower())) + l = self.__l + title_split = title.split() + + for x in range(len(title_split)): + combo = ' '.join(title_split[x:])[:32].strip() + out.add(combo.rjust(32, '_')) + combo_range = max(l, min(len(combo), 32)) + + for cx in range(1, combo_range): + ccombo = combo[:-cx].strip() + if len(ccombo) > l: + out.add(ccombo.rjust(32, '_')) + + return out, None + + def make_key(self, key): + return key.rjust(32, '_').lower() + + +class TitleIndex(TreeBasedIndex): + _version = 4 + + 
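The new CodernityDB indexes all use fixed-width 32-character md5 keys, and `MediaIndex` emits one key per identifier so a document can be fetched by any of them. A standalone sketch of that key derivation (hashlib only; the document values are example data), showing that lookup and indexing hash the same `'source-value'` string:

```python
from hashlib import md5

def media_keys(doc):
    # Mirrors MediaIndex.make_key_value: one 32-char md5 key per identifier.
    if doc.get('_t') != 'media':
        return None
    identifiers = dict(doc.get('identifiers', {}))
    if doc.get('identifier') and 'imdb' not in identifiers:
        identifiers['imdb'] = doc['identifier']
    return [md5(('%s-%s' % (source, value)).encode('utf-8')).hexdigest()
            for source, value in identifiers.items()]

doc = {'_t': 'media', 'identifier': 'tt0133093'}
print(media_keys(doc))
# The same digest db.get('media', 'imdb-tt0133093') would hash on lookup:
print(md5(b'imdb-tt0133093').hexdigest())
```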
custom_header = """from CodernityDB.tree_index import TreeBasedIndex +from string import ascii_letters +from couchpotato.core.helpers.encoding import toUnicode, simplifyString""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(TitleIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return self.simplify(key) + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('title') is not None and len(data.get('title')) > 0: + return self.simplify(data['title']), None + + def simplify(self, title): + + title = toUnicode(title) + + nr_prefix = '' if title and len(title) > 0 and title[0] in ascii_letters else '#' + title = simplifyString(title) + + for prefix in ['the ', 'an ', 'a ']: + if prefix == title[:len(prefix)]: + title = title[len(prefix):] + break + + return str(nr_prefix + title).ljust(32, ' ')[:32] + + +class StartsWithIndex(TreeBasedIndex): + _version = 3 + + custom_header = """from CodernityDB.tree_index import TreeBasedIndex +from string import ascii_letters +from couchpotato.core.helpers.encoding import toUnicode, simplifyString""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '1s' + super(StartsWithIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return self.first(key) + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('title') is not None: + return self.first(data['title']), None + + def first(self, title): + title = toUnicode(title) + title = simplifyString(title) + + for prefix in ['the ', 'an ', 'a ']: + if prefix == title[:len(prefix)]: + title = title[len(prefix):] + break + + return str(title[0] if title and len(title) > 0 and title[0] in ascii_letters else '#').lower() + + + +class MediaChildrenIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(MediaChildrenIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('parent_id'): + return data.get('parent_id'), None + + +class MediaTagIndex(MultiTreeBasedIndex): + _version = 2 + + custom_header = """from CodernityDB.tree_index import MultiTreeBasedIndex""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(MediaTagIndex, self).__init__(*args, **kwargs) + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('tags') and len(data.get('tags', [])) > 0: + + tags = set() + for tag in data.get('tags', []): + tags.add(self.make_key(tag)) + + return list(tags), None + + def make_key(self, key): + return md5(key).hexdigest() diff --git a/couchpotato/core/media/_base/media/main.py b/couchpotato/core/media/_base/media/main.py index cbcd4245..4cfe597c 100644 --- a/couchpotato/core/media/_base/media/main.py +++ b/couchpotato/core/media/_base/media/main.py @@ -1,22 +1,33 @@ import traceback -from couchpotato import get_session, tryInt +from string import ascii_lowercase + +from CodernityDB.database import RecordNotFound +from couchpotato import tryInt, get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import mergeDicts, splitString, getImdb, getTitle +from couchpotato.core.helpers.variable import splitString, getImdb, getTitle from couchpotato.core.logger import CPLog from couchpotato.core.media import MediaBase -from 
couchpotato.core.settings.model import Library, LibraryTitle, Release, \ - Media -from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql.expression import or_, asc, not_, desc -from string import ascii_lowercase +from .index import MediaIndex, MediaStatusIndex, MediaTypeIndex, TitleSearchIndex, TitleIndex, StartsWithIndex, MediaChildrenIndex, MediaTagIndex + log = CPLog(__name__) class MediaPlugin(MediaBase): + _database = { + 'media': MediaIndex, + 'media_search_title': TitleSearchIndex, + 'media_status': MediaStatusIndex, + 'media_tag': MediaTagIndex, + 'media_by_type': MediaTypeIndex, + 'media_title': TitleIndex, + 'media_startswith': StartsWithIndex, + 'media_children': MediaChildrenIndex, + } + def __init__(self): addApiView('media.refresh', self.refresh, docs = { @@ -60,15 +71,19 @@ class MediaPlugin(MediaBase): addApiView('media.available_chars', self.charView) - addEvent('app.load', self.addSingleRefreshView) - addEvent('app.load', self.addSingleListView) - addEvent('app.load', self.addSingleCharView) - addEvent('app.load', self.addSingleDeleteView) + addEvent('app.load', self.addSingleRefreshView, priority = 100) + addEvent('app.load', self.addSingleListView, priority = 100) + addEvent('app.load', self.addSingleCharView, priority = 100) + addEvent('app.load', self.addSingleDeleteView, priority = 100) addEvent('media.get', self.get) + addEvent('media.with_status', self.withStatus) + addEvent('media.with_identifiers', self.withIdentifiers) addEvent('media.list', self.list) addEvent('media.delete', self.delete) addEvent('media.restatus', self.restatus) + addEvent('media.tag', self.tag) + addEvent('media.untag', self.unTag) def refresh(self, id = '', **kwargs): handlers = [] @@ -80,30 +95,27 @@ class MediaPlugin(MediaBase): if refresh_handler: handlers.append(refresh_handler) - fireEvent('notify.frontend', type = 'media.busy', data = {'id': [tryInt(x) for x in ids]}) + fireEvent('notify.frontend', type = 'media.busy', data = {'_id': ids}) fireEventAsync('schedule.queue', handlers = handlers) return { 'success': True, } - def createRefreshHandler(self, id): - db = get_session() + def createRefreshHandler(self, media_id): - media = db.query(Media).filter_by(id = id).first() - - if media: - - default_title = getTitle(media.library) - identifier = media.library.identifier - event = 'library.update.%s' % media.type + try: + media = get_db().get('id', media_id) + event = '%s.update_info' % media.get('type') def handler(): - fireEvent(event, identifier = identifier, default_title = default_title, on_complete = self.createOnComplete(id)) + fireEvent(event, media_id = media_id, on_complete = self.createOnComplete(media_id)) - if handler: return handler + except: + log.error('Refresh handler for non existing media: %s', traceback.format_exc()) + def addSingleRefreshView(self): for media_type in fireEvent('media.types', merge = True): @@ -111,20 +123,30 @@ class MediaPlugin(MediaBase): def get(self, media_id): - db = get_session() + try: + db = get_db() - imdb_id = getImdb(str(media_id)) + imdb_id = getImdb(str(media_id)) - if imdb_id: - m = db.query(Media).filter(Media.library.has(identifier = imdb_id)).first() - else: - m = db.query(Media).filter_by(id = media_id).first() + if imdb_id: + media = db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc'] + else: + media = db.get('id', media_id) - results = None - if m: - results = m.to_dict(self.default_dict) + if media: - return results + # Attach category + try: media['category'] = db.get('id', media.get('category_id')) + except: 
pass + + media['releases'] = fireEvent('release.for_media', media['_id'], single = True) + + return media + + except RecordNotFound: + log.error('Media with id "%s" not found', media_id) + except: + raise def getView(self, id = None, **kwargs): @@ -135,9 +157,32 @@ class MediaPlugin(MediaBase): 'media': media, } - def list(self, types = None, status = None, release_status = None, limit_offset = None, starts_with = None, search = None, order = None): + def withStatus(self, status, with_doc = True): - db = get_session() + db = get_db() + + status = list(status if isinstance(status, (list, tuple)) else [status]) + + for s in status: + for ms in db.get_many('media_status', s, with_doc = with_doc): + yield ms['doc'] if with_doc else ms + + def withIdentifiers(self, identifiers, with_doc = False): + + db = get_db() + + for x in identifiers: + try: + media = db.get('media', '%s-%s' % (x, identifiers[x]), with_doc = with_doc) + return media + except: + pass + + log.debug('No media found with identifiers: %s', identifiers) + + def list(self, types = None, status = None, release_status = None, status_or = False, limit_offset = None, with_tags = None, starts_with = None, search = None): + + db = get_db() # Make a list from string if status and not isinstance(status, (list, tuple)): @@ -146,139 +191,100 @@ class MediaPlugin(MediaBase): release_status = [release_status] if types and not isinstance(types, (list, tuple)): types = [types] + if with_tags and not isinstance(with_tags, (list, tuple)): + with_tags = [with_tags] - # query movie ids - q = db.query(Media) \ - .with_entities(Media.id) \ - .group_by(Media.id) + # query media ids + if types: + all_media_ids = set() + for media_type in types: + all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)])) + else: + all_media_ids = set([x['_id'] for x in db.all('media')]) + + media_ids = list(all_media_ids) + filter_by = {} # Filter on movie status if status and len(status) > 0: - statuses = fireEvent('status.get', status, single = len(status) > 1) - statuses = [s.get('id') for s in statuses] - - q = q.filter(Media.status_id.in_(statuses)) + filter_by['media_status'] = set() + for media_status in fireEvent('media.with_status', status, with_doc = False, single = True): + filter_by['media_status'].add(media_status.get('_id')) # Filter on release status if release_status and len(release_status) > 0: - q = q.join(Media.releases) - - statuses = fireEvent('status.get', release_status, single = len(release_status) > 1) - statuses = [s.get('id') for s in statuses] - - q = q.filter(Release.status_id.in_(statuses)) - - # Filter on type - if types and len(types) > 0: - try: q = q.filter(Media.type.in_(types)) - except: pass - - # Only join when searching / ordering - if starts_with or search or order != 'release_order': - q = q.join(Media.library, Library.titles) \ - .filter(LibraryTitle.default == True) + filter_by['release_status'] = set() + for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True): + filter_by['release_status'].add(release_status.get('media_id')) # Add search filters - filter_or = [] if starts_with: - starts_with = toUnicode(starts_with.lower()) - if starts_with in ascii_lowercase: - filter_or.append(LibraryTitle.simple_title.startswith(starts_with)) - else: - ignore = [] - for letter in ascii_lowercase: - ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter))) - filter_or.append(not_(or_(*ignore))) + starts_with = 
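The rewritten `list()` builds one id collection per active filter and then narrows `media_ids` with `[n for n in media_ids if n in filter_by[x]]`, which is O(n·m) per filter when the filter holds a list. The same combination can be written with set intersection so every membership test is O(1); this is a suggested alternative sketch, not what the patch does.

```python
def combine_filters(all_media_ids, filter_by):
    # Intersect the candidate ids with every active filter's id-set.
    media_ids = set(all_media_ids)
    for ids in filter_by.values():
        media_ids &= set(ids)
    return media_ids

all_ids = {'m1', 'm2', 'm3'}
filters = {'media_status': {'m1', 'm2'}, 'starts_with': ['m2', 'm3']}
print(combine_filters(all_ids, filters))  # {'m2'}
```

A set would still support the later `media_ids.remove(media_id)` call during the title-order walk, so the change is local.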
toUnicode(starts_with.lower())[0] + starts_with = starts_with if starts_with in ascii_lowercase else '#' + filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)] + # Add tag filter + if with_tags: + filter_by['with_tags'] = set() + for tag in with_tags: + for x in db.get_many('media_tag', tag): + filter_by['with_tags'].add(x['_id']) + + # Filter with search query if search: - filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%')) + filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)] - if len(filter_or) > 0: - q = q.filter(or_(*filter_or)) + if status_or and 'media_status' in filter_by and 'release_status' in filter_by: + filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status']) + del filter_by['media_status'] + del filter_by['release_status'] - total_count = q.count() + # Filter by combining ids + for x in filter_by: + media_ids = [n for n in media_ids if n in filter_by[x]] + + total_count = len(media_ids) if total_count == 0: return 0, [] - if order == 'release_order': - q = q.order_by(desc(Release.last_edit)) - else: - q = q.order_by(asc(LibraryTitle.simple_title)) - + offset = 0 + limit = -1 if limit_offset: splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = splt[0] - offset = 0 if len(splt) is 1 else splt[1] - q = q.limit(limit).offset(offset) + limit = tryInt(splt[0]) + offset = tryInt(0 if len(splt) is 1 else splt[1]) - # Get all media_ids in sorted order - media_ids = [m.id for m in q.all()] + # List movies based on title order + medias = [] + for m in db.all('media_title'): + media_id = m['_id'] + if media_id not in media_ids: continue + if offset > 0: + offset -= 1 + continue - # List release statuses - releases = db.query(Release) \ - .filter(Release.movie_id.in_(media_ids)) \ - .all() - - release_statuses = dict((m, set()) for m in media_ids) - releases_count = dict((m, 0) for m in media_ids) - for release in releases: - release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id)) - releases_count[release.movie_id] += 1 - - # Get main movie data - q2 = db.query(Media) \ - .options(joinedload_all('library.titles')) \ - .options(joinedload_all('library.files')) \ - .options(joinedload_all('status')) \ - .options(joinedload_all('files')) - - q2 = q2.filter(Media.id.in_(media_ids)) - - results = q2.all() - - # Create dict by movie id - movie_dict = {} - for movie in results: - movie_dict[movie.id] = movie - - # List movies based on media_ids order - movies = [] - for media_id in media_ids: - - releases = [] - for r in release_statuses.get(media_id): - x = splitString(r) - releases.append({'status_id': x[0], 'quality_id': x[1]}) + media = fireEvent('media.get', media_id, single = True) # Merge releases with movie dict - movies.append(mergeDicts(movie_dict[media_id].to_dict({ - 'library': {'titles': {}, 'files': {}}, - 'files': {}, - }), { - 'releases': releases, - 'releases_count': releases_count.get(media_id), - })) + medias.append(media) - return total_count, movies + # remove from media ids + media_ids.remove(media_id) + if len(media_ids) == 0 or len(medias) == limit: break + + return total_count, medias def listView(self, **kwargs): - types = splitString(kwargs.get('types')) - status = splitString(kwargs.get('status')) - release_status = splitString(kwargs.get('release_status')) - limit_offset = kwargs.get('limit_offset') - starts_with = kwargs.get('starts_with') - search = 
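Pagination now happens while walking the `media_title` index in title order: skip `offset` matching ids, collect until `limit` is reached, where `limit = -1` means unbounded. A condensed sketch of that windowing, with hypothetical names:

```python
def page_in_title_order(title_ordered_ids, wanted_ids, limit = -1, offset = 0):
    # Walk ids already sorted by title, keep only wanted ones,
    # skip `offset` matches, stop once `limit` items are collected.
    wanted = set(wanted_ids)
    page = []
    for media_id in title_ordered_ids:
        if media_id not in wanted:
            continue
        if offset > 0:
            offset -= 1
            continue
        page.append(media_id)
        if len(page) == limit:  # never true for limit = -1 (unbounded)
            break
    return page

ordered = ['a', 'b', 'c', 'd', 'e']
print(page_in_title_order(ordered, {'b', 'c', 'e'}, limit = 2, offset = 1))
# ['c', 'e']
```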
kwargs.get('search') - order = kwargs.get('order') - total_movies, movies = self.list( - types = types, - status = status, - release_status = release_status, - limit_offset = limit_offset, - starts_with = starts_with, - search = search, - order = order + types = splitString(kwargs.get('type')), + status = splitString(kwargs.get('status')), + release_status = splitString(kwargs.get('release_status')), + status_or = kwargs.get('status_or') is not None, + limit_offset = kwargs.get('limit_offset'), + with_tags = kwargs.get('with_tags'), + starts_with = splitString(kwargs.get('starts_with')), + search = kwargs.get('search') ) return { @@ -292,67 +298,57 @@ class MediaPlugin(MediaBase): for media_type in fireEvent('media.types', merge = True): def tempList(*args, **kwargs): - return self.listView(types = media_type, *args, **kwargs) + return self.listView(types = media_type, **kwargs) addApiView('%s.list' % media_type, tempList) def availableChars(self, types = None, status = None, release_status = None): - types = types or [] - status = status or [] - release_status = release_status or [] - - db = get_session() + db = get_db() # Make a list from string - if not isinstance(status, (list, tuple)): + if status and not isinstance(status, (list, tuple)): status = [status] if release_status and not isinstance(release_status, (list, tuple)): release_status = [release_status] if types and not isinstance(types, (list, tuple)): types = [types] - q = db.query(Media) + # query media ids + if types: + all_media_ids = set() + for media_type in types: + all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)])) + else: + all_media_ids = set([x['_id'] for x in db.all('media')]) + + media_ids = all_media_ids + filter_by = {} # Filter on movie status if status and len(status) > 0: - statuses = fireEvent('status.get', status, single = len(release_status) > 1) - statuses = [s.get('id') for s in statuses] - - q = q.filter(Media.status_id.in_(statuses)) + filter_by['media_status'] = set() + for media_status in fireEvent('media.with_status', status, with_doc = False, single = True): + filter_by['media_status'].add(media_status.get('_id')) # Filter on release status if release_status and len(release_status) > 0: + filter_by['release_status'] = set() + for release_status in fireEvent('release.with_status', release_status, with_doc = False, single = True): + filter_by['release_status'].add(release_status.get('media_id')) - statuses = fireEvent('status.get', release_status, single = len(release_status) > 1) - statuses = [s.get('id') for s in statuses] - - q = q.join(Media.releases) \ - .filter(Release.status_id.in_(statuses)) - - # Filter on type - if types and len(types) > 0: - try: q = q.filter(Media.type.in_(types)) - except: pass - - q = q.join(Library, LibraryTitle) \ - .with_entities(LibraryTitle.simple_title) \ - .filter(LibraryTitle.default == True) - - titles = q.all() + # Filter by combining ids + for x in filter_by: + media_ids = [n for n in media_ids if n in filter_by[x]] chars = set() - for title in titles: - try: - char = title[0][0] - char = char if char in ascii_lowercase else '#' - chars.add(str(char)) - except: - log.error('Failed getting title for %s', title.libraries_id) + for x in db.all('media_startswith'): + if x['_id'] in media_ids: + chars.add(x['key']) if len(chars) == 25: break - return ''.join(sorted(chars)) + return list(chars) def charView(self, **kwargs): @@ -371,59 +367,59 @@ class MediaPlugin(MediaBase): for media_type in 
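`availableChars` now derives its alphabet from the `media_startswith` index instead of SQL titles, returns a list where the old code returned a sorted string, and keeps the early exit once 25 buckets are seen (a heuristic, since 26 letters plus `'#'` are possible). A small sketch of the bucket collection, with the index rows modelled as pairs:

```python
def available_chars(startswith_index, media_ids):
    # startswith_index: iterable of (media_id, bucket) pairs, standing in
    # for db.all('media_startswith') rows with '_id' and 'key' fields.
    media_ids = set(media_ids)
    chars = set()
    for media_id, bucket in startswith_index:
        if media_id in media_ids:
            chars.add(bucket)
        if len(chars) == 25:  # heuristic early exit, as in the patch
            break
    return sorted(chars)

rows = [('m1', 'm'), ('m2', '#'), ('m3', 'm'), ('m4', 'z')]
print(available_chars(rows, {'m1', 'm2'}))  # ['#', 'm']
```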
fireEvent('media.types', merge = True): def tempChar(*args, **kwargs): - return self.charView(types = media_type, *args, **kwargs) + return self.charView(types = media_type, **kwargs) addApiView('%s.available_chars' % media_type, tempChar) def delete(self, media_id, delete_from = None): try: - db = get_session() + db = get_db() - media = db.query(Media).filter_by(id = media_id).first() + media = db.get('id', media_id) if media: deleted = False + + media_releases = fireEvent('release.for_media', media['_id'], single = True) + if delete_from == 'all': + # Delete connected releases + for release in media_releases: + db.delete(release) + db.delete(media) - db.commit() deleted = True else: - done_status = fireEvent('status.get', 'done', single = True) - total_releases = len(media.releases) + total_releases = len(media_releases) total_deleted = 0 - new_movie_status = None - for release in media.releases: - if delete_from in ['wanted', 'snatched', 'late']: - if release.status_id != done_status.get('id'): - db.delete(release) - total_deleted += 1 - new_movie_status = 'done' - elif delete_from == 'manage': - if release.status_id == done_status.get('id'): - db.delete(release) - total_deleted += 1 - new_movie_status = 'active' - db.commit() + new_media_status = None - if total_releases == total_deleted: + for release in media_releases: + if delete_from in ['wanted', 'snatched', 'late']: + if release.get('status') != 'done': + db.delete(release) + total_deleted += 1 + new_media_status = 'done' + elif delete_from == 'manage': + if release.get('status') == 'done': + db.delete(release) + total_deleted += 1 + + if (total_releases == total_deleted and media['status'] != 'active') or (not new_media_status and delete_from == 'late'): db.delete(media) - db.commit() deleted = True - elif new_movie_status: - new_status = fireEvent('status.get', new_movie_status, single = True) - media.profile_id = None - media.status_id = new_status.get('id') - db.commit() + elif new_media_status: + media['status'] = new_media_status + db.update(media) + + fireEvent('media.untag', media['_id'], 'recent', single = True) else: - fireEvent('media.restatus', media.id, single = True) + fireEvent('media.restatus', media.get('_id'), single = True) if deleted: - fireEvent('notify.frontend', type = 'movie.deleted', data = media.to_dict()) + fireEvent('notify.frontend', type = 'media.deleted', data = media) except: log.error('Failed deleting media: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return True @@ -446,35 +442,75 @@ class MediaPlugin(MediaBase): def restatus(self, media_id): - active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True) - try: - db = get_session() + db = get_db() - m = db.query(Media).filter_by(id = media_id).first() - if not m or len(m.library.titles) == 0: - log.debug('Can\'t restatus movie, doesn\'t seem to exist.') - return False + m = db.get('id', media_id) + previous_status = m['status'] - log.debug('Changing status for %s', m.library.titles[0].title) - if not m.profile: - m.status_id = done_status.get('id') + log.debug('Changing status for %s', getTitle(m)) + if not m['profile_id']: + m['status'] = 'done' else: move_to_wanted = True - for t in m.profile.types: - for release in m.releases: - if t.quality.identifier is release.quality.identifier and (release.status_id is done_status.get('id') and t.finish): - move_to_wanted = False + try: + profile = db.get('id', m['profile_id']) + media_releases = fireEvent('release.for_media', m['_id'], single = True) - 
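The `delete_from` handling in `delete()` decides per release whether to drop it and what status the remaining media should take. Extracting that decision into a pure function makes the rules easier to review; a sketch, assuming releases are dicts with a `'status'` key as elsewhere in this patch:

```python
def releases_to_delete(releases, delete_from):
    # Mirrors MediaPlugin.delete: 'wanted'/'snatched'/'late' drop releases
    # that are not done; 'manage' drops the ones that are done.
    doomed, new_status = [], None
    for release in releases:
        if delete_from in ('wanted', 'snatched', 'late'):
            if release.get('status') != 'done':
                doomed.append(release)
                new_status = 'done'
        elif delete_from == 'manage':
            if release.get('status') == 'done':
                doomed.append(release)
    return doomed, new_status

releases = [{'status': 'done'}, {'status': 'snatched'}]
print(releases_to_delete(releases, 'manage'))
# ([{'status': 'done'}], None)
```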
m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id') + for q_identifier in profile['qualities']: + index = profile['qualities'].index(q_identifier) - db.commit() + for release in media_releases: + if q_identifier == release['quality'] and (release.get('status') == 'done' and profile['finish'][index]): + move_to_wanted = False + + m['status'] = 'active' if move_to_wanted else 'done' + except RecordNotFound: + log.debug('Failed restatus: %s', traceback.format_exc()) + + # Only update when status has changed + if previous_status != m['status']: + db.update(m) return True except: log.error('Failed restatus: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() + def tag(self, media_id, tag): + + try: + db = get_db() + m = db.get('id', media_id) + + tags = m.get('tags') or [] + if tag not in tags: + tags.append(tag) + m['tags'] = tags + db.update(m) + + return True + except: + log.error('Failed tagging: %s', traceback.format_exc()) + + return False + + def unTag(self, media_id, tag): + + try: + db = get_db() + m = db.get('id', media_id) + + tags = m.get('tags') or [] + if tag in tags: + new_tags = list(set(tags)) + new_tags.remove(tag) + + m['tags'] = new_tags + db.update(m) + + return True + except: + log.error('Failed untagging: %s', traceback.format_exc()) + + return False diff --git a/couchpotato/core/media/movie/library/__init__.py b/couchpotato/core/media/_base/providers/__init__.py similarity index 100% rename from couchpotato/core/media/movie/library/__init__.py rename to couchpotato/core/media/_base/providers/__init__.py diff --git a/couchpotato/core/migration/__init__.py b/couchpotato/core/media/_base/providers/automation/__init__.py old mode 100755 new mode 100644 similarity index 100% rename from couchpotato/core/migration/__init__.py rename to couchpotato/core/media/_base/providers/automation/__init__.py diff --git a/couchpotato/core/media/_base/providers/automation/base.py b/couchpotato/core/media/_base/providers/automation/base.py new file mode 100644 index 00000000..21d205ae --- /dev/null +++ b/couchpotato/core/media/_base/providers/automation/base.py @@ -0,0 +1,8 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import Provider + +log = CPLog(__name__) + + +class AutomationBase(Provider): + pass diff --git a/couchpotato/core/providers/base.py b/couchpotato/core/media/_base/providers/base.py similarity index 87% rename from couchpotato/core/providers/base.py rename to couchpotato/core/media/_base/providers/base.py index 93e0900f..587545c8 100644 --- a/couchpotato/core/providers/base.py +++ b/couchpotato/core/media/_base/providers/base.py @@ -1,10 +1,3 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import ss -from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \ - possibleTitles, getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env from urlparse import urlparse import json import re @@ -12,6 +5,15 @@ import time import traceback import xml.etree.ElementTree as XMLTree +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import ss +from couchpotato.core.helpers.variable import tryFloat, mergeDicts, md5, \ + possibleTitles +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + + log = CPLog(__name__) @@ -86,10 
+88,14 @@ class Provider(Plugin): if data and len(data) > 0: try: - data = XMLTree.fromstring(ss(data)) + data = XMLTree.fromstring(data) return self.getElements(data, item_path) except: - log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) + try: + data = XMLTree.fromstring(ss(data)) + return self.getElements(data, item_path) + except: + log.error('Failed to parsing %s: %s', (self.getName(), traceback.format_exc())) return [] @@ -102,7 +108,6 @@ class Provider(Plugin): class YarrProvider(Provider): protocol = None # nzb, torrent, torrent_magnet - type = 'movie' cat_ids = {} cat_backup_id = None @@ -124,6 +129,9 @@ class YarrProvider(Provider): else: return [] + def buildUrl(self, *args, **kwargs): + pass + def login(self): # Check if we are still logged in every hour @@ -176,11 +184,11 @@ class YarrProvider(Provider): try: return self.urlopen(url, headers = {'User-Agent': Env.getIdentifier()}, show_error = False) except: - log.error('Failed getting nzb from %s: %s', (self.getName(), traceback.format_exc())) + log.error('Failed getting release from %s: %s', (self.getName(), traceback.format_exc())) return 'try_next' - def search(self, movie, quality): + def search(self, media, quality): if self.isDisabled(): return [] @@ -192,15 +200,17 @@ class YarrProvider(Provider): # Create result container imdb_results = hasattr(self, '_search') - results = ResultList(self, movie, quality, imdb_results = imdb_results) + results = ResultList(self, media, quality, imdb_results = imdb_results) # Do search based on imdb id if imdb_results: - self._search(movie, quality, results) + self._search(media, quality, results) # Search possible titles else: - for title in possibleTitles(getTitle(movie['library'])): - self._searchOnTitle(title, movie, quality, results) + media_title = fireEvent('library.query', media, include_year = False, single = True) + + for title in possibleTitles(media_title): + self._searchOnTitle(title, media, quality, results) return results @@ -241,11 +251,16 @@ class YarrProvider(Provider): return 0 - def getCatId(self, identifier): + def getCatId(self, quality = None): + if not quality: quality = {} + identifier = quality.get('identifier') - for cats in self.cat_ids: - ids, qualities = cats - if identifier in qualities: + want_3d = False + if quality.get('custom'): + want_3d = quality['custom'].get('3d') + + for ids, qualities in self.cat_ids: + if identifier in qualities or (want_3d and '3d' in qualities): return ids if self.cat_backup_id: @@ -290,7 +305,7 @@ class ResultList(list): old_score = new_result['score'] new_result['score'] = int(old_score * is_correct_weight) - log.info('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', ( + log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', ( is_correct_weight, old_score, new_result['score'] diff --git a/couchpotato/core/providers/__init__.py b/couchpotato/core/media/_base/providers/info/__init__.py similarity index 100% rename from couchpotato/core/providers/__init__.py rename to couchpotato/core/media/_base/providers/info/__init__.py diff --git a/couchpotato/core/media/_base/providers/info/base.py b/couchpotato/core/media/_base/providers/info/base.py new file mode 100644 index 00000000..90a9153c --- /dev/null +++ b/couchpotato/core/media/_base/providers/info/base.py @@ -0,0 +1,5 @@ +from couchpotato.core.media._base.providers.base import Provider + + +class BaseInfoProvider(Provider): + type = 'unknown' diff --git 
a/couchpotato/core/providers/info/__init__.py b/couchpotato/core/media/_base/providers/metadata/__init__.py similarity index 100% rename from couchpotato/core/providers/info/__init__.py rename to couchpotato/core/media/_base/providers/metadata/__init__.py diff --git a/couchpotato/core/media/_base/providers/metadata/base.py b/couchpotato/core/media/_base/providers/metadata/base.py new file mode 100644 index 00000000..2a8c5cfe --- /dev/null +++ b/couchpotato/core/media/_base/providers/metadata/base.py @@ -0,0 +1,8 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + +log = CPLog(__name__) + + +class MetaDataBase(Plugin): + pass diff --git a/couchpotato/core/providers/nzb/__init__.py b/couchpotato/core/media/_base/providers/nzb/__init__.py similarity index 100% rename from couchpotato/core/providers/nzb/__init__.py rename to couchpotato/core/media/_base/providers/nzb/__init__.py diff --git a/couchpotato/core/providers/nzb/base.py b/couchpotato/core/media/_base/providers/nzb/base.py similarity index 69% rename from couchpotato/core/providers/nzb/base.py rename to couchpotato/core/media/_base/providers/nzb/base.py index 53c73af0..5e19e524 100644 --- a/couchpotato/core/providers/nzb/base.py +++ b/couchpotato/core/media/_base/providers/nzb/base.py @@ -1,6 +1,7 @@ -from couchpotato.core.providers.base import YarrProvider import time +from couchpotato.core.media._base.providers.base import YarrProvider + class NZBProvider(YarrProvider): diff --git a/couchpotato/core/providers/nzb/binsearch/main.py b/couchpotato/core/media/_base/providers/nzb/binsearch.py similarity index 55% rename from couchpotato/core/providers/nzb/binsearch/main.py rename to couchpotato/core/media/_base/providers/nzb/binsearch.py index c54dd435..c61b72d3 100644 --- a/couchpotato/core/providers/nzb/binsearch/main.py +++ b/couchpotato/core/media/_base/providers/nzb/binsearch.py @@ -1,16 +1,16 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env import re import traceback +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, simplifyString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.base import NZBProvider + + log = CPLog(__name__) -class BinSearch(NZBProvider): +class Base(NZBProvider): urls = { 'download': 'https://www.binsearch.info/fcgi/nzb.fcgi?q=%s', @@ -20,27 +20,15 @@ class BinSearch(NZBProvider): http_time_between_calls = 4 # Seconds - def _search(self, movie, quality, results): + def _search(self, media, quality, results): - arguments = tryUrlencode({ - 'q': movie['library']['identifier'], - 'm': 'n', - 'max': 400, - 'adv_age': Env.setting('retention', 'nzb'), - 'adv_sort': 'date', - 'adv_col': 'on', - 'adv_nfo': 'on', - 'minsize': quality.get('size_min'), - 'maxsize': quality.get('size_max'), - }) - - data = self.getHTMLData(self.urls['search'] % arguments) + data = self.getHTMLData(self.urls['search'] % self.buildUrl(media, quality)) if data: try: html = BeautifulSoup(data) - main_table = html.find('table', attrs = {'id':'r2'}) + main_table = html.find('table', attrs = {'id': 'r2'}) if not main_table: return @@ -48,11 +36,11 @@ class BinSearch(NZBProvider): items = main_table.find_all('tr') for row in items: - title = row.find('span', attrs = {'class':'s'}) + 
title = row.find('span', attrs = {'class': 's'}) if not title: continue - nzb_id = row.find('input', attrs = {'type':'checkbox'})['name'] + nzb_id = row.find('input', attrs = {'type': 'checkbox'})['name'] info = row.find('span', attrs = {'class':'d'}) size_match = re.search('size:.(?P[0-9\.]+.[GMB]+)', info.text) @@ -62,10 +50,10 @@ class BinSearch(NZBProvider): def extra_check(item): parts = re.search('available:.(?P\d+)./.(?P\d+)', info.text) - total = tryInt(parts.group('total')) - parts = tryInt(parts.group('parts')) + total = float(tryInt(parts.group('total'))) + parts = float(tryInt(parts.group('parts'))) - if (total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower())): + if (total / parts) < 1 and ((total / parts) < 0.95 or ((total / parts) >= 0.95 and not ('par2' in info.text.lower() or 'pa3' in info.text.lower()))): log.info2('Wrong: \'%s\', not complete: %s out of %s', (item['name'], parts, total)) return False @@ -77,7 +65,7 @@ class BinSearch(NZBProvider): results.append({ 'id': nzb_id, - 'name': title.text, + 'name': simplifyString(title.text), 'age': tryInt(age), 'size': self.parseSize(size_match.group('size')), 'url': self.urls['download'] % nzb_id, @@ -102,3 +90,31 @@ class BinSearch(NZBProvider): return 'try_next' + +config = [{ + 'name': 'binsearch', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'binsearch', + 'description': 'Free provider, less accurate. See BinSearch', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAATklEQVQY02NwQAMMWAXOnz+PKvD//3/CAvM//z+fgiwAAs+RBab4PP//vwbFjPlAffgEChzOo2r5fBuIfRAC5w8D+QUofkkp8MHjOWQAAM3Sbogztg2wAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/newznab.py b/couchpotato/core/media/_base/providers/nzb/newznab.py new file mode 100644 index 00000000..7db7e865 --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/newznab.py @@ -0,0 +1,266 @@ +from urlparse import urlparse +import time +import traceback +import re + +from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import ResultList +from couchpotato.core.media._base.providers.nzb.base import NZBProvider +from couchpotato.environment import Env +from dateutil.parser import parse +from requests import HTTPError + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'detail': 'details/%s', + 'download': 't=get&id=%s' + } + + passwords_regex = 'password|wachtwoord' + limits_reached = {} + + http_time_between_calls = 1 # Seconds + + def search(self, media, quality): + hosts = self.getHosts() + + results = ResultList(self, media, quality, imdb_results = True) + + for host in hosts: + if self.isDisabled(host): + continue + + self._searchOnHost(host, media, quality, results) + + return results + + def _searchOnHost(self, host, media, quality, results): + + query = self.buildUrl(media, host) + url = '%s&%s' % (self.getUrl(host['host']), query) + nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': 
Env.getIdentifier()}) + + for nzb in nzbs: + + date = None + spotter = None + for item in nzb: + if date and spotter: + break + if item.attrib.get('name') == 'usenetdate': + date = item.attrib.get('value') + break + + # Get the name of the person who posts the spot + if item.attrib.get('name') == 'poster': + if "@spot.net" in item.attrib.get('value'): + spotter = item.attrib.get('value').split("@")[0] + continue + + if not date: + date = self.getTextElement(nzb, 'pubDate') + + nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop() + name = self.getTextElement(nzb, 'title') + + if not name: + continue + + name_extra = '' + if spotter: + name_extra = spotter + + description = '' + if "@spot.net" in nzb_id: + try: + # Get details for extended description to retrieve passwords + query = self.buildDetailsUrl(nzb_id, host['api_key']) + url = '%s&%s' % (self.getUrl(host['host']), query) + nzb_details = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()})[0] + + description = self.getTextElement(nzb_details, 'description') + + # Extract a password from the description + password = re.search('(?:' + self.passwords_regex + ')(?: *)(?:\:|\=)(?: *)(.*?)\|\n|$', description, flags = re.I).group(1) + if password: + name += ' {{%s}}' % password.strip() + except: + log.debug('Error getting details of "%s": %s', (name, traceback.format_exc())) + + results.append({ + 'id': nzb_id, + 'provider_extra': urlparse(host['host']).hostname or host['host'], + 'name': toUnicode(name), + 'name_extra': name_extra, + 'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))), + 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, + 'url': ((self.getUrl(host['host']) + self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host), + 'detail_url': (cleanHost(host['host']) + self.urls['detail']) % tryUrlencode(nzb_id), + 'content': self.getTextElement(nzb, 'description'), + 'description': description, + 'score': host['extra_score'], + }) + + def getHosts(self): + + uses = splitString(str(self.conf('use')), clean = False) + hosts = splitString(self.conf('host'), clean = False) + api_keys = splitString(self.conf('api_key'), clean = False) + extra_score = splitString(self.conf('extra_score'), clean = False) + custom_tags = splitString(self.conf('custom_tag'), clean = False) + + list = [] + for nr in range(len(hosts)): + + try: key = api_keys[nr] + except: key = '' + + try: host = hosts[nr] + except: host = '' + + try: score = tryInt(extra_score[nr]) + except: score = 0 + + try: custom_tag = custom_tags[nr] + except: custom_tag = '' + + list.append({ + 'use': uses[nr], + 'host': host, + 'api_key': key, + 'extra_score': score, + 'custom_tag': custom_tag + }) + + return list + + def belongsTo(self, url, provider = None, host = None): + + hosts = self.getHosts() + + for host in hosts: + result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) + if result: + return result + + def getUrl(self, host): + if '?page=newznabapi' in host: + return cleanHost(host)[:-1] + '&' + + return cleanHost(host) + 'api?' 
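`getHosts` zips six parallel comma-separated settings into per-host dicts, padding missing entries with try/except indexing. The same padding can be expressed with `zip_longest` (`izip_longest` on Python 2); this is an alternative sketch, not the patch's code, and it trims the dict to four of the six fields for brevity.

```python
from itertools import zip_longest  # izip_longest on Python 2

def get_hosts(uses, hosts, api_keys, scores):
    # One dict per configured host; shorter lists pad with defaults,
    # mirroring the try/except indexing in Base.getHosts.
    result = []
    for use, host, key, score in zip_longest(uses, hosts, api_keys, scores):
        result.append({
            'use': use or '0',
            'host': host or '',
            'api_key': key or '',
            'extra_score': int(score or 0),
        })
    return result

print(get_hosts(['1', '1'], ['api.nzb.su', 'nzbs.org'], ['abc'], []))
```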
+ + def isDisabled(self, host = None): + return not self.isEnabled(host) + + def isEnabled(self, host = None): + + # Return true if at least one is enabled and no host is given + if host is None: + for host in self.getHosts(): + if self.isEnabled(host): + return True + return False + + return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use']) + + def getApiExt(self, host): + return '&apikey=%s' % host['api_key'] + + def download(self, url = '', nzb_id = ''): + host = urlparse(url).hostname + + if self.limits_reached.get(host): + # Try again in 3 hours + if self.limits_reached[host] > time.time() - 10800: + return 'try_next' + + try: + data = self.urlopen(url, show_error = False) + self.limits_reached[host] = False + return data + except HTTPError as e: + if e.code == 503: + response = e.read().lower() + if 'maximum api' in response or 'download limit' in response: + if not self.limits_reached.get(host): + log.error('Limit reached for newznab provider: %s', host) + self.limits_reached[host] = time.time() + return 'try_next' + + log.error('Failed download from %s: %s', (host, traceback.format_exc())) + + return 'try_next' + + def buildDetailsUrl(self, nzb_id, api_key): + query = tryUrlencode({ + 't': 'details', + 'id': nzb_id, + 'apikey': api_key, + }) + return query + + + +config = [{ + 'name': 'newznab', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'newznab', + 'order': 10, + 'description': 'Enable NewzNab such as NZB.su, \ + NZBs.org, DOGnzb.cr, \ + Spotweb, NZBGeek, \ + SmackDown, NZBFinder', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQAgMAAABinRfyAAAACVBMVEVjhwD///86aRovd/sBAAAAMklEQVQI12NgAIPQUCCRmQkjssDEShiRuRIqwZqZGcDAGBrqANUhGgIkWAOABKMDxCAA24UK50b26SAAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True, + }, + { + 'name': 'use', + 'default': '0,0,0,0,0,0' + }, + { + 'name': 'host', + 'default': 'api.nzb.su,api.dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws', + 'description': 'The hostname of your newznab provider', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': '0,0,0,0,0,0', + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'custom_tag', + 'advanced': True, + 'label': 'Custom tag', + 'default': ',,,,,', + 'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org', + }, + { + 'name': 'api_key', + 'default': ',,,,,', + 'label': 'Api Key', + 'description': 'Can be found on your profile page', + 'type': 'combined', + 'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/nzb/nzbclub/main.py b/couchpotato/core/media/_base/providers/nzb/nzbclub.py similarity index 52% rename from couchpotato/core/providers/nzb/nzbclub/main.py rename to couchpotato/core/media/_base/providers/nzb/nzbclub.py index ce853cd5..a2660899 100644 --- a/couchpotato/core/providers/nzb/nzbclub/main.py +++ b/couchpotato/core/media/_base/providers/nzb/nzbclub.py @@ -1,40 +1,28 @@ +import time + from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import 
NZBProvider +from couchpotato.core.media._base.providers.nzb.base import NZBProvider from dateutil.parser import parse -import time + log = CPLog(__name__) -class NZBClub(NZBProvider, RSS): +class Base(NZBProvider, RSS): urls = { 'search': 'https://www.nzbclub.com/nzbfeeds.aspx?%s', } - http_time_between_calls = 4 #seconds + http_time_between_calls = 4 # seconds - def _searchOnTitle(self, title, movie, quality, results): + def _search(self, media, quality, results): - q = '"%s %s"' % (title, movie['library']['year']) - - q_param = tryUrlencode({ - 'q': q, - }) - - params = tryUrlencode({ - 'ig': 1, - 'rpp': 200, - 'st': 5, - 'sp': 1, - 'ns': 1, - }) - - nzbs = self.getRSSData(self.urls['search'] % ('%s&%s' % (q_param, params))) + nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media)) for nzb in nzbs: @@ -67,7 +55,7 @@ class NZBClub(NZBProvider, RSS): def getMoreInfo(self, item): full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) html = BeautifulSoup(full_description) - nfo_pre = html.find('pre', attrs = {'class':'nfo'}) + nfo_pre = html.find('pre', attrs = {'class': 'nfo'}) description = toUnicode(nfo_pre.text) if nfo_pre else '' item['description'] = description @@ -81,3 +69,32 @@ class NZBClub(NZBProvider, RSS): return False return True + + +config = [{ + 'name': 'nzbclub', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'NZBClub', + 'description': 'Free provider, less accurate. See NZBClub', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACEUlEQVQ4y3VSMWgUQRR9/8/s7OzeJSdnTsVGghLEYBNQjBpQiRBFhIB2EcHG1kbs0murhZAmVocExEZQ0c7CxkLINYcJJpoYj9wZcnu72fF21uJSXMzuhyne58/j/fcf4b+KokgBIOSU53lxP5b9oNVqDT36dH+5UjoiKvIwPFEEgWBshGZ3E7/NOupL9fMjx0e+ZhKsrq+c/FPZKJi0w4FsQXMBDEJsd7BNW9h2tuyP9vfTALIJkMIu1hYRtINM+dpzcWc0sbkreK4fUEogyraAmKGF3+7vcT/wtR9QwkCabSAzQQuvk0uglAo5YaQ5DASGYjfMXcHVOqKu6NmR7iehlKAdHWUqWPv1c3i+9uwVdRlEBGaGEAJCCrDo9ShhvF6qPq8tL57bp+DbRn2sHtUuCY9YphLMu5921VhrwYJ5tbt0tt6sjQP4vEfB2Ikz7/ytwbeR6ljHkXCUA6UcOLtPOg4MYhtH8ZcLw5er+xQMDAwEURRNl96X596Y6oxFwsw9fmtTOAr2Ik19nL365FZpsLSdnQPPM8aYewc+lDcX4rkHqbQMAGTJXulOLzycmr1bKBTi3DOGYagajcahiaOT89fbM0/dxEsUu3aidfPljWO3HzebzYNBELi5Z5RSJlrrHd/3w8lT114MrVTWOn875fHRiYVisRhorWMpZXdvNnLKGCOstb0AMlulVJI19w/+nceU4D0aCwAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/nzbindex.py b/couchpotato/core/media/_base/providers/nzb/nzbindex.py new file mode 100644 index 00000000..58f4b23f --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/nzbindex.py @@ -0,0 +1,126 @@ +import re +import time + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.nzb.base import NZBProvider +from dateutil.parser import parse + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'download': 'https://www.nzbindex.com/download/', + 'search': 'https://www.nzbindex.com/rss/?%s', + } + + http_time_between_calls = 1 # Seconds + + def _search(self, media, quality, results): + + 
nzbs = self.getRSSData(self.urls['search'] % self.buildUrl(media, quality)) + + for nzb in nzbs: + + enclosure = self.getElement(nzb, 'enclosure').attrib + nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) + + title = self.getTextElement(nzb, "title") + + match = fireEvent('matcher.parse', title, parser='usenet', single = True) + if not match.chains: + log.info('Unable to parse release with title "%s"', title) + continue + + # TODO should we consider other lower-weight chains here? + info = fireEvent('matcher.flatten_info', match.chains[0].info, single = True) + + release_name = fireEvent('matcher.construct_from_raw', info.get('release_name'), single = True) + + file_name = info.get('detail', {}).get('file_name') + file_name = file_name[0] if file_name else None + + title = release_name or file_name + + # Strip extension from parsed title (if one exists) + ext_pos = title.rfind('.') + + # Assume extension if smaller than 4 characters + # TODO this should probably be done a better way + if len(title[ext_pos + 1:]) <= 4: + title = title[:ext_pos] + + if not title: + log.info('Unable to find release name from match') + continue + + try: + description = self.getTextElement(nzb, "description") + except: + description = '' + + def extra_check(item): + if '#c20000' in item['description'].lower(): + log.info('Wrong: Seems to be passworded: %s', item['name']) + return False + + return True + + results.append({ + 'id': nzbindex_id, + 'name': title, + 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))), + 'size': tryInt(enclosure['length']) / 1024 / 1024, + 'url': enclosure['url'], + 'detail_url': enclosure['url'].replace('/download/', '/release/'), + 'description': description, + 'get_more_info': self.getMoreInfo, + 'extra_check': extra_check, + }) + + def getMoreInfo(self, item): + try: + if '/nfo/' in item['description'].lower(): + nfo_url = re.search('href=\"(?P.+)\" ', item['description']).group('nfo') + full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url, cache_timeout = 25920000) + html = BeautifulSoup(full_description) + item['description'] = toUnicode(html.find('pre', attrs = {'id': 'nfo0'}).text) + except: + pass + + +config = [{ + 'name': 'nzbindex', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'nzbindex', + 'description': 'Free provider, less accurate. 
See NZBIndex', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAo0lEQVR42t2SQQ2AMBAEcUCwUAv94QMLfHliAQtYqIVawEItYAG6yZFMLkUANNlk79Kbbtp2P1j9uKxVV9VWFeStl+Wh3fWK9hNwEoADZkJtMD49AqS5AUjWGx6A+m+ARICGrM5W+wSTB0gETKzdHZwCEZAJ8PGZQN4AiQAmkR9s06EBAugJiBoAAPFfAQcBgZcIHzwA6TYP4JsXeSg3P9L31w3eksbH3zMb/wAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True, + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py new file mode 100644 index 00000000..bac0614d --- /dev/null +++ b/couchpotato/core/media/_base/providers/nzb/omgwtfnzbs.py @@ -0,0 +1,103 @@ +from urlparse import urlparse, parse_qs +import time + +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.base import NZBProvider +from dateutil.parser import parse + + +log = CPLog(__name__) + + +class Base(NZBProvider, RSS): + + urls = { + 'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s', + 'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s', + } + + http_time_between_calls = 1 # Seconds + + cat_ids = [ + ([15], ['dvdrip']), + ([15, 16], ['brrip']), + ([16], ['720p', '1080p', 'bd50']), + ([17], ['dvdr']), + ] + cat_backup_id = 'movie' + + def search(self, movie, quality): + + if quality['identifier'] in fireEvent('quality.pre_releases', single = True): + return [] + + return super(Base, self).search(movie, quality) + + def _searchOnTitle(self, title, movie, quality, results): + + q = '%s %s' % (title, movie['info']['year']) + params = tryUrlencode({ + 'search': q, + 'catid': ','.join([str(x) for x in self.getCatId(quality)]), + 'user': self.conf('username', default = ''), + 'api': self.conf('api_key', default = ''), + }) + + nzbs = self.getRSSData(self.urls['search'] % params) + + for nzb in nzbs: + + enclosure = self.getElement(nzb, 'enclosure').attrib + nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0] + + results.append({ + 'id': nzb_id, + 'name': toUnicode(self.getTextElement(nzb, 'title')), + 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))), + 'size': tryInt(enclosure['length']) / 1024 / 1024, + 'url': enclosure['url'], + 'detail_url': self.urls['detail_url'] % nzb_id, + 'description': self.getTextElement(nzb, 'description') + }) + + +config = [{ + 'name': 'omgwtfnzbs', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'nzb_providers', + 'name': 'OMGWTFNZBs', + 'description': 'See OMGWTFNZBs', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQEAIAAADAAbR1AAADbElEQVR4AZ2UW0ybZRiAy/OvdHaLYvB0YTRIFi7GkM44zRLmIfNixkWdiRMyYoxRE8/TC7MYvXCGEBmr3mxLwVMwY0wYA7e6Wso4lB6h/U9taSlMGIfBXLYlJMyo0S///2dJI5lxN8/F2/f9nu9737e/jYmXr6KTbN9BGG9HE/NotQ76UWziNzrXFiETk/5ARUNH+7+0kW7fSgTl0VKGOLZzidOkmuuIo7q2oTArNLPIzhdIkqXkerFOm2CaD/5bcKrjIL2c3fkhPxOq93Kcb91v46fV9TQKF4TgV/TbUsQtzfCaK6jMOd5DJrguSIIhexmqqVxN0FXbRR8/ND/LYTTj6J7nl2gnL47OkDW4KJhnQHCa6JpKVNJGA3OC58nwBJoZ//ebbIyKpBxjrr0o1q1FMRkrKXZnHWF85VvxMrJxibwhGyd0f5bLnKzqJs1k0Sfo+EU8hdAUvkbcwKEgs2D0OiV4jmmD1zb+Tp6er0JMMvDxPo5xev9zTBF683NS+N56n1YiB95B5crr93KRuKhKI0tb0Kw2mgLLqTjLEWO8424i9IvURaYeOckwf3+/yCC9e3bQQ/MuD+Monk0k+XFXMUfx7z5EEP+XlXi5tLlMxH8zLppw7idJrugcus30kC86gc7UrQqjLIukM8zWHOACeU+TiMxXN6ExVOkgz4lvPEzice1GIVhxhG4CrZvpl6TH55giKWqXGLy9hZh5aUtgDSew/msSyCKpl+DDNfxJc8NBIsxUxUnz14O/oONu+IIIvso9TLBQ1SY5rUhuSzUhAqJ2mRXBLDOCeUtgUZXsaObT8BffhUJPqWgiV+3zKKzYH0ClvTRLhD77HIqVkyh5jThnivehoG+qJctIRSPn6bxvO4FCgTl9c1DmbpjLajbQFE8aW5SU3rg+zOPGUjTUF9NFpLEbH2c/KmGYlY69/GQJVtGMSUcEp9eCbB1nctbxHTLRdTUkGDf+B02uGWRG3OvpJ/zSMwzif+oxVBID3cQKBavLCiPmB2PM2UuSCUPgrX4VDb97AwEG67bh4+KTOlncvu3M31BwA5rLHbCfEjwkNDky9e/SSbSxnD46Pg0RJtpXRvhmBSZHpRjWtKwFybjuQeXaKxto4WjLZZZvVmC17pZLJFkwxm5++PS2Mrwc7nyIMYZe/IzoP5d6QgEybqTXAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'api_key', + 'label': 'Api Key', + 'default': '', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': 20, + 'type': 'int', + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/providers/torrent/__init__.py b/couchpotato/core/media/_base/providers/torrent/__init__.py similarity index 100% rename from couchpotato/core/providers/torrent/__init__.py rename to couchpotato/core/media/_base/providers/torrent/__init__.py diff --git a/couchpotato/core/media/_base/providers/torrent/awesomehd.py b/couchpotato/core/media/_base/providers/torrent/awesomehd.py new file mode 100644 index 00000000..78c46488 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/awesomehd.py @@ -0,0 +1,141 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://awesome-hd.net/', + 'detail': 'https://awesome-hd.net/torrents.php?torrentid=%s', + 'search': 'https://awesome-hd.net/searchapi.php?action=imdbsearch&passkey=%s&imdb=%s&internal=%s', + 'download': 'https://awesome-hd.net/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s', + } + http_time_between_calls = 1 + + def _search(self, movie, quality, results): + + data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), getIdentifier(movie), self.conf('only_internal'))) + + if data: + try: + soup = BeautifulSoup(data) + + if soup.find('error'): + log.error(soup.find('error').get_text()) + return + + authkey = soup.find('authkey').get_text() + entries = soup.find_all('torrent') + + for entry in entries: + + torrentscore = 0 + torrent_id = entry.find('id').get_text() + name = entry.find('name').get_text() + year = entry.find('year').get_text() + releasegroup = entry.find('releasegroup').get_text() + resolution = entry.find('resolution').get_text() + encoding = 
entry.find('encoding').get_text() + freeleech = entry.find('freeleech').get_text() + torrent_desc = '/ %s / %s / %s ' % (releasegroup, resolution, encoding) + + if freeleech == '0.25' and self.conf('prefer_internal'): + torrent_desc += '/ Internal' + torrentscore += 200 + + if encoding == 'x264' and self.conf('favor') in ['encode', 'both']: + torrentscore += 300 + if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']: + torrentscore += 200 + + results.append({ + 'id': torrent_id, + 'name': re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), + 'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')), + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(entry.find('size').get_text()), + 'seeders': tryInt(entry.find('seeders').get_text()), + 'leechers': tryInt(entry.find('leechers').get_text()), + 'score': torrentscore + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 'awesomehd', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Awesome-HD', + 'description': 'AHD', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC+UlEQVR4AV1SO0y6dxQ9H4g8CoIoohZ5NA0aR2UgkYpNB5uocTSaLlrDblMH09Gt8d90r3YpJkanxjA4GGkbO7RNxSABq8jDGnkpD+UD5NV7Bxvbk9wvv+/3uPece66A/yEWi42FQqHVfD7/cbPZtIEglUpjOp3uZHR0dBvAn3gDIRqNgjE4OKj0+Xzf3NzcfD4wMCCjf5TLZbTbbajVatzf3+Pu7q5uNpt35ufnvwBQAScQRREEldfr9RWLxan+/n5YrVa+jFarhVfQQyQSCU4EhULhX15engEgSrjC0dHRVqlUmjQYDBgaGgKtuTqz4mTgIoVCASaTCX19fajVapOHh4dbFJBks9mxcDi8qtFoJEajkfVyJWi1WkxMTMDhcIAT8x6D7/Dd6+vr1fHx8TGp2+3+iqo5+YCzBwIBToK5ubl/mQwPDyMSibAs2Gw2UHNRrValz8/PDUk8Hv9EqVRCr9fj4uICTNflcqFer+Pg4AB7e3uoVCq8x9Rxfn6O7u5uqFQq8FspZXxHTekggByA3W4Hr9PpNDeRL3I1cMhkMrBrnZ2dyGQyvNYIs7OzVbJNPjIyAraLwYdcjR8wXl5eIJfLwRIFQQDLYkm3t7c1CdGPPT4+cpOImp4PODMeaK+n10As2jBbrHifHOjS6qAguVFimkqlwAMmIQnHV1dX4NDQhVwuhyZTV6pgIktzDzkkk0lEwhEEzs7ASQr5Ai4vL1nuccfCwsLO/v6+p9FoyJhF6ekJro/cPCzIZLNQa7rQoK77/SdgWWpKkCaJ5EB9aWnpe6nH40nRMBnJV4f5gw+FX3/5GX/8/htXRZdOzzqhJWn6nl6YbTZqqhrhULD16fT0d8FgcFtYW1vD5uamfGVl5cd4IjldKhZACdkJvKfWUANrxEaJV4hiGVaL1b+7653hXzwRZQr2X76xsfG1xWIRaZzbNPv/CdrjEL9cX/+WXFBSgEPgzxuwG3Yans9OT0+naBZMIJDNfzudzp8WFxd/APAX3uAf9WOTxOPLdosAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'only_internal', + 'advanced': True, + 'type': 'bool', + 'default': 1, + 'description': 'Only search for internal releases.' + }, + { + 'name': 'prefer_internal', + 'advanced': True, + 'type': 'bool', + 'default': 1, + 'description': 'Favors internal releases over non-internal releases.' + }, + { + 'name': 'favor', + 'advanced': True, + 'default': 'both', + 'type': 'dropdown', + 'values': [('Encodes & Remuxes', 'both'), ('Encodes', 'encode'), ('Remuxes', 'remux'), ('None', 'none')], + 'description': 'Give extra scoring to encodes or remuxes.' 
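# Editorial sketch of the scoring applied in _search() above (values come from the
# patch; the aggregation shown is an illustration, not new behaviour):
#
#     torrentscore = 0
#     if freeleech == '0.25' and prefer_internal:              # 0.25 marks internal releases
#         torrentscore += 200
#     if encoding == 'x264' and favor in ('encode', 'both'):
#         torrentscore += 300
#     if re.search('Remux', encoding) and favor in ('remux', 'both'):
#         torrentscore += 200
#
# The 'extra_score' option below is then added on top of this per-release score
# when results are ranked.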
+ }, + { + 'name': 'extra_score', + 'advanced': True, + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + }, + ], + }, + ], +}] + diff --git a/couchpotato/core/providers/torrent/base.py b/couchpotato/core/media/_base/providers/torrent/base.py similarity index 75% rename from couchpotato/core/providers/torrent/base.py rename to couchpotato/core/media/_base/providers/torrent/base.py index e134c8f3..9f5f2890 100644 --- a/couchpotato/core/providers/torrent/base.py +++ b/couchpotato/core/media/_base/providers/torrent/base.py @@ -1,9 +1,11 @@ +import time import traceback + from couchpotato.core.helpers.variable import getImdb, md5, cleanHost from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import YarrProvider +from couchpotato.core.media._base.providers.base import YarrProvider from couchpotato.environment import Env -import time + log = CPLog(__name__) @@ -15,6 +17,22 @@ class TorrentProvider(YarrProvider): proxy_domain = None proxy_list = [] + def imdbMatch(self, url, imdbId): + if getImdb(url) == imdbId: + return True + + if url[:4] == 'http': + try: + cache_key = md5(url) + data = self.getCache(cache_key, url) + except IOError: + log.error('Failed to open %s.', url) + return False + + return getImdb(data) == imdbId + + return False + def getDomain(self, url = ''): forced_domain = self.conf('domain') @@ -26,7 +44,8 @@ class TorrentProvider(YarrProvider): prop_name = 'proxy.%s' % proxy last_check = float(Env.prop(prop_name, default = 0)) - if last_check > time.time() - 1209600: + + if last_check > time.time() - 86400: continue data = '' diff --git a/couchpotato/core/media/_base/providers/torrent/bithdtv.py b/couchpotato/core/media/_base/providers/torrent/bithdtv.py new file mode 100644 index 00000000..57bc2218 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/bithdtv.py @@ -0,0 +1,139 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'http://www.bit-hdtv.com/', + 'login': 'http://www.bit-hdtv.com/takelogin.php', + 'login_check': 'http://www.bit-hdtv.com/messages.php', + 'detail': 'http://www.bit-hdtv.com/details.php?id=%s', + 'search': 'http://www.bit-hdtv.com/torrents.php?', + } + + # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken + http_time_between_calls = 1 # Seconds + + def _search(self, media, quality, results): + + query = self.buildUrl(media, quality) + + url = "%s&%s" % (self.urls['search'], query) + + data = self.getHTMLData(url) + + if data: + # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML + split_data = data.partition('-->') + if '## SELECT COUNT(' in split_data[0]: + data = split_data[2] + + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'width': '750', 'class': ''}) + if result_table is None: + return + + entries = result_table.find_all('tr') + for result in entries[1:]: + + cells = result.find_all('td') + link = cells[2].find('a') + torrent_id = link['href'].replace('/details.php?id=', '') + + results.append({ + 'id': torrent_id, + 'name': link.contents[0].get_text(), + 'url': cells[0].find('a')['href'], + 'detail_url': 
self.urls['detail'] % torrent_id, + 'size': self.parseSize(cells[6].get_text()), + 'seeders': tryInt(cells[8].string), + 'leechers': tryInt(cells[9].string), + 'get_more_info': self.getMoreInfo, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + } + + def getMoreInfo(self, item): + full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('table', attrs = {'class': 'detail'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'bithdtv', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'BiT-HDTV', + 'description': 'BiT-HDTV', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABMklEQVR4AZ3Qu0ojcQCF8W9MJcQbJNgEEQUbQVIqWgnaWfkIvoCgggixEAmIhRtY2GV3w7KwU61B0EYIxmiw0YCik84ipaCuc0nmP5dcjIUgOjqDvxf4OAdf9mnMLcUJyPyGSCP+YRdC+Kp8iagJKhuS+InYRhTGgDbeV2uEMand4ZRxizjXHQEimxhraAnUr73BNqQxMiNeV2SwcjTLEVtb4Zl10mXutvOWm2otw5Sxz6TGTbdd6ncuYvVLXAXrvM+ruyBpy1S3JLGDfUQ1O6jn5vTsrJXvqSt4UNfj6vxTRPxBHER5QeSirhLGk/5rWN+ffB1XZuxjnDy1q87m7TS+xOGA+Iv4gfkbaw+nOMXHDHnITGEk0VfRFnn4Po4vNYm6RGukmggR0L08+l+e4HMeASo/i6AJUjLgAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/bitsoup.py b/couchpotato/core/media/_base/providers/torrent/bitsoup.py new file mode 100644 index 00000000..9519e58b --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/bitsoup.py @@ -0,0 +1,134 @@ +import traceback + +from bs4 import BeautifulSoup, SoupStrainer +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.bitsoup.me/', + 'login': 'https://www.bitsoup.me/takelogin.php', + 'login_check': 'https://www.bitsoup.me/my.php', + 'search': 'https://www.bitsoup.me/browse.php?%s', + 'baseurl': 'https://www.bitsoup.me/%s', + } + + http_time_between_calls = 1 # Seconds + only_tables_tags = SoupStrainer('table') + + def _searchOnTitle(self, title, movie, quality, results): + + url = self.urls['search'] % self.buildUrl(title, movie, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data, 'html.parser', parse_only = self.only_tables_tags) + + try: + result_table = html.find('table', attrs 
= {'class': 'koptekst'}) + if not result_table or 'nothing found!' in data.lower(): + return + + entries = result_table.find_all('tr') + for result in entries[1:]: + + all_cells = result.find_all('td') + + torrent = all_cells[1].find('a') + download = all_cells[3].find('a') + + torrent_id = torrent['href'] + torrent_id = torrent_id.replace('details.php?id=', '') + torrent_id = torrent_id.replace('&hit=1', '') + + torrent_name = torrent.getText() + + torrent_size = self.parseSize(all_cells[7].getText()) + torrent_seeders = tryInt(all_cells[9].getText()) + torrent_leechers = tryInt(all_cells[10].getText()) + torrent_url = self.urls['baseurl'] % download['href'] + torrent_detail_url = self.urls['baseurl'] % torrent['href'] + + results.append({ + 'id': torrent_id, + 'name': torrent_name, + 'size': torrent_size, + 'seeders': torrent_seeders, + 'leechers': torrent_leechers, + 'url': torrent_url, + 'detail_url': torrent_detail_url, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'ssl': 'yes', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'bitsoup', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Bitsoup', + 'description': 'Bitsoup', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAB8ElEQVR4AbWSS2sTURiGz3euk0mswaE37HhNhIrajQheFgF3rgR/lAt/gOBCXNZlo6AbqfUWRVCxi04wqUnTRibpJLaJzdzOOZ6WUumyC5/VHOb9eN/FA91uFx0FjI4IPfgiGLTWH73tn348GKmN7ijD0d2b41fO5qJEaX24AWNIUrVQCTTJ3Llx6vbV6Vtzk7Gi9+ebi996guFDDYAQAVj4FExP5qdOZB49W62t/zH3hECcwsPnbWeMXz6Xi2K1f0ApeK3hMCHHbP5gvvoriBgFAAQJEAxhjJ4u+YWTNsVI6b1JgtPWZkoIefKy4fcii2OTw2BABs7wj3bYDlLL4rvjGWOdTser1j5Xf7c3Q/MbHQYApxItvnm31mhQQ71eX2vUB76/vsWB2hg0QuogrMwLIG8P3InM2/eVGXeDViqVwWB79vRU2lgJYmdHcgXCTAXQFJTN5HguvDCR2Hxsxe8EvT54nlcul5vNpqDIEgwRQanAhAAABgRIyiQcjpIkkTOuWyqVoN/vSylX67XXH74uV1vHRUyxxFqbLBCSmBpiXSq6xcL5QrGYzWZ3XQIAwdlOJB+/aL764ucdmncYs0WsCI7kvTnn+qyDMEnTVCn1Tz5KsBFg6fvWcmsUAcnYNC/g2hnromvvqbHvxv+39S+MX+bWkFXwAgAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/hdbits.py b/couchpotato/core/media/_base/providers/torrent/hdbits.py new file mode 100644 index 00000000..ebf2899b --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/hdbits.py @@ -0,0 +1,116 @@ +import re +import json +import traceback + +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = 
{ + 'test': 'https://hdbits.org/', + 'detail': 'https://hdbits.org/details.php?id=%s', + 'download': 'https://hdbits.org/download.php?id=%s&passkey=%s', + 'api': 'https://hdbits.org/api/torrents' + } + + http_time_between_calls = 1 # Seconds + + def _post_query(self, **params): + + post_data = { + 'username': self.conf('username'), + 'passkey': self.conf('passkey') + } + post_data.update(params) + + try: + result = self.getJsonData(self.urls['api'], data = json.dumps(post_data)) + + if result: + if result['status'] != 0: + log.error('Error searching hdbits: %s' % result['message']) + else: + return result['data'] + except: + pass + + return None + + def _search(self, movie, quality, results): + + match = re.match(r'tt(\d{7})', getIdentifier(movie)) + + data = self._post_query(imdb = {'id': match.group(1)}) + + if data: + try: + for result in data: + results.append({ + 'id': result['id'], + 'name': result['name'], + 'url': self.urls['download'] % (result['id'], self.conf('passkey')), + 'detail_url': self.urls['detail'] % result['id'], + 'size': tryInt(result['size']) / 1024 / 1024, + 'seeders': tryInt(result['seeders']), + 'leechers': tryInt(result['leechers']) + }) + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 'hdbits', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'HDBits', + 'wizard': True, + 'description': 'HDBits', + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABi0lEQVR4AZWSzUsbQRjGdyabTcvSNPTSHlpQQeMHJApC8CJRvHgQQU969+LJP8G7f4N3DwpeFRQvRr0EKaUl0ATSpkigUNFsMl/r9NmZLCEHA/nNO5PfvMPDm0DI6fV3ZxiolEICe1oZCBVCCmBPKwOh2ErKBHGE4KYEXBpSLkUlqO4LcM7f+6nVhRnOhSkOz/hexk+tL+YL0yPF2YmN4tynD++4gTLGkNNac9YFLoREBR1+cnF3dFY6v/m6PD+FaXiNJtgA4xYbABxiGrz6+6HWaI5/+Qh37YS0/3Znc8UxwNGBIIBX22z+/ZdJ+4wzyjpR4PEpODg8tgUXBv2iWUzSpa12B0IR6n6lvt8Aek2lZHb084+fdRNgrwY8z81PjhVy2d2ttUrtV/lbBa+JXGEpDMPnoF2tN1QYRqVUtf6nFbThb7wk7le395elcqhASLb39okDiHY00VCtCTEHwSiH4AI0lkOiT1dwMeSfT3SRxiQWNO7Zwj1egkoVIQFMKvSiC3bcjXq9Jf8DcDIRT3hh10kAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/torrent/ilovetorrents/main.py b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py similarity index 51% rename from couchpotato/core/providers/torrent/ilovetorrents/main.py rename to couchpotato/core/media/_base/providers/torrent/ilovetorrents.py index f8ed67a3..b6ccecd1 100644 --- a/couchpotato/core/providers/torrent/ilovetorrents/main.py +++ b/couchpotato/core/media/_base/providers/torrent/ilovetorrents.py @@ -1,19 +1,21 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider 
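# Editorial aside on the HDBits API call above (not part of the patch): the search
# endpoint wants the bare numeric IMDb id, hence the tt-prefix strip in _search().
# For example, with a hypothetical identifier:
#
#     import re
#     match = re.match(r'tt(\d{7})', 'tt1375666')
#     post_data = {'imdb': {'id': match.group(1)}}   # -> {'imdb': {'id': '1375666'}}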
import re import traceback +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + log = CPLog(__name__) -class ILoveTorrents(TorrentProvider): +class Base(TorrentProvider): urls = { 'download': 'https://www.ilovetorrents.me/%s', - 'detail': 'https//www.ilovetorrents.me/%s', + 'detail': 'https://www.ilovetorrents.me/%s', 'search': 'https://www.ilovetorrents.me/browse.php?search=%s&page=%s&cat=%s', 'test': 'https://www.ilovetorrents.me/', 'login': 'https://www.ilovetorrents.me/takelogin.php', @@ -21,9 +23,9 @@ class ILoveTorrents(TorrentProvider): } cat_ids = [ - (['41'], ['720p', '1080p', 'brrip']), - (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), - (['20'], ['dvdr']) + (['41'], ['720p', '1080p', 'brrip']), + (['19'], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), + (['20'], ['dvdr']) ] cat_backup_id = 200 @@ -34,28 +36,35 @@ class ILoveTorrents(TorrentProvider): page = 0 total_pages = 1 - cats = self.getCatId(quality['identifier']) + cats = self.getCatId(quality) while page < total_pages: - movieTitle = tryUrlencode('"%s" %s' % (title, movie['library']['year'])) + movieTitle = tryUrlencode('"%s" %s' % (title, movie['info']['year'])) search_url = self.urls['search'] % (movieTitle, page, cats[0]) page += 1 data = self.getHTMLData(search_url) if data: try: - soup = BeautifulSoup(data) - results_table = soup.find('table', attrs = {'class': 'koptekst'}) + results_table = None + + data_split = splitString(data, '<table') + soup = None + for x in data_split: + soup = BeautifulSoup(x) + results_table = soup.find('table', attrs = {'class': 'koptekst'}) + if results_table: + break + + pagelinks = soup.find_all(href = re.compile('page')) - pageNumbers = [int(re.search('page=(?P<pageNumber>.+'')', i['href']).group('pageNumber')) for i in pagelinks] - total_pages = max(pageNumbers) - + page_numbers = [int(re.search('page=(?P<page_number>.+'')', i['href']).group('page_number')) for i in pagelinks] + total_pages = max(page_numbers) except: pass @@ -77,7 +86,7 @@ class ILoveTorrents(TorrentProvider): return confirmed + trusted + vip + moderated id = re.search('id=(?P<id>\d+)&', link).group('id') - url = self.urls['download'] % (download) + url = self.urls['download'] % download fileSize = self.parseSize(result.select('td.rowhead')[5].text) results.append({ @@ -111,7 +120,7 @@ class ILoveTorrents(TorrentProvider): try: full_description = self.getHTMLData(item['detail_url']) html = BeautifulSoup(full_description) - nfo_pre = html.find('td', attrs = {'class':'main'}).findAll('table')[1] + nfo_pre = html.find('td', attrs = {'class': 'main'}).findAll('table')[1] description = toUnicode(nfo_pre.text) if nfo_pre else '' except: log.error('Failed getting more info for %s', item['name']) @@ -126,3 +135,61 @@ class ILoveTorrents(TorrentProvider): return 'logout.php' in output.lower() loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'ilovetorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'ILoveTorrents', + 'description': 'Where the Love of Torrents is Born', + 'wizard': True, + 'icon':
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACPUlEQVR4AYWM0U9SbxjH3+v266I/oNvWZTfd2J1d0ZqbZEFwWrUImOKs4YwtumFKZvvlJJADR2TCQQlMPKg5NmpREgaekAPnBATKgmK1LqQlx6awHnZWF1Tr2Xfvvs+7z+dB0mlO7StpAh+M4S/2jbo3w8+xvJvlnSneEt+10zwer5ujNUOoChjALWFw5XOwdCAk/P57cGvPl+Oht0W7VJHN5NC1uW1BON4hGjXbwpVWMZhsy9v7sEIXAsDNYBXgdkEoIKyWD2CF8ut/aOXTZc/fBSgLWw1BgA4BDHOV0GkT90cBQpXahU5TFomsb38XhJC5/Tbh1P8c6rJlBeGfAeyMhUFwNVcs9lxV9Ot0dwmyd+mrNvRtbJ2fSPC6Z3Vsvub2z3sDFACAAYzk0+kUyxEkyfN7PopqNBro55A+P6yPKIrL5zF1HwjdeBJJCObIsZO79bo3sHhWhglo5WMV3mazuVPb4fLvSL8/FAkB1hK6rXQPwYhMyROK8VK5LAiH/jsMt0HQjxiN4/ePdoilllcqDyt3Mkg8mRBNbIhMb8RERkowQA/p76g0/UDDdCoNmDminM0qSK5vlpE5kugCHhNPxntwWmJPYTMZtYcFR6ABHQsVRlYLukVORaaULvqKI46keFSCv77kSPS6kxrPptLNDHgz16fWBtyxe6v5h08LUy+KI8ushqTPWWIX8Sg6b45IrGtyW6zXFb/hpQf9m3oqfWuB0fpSw0uZ4WB69En69uOk2rmO2V52PXj+A/mI4ESKpb2HAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'username', + 'label': 'Username', + 'type': 'string', + 'default': '', + 'description': 'The user name for your ILT account', + }, + { + 'name': 'password', + 'label': 'Password', + 'type': 'password', + 'default': '', + 'description': 'The password for your ILT account.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/iptorrents.py b/couchpotato/core/media/_base/providers/torrent/iptorrents.py new file mode 100644 index 00000000..0915ca31 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/iptorrents.py @@ -0,0 +1,172 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.iptorrents.com/', + 'base_url': 'https://www.iptorrents.com', + 'login': 'https://www.iptorrents.com/torrents/', + 'login_check': 'https://www.iptorrents.com/inbox.php', + 'search': 'https://www.iptorrents.com/torrents/?%s%%s&q=%s&qf=ti&p=%%d', + } + + http_time_between_calls = 1 # Seconds + cat_backup_id = None + + def buildUrl(self, title, media, quality): + return self._buildUrl(title.replace(':', ''), quality) + + def _buildUrl(self, query, quality): + + cat_ids = self.getCatId(quality) + + if not cat_ids: + log.warning('Unable to find category ids for identifier "%s"', quality.get('identifier')) + return None + + return self.urls['search'] % ("&".join(("l%d=" % x) for x in cat_ids), tryUrlencode(query).replace('%', '%%')) + + def _searchOnTitle(self, title, media, quality, results): + + freeleech = '' if not self.conf('freeleech') else '&free=on' + + base_url = self.buildUrl(title, media, quality) + if not base_url: return + + pages = 1 + current_page = 1 + while current_page <= pages and not self.shuttingDown(): + data = self.getHTMLData(base_url % (freeleech, current_page)) + + if 
data: + html = BeautifulSoup(data) + + try: + page_nav = html.find('span', attrs = {'class': 'page_nav'}) + if page_nav: + next_link = page_nav.find("a", text = "Next") + if next_link: + final_page_link = next_link.previous_sibling.previous_sibling + pages = int(final_page_link.string) + + result_table = html.find('table', attrs = {'class': 'torrents'}) + + if not result_table or 'nothing found!' in data.lower(): + return + + entries = result_table.find_all('tr') + + for result in entries[1:]: + + torrent = result.find_all('td') + if len(torrent) <= 1: + break + + torrent = torrent[1].find('a') + + torrent_id = torrent['href'].replace('/details.php?id=', '') + torrent_name = six.text_type(torrent.string) + torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.') + torrent_details_url = self.urls['base_url'] + torrent['href'] + torrent_size = self.parseSize(result.find_all('td')[5].string) + torrent_seeders = tryInt(result.find('td', attrs = {'class': 'ac t_seeders'}).string) + torrent_leechers = tryInt(result.find('td', attrs = {'class': 'ac t_leechers'}).string) + + results.append({ + 'id': torrent_id, + 'name': torrent_name, + 'url': torrent_download_url, + 'detail_url': torrent_details_url, + 'size': torrent_size, + 'seeders': torrent_seeders, + 'leechers': torrent_leechers, + }) + + except: + log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc())) + break + + current_page += 1 + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'login': 'submit', + } + + def loginSuccess(self, output): + return 'don\'t have an account' not in output.lower() + + def loginCheckSuccess(self, output): + return '/logout.php' in output.lower() + + +config = [{ + 'name': 'iptorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'IPTorrents', + 'description': 'IPTorrents', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAABRklEQVR42qWQO0vDUBiG8zeKY3EqQUtNO7g0J6ZJ1+ifKIIFQXAqDYKCyaaYxM3udrZLHdRFhXrZ6liCW6mubfk874EESgqaeOCF7/Y8hEh41aq6yZi2nyZgBGya9XKtZs4No05pAkZV2YbEmyMMsoSxLQeC46wCTdPPY4HruPQyGIhF97qLWsS78Miydn4XdK46NJ9OsQAYBzMIMf8MQ9wtCnTdWCaIDx/u7uljOIQEe0hiIWPamSTLay3+RxOCSPI9+RJAo7Er9r2bnqjBFAqyK+VyK4f5/Cr5ni8OFKVCz49PFI5GdNvvU7ttE1M1zMU+8AMqFksEhrMnQsBDzqmDAwzx2ehRLwT7yyCI+vSC99c3mozH1NxrJgWWtR1BOECfEJSVCm6WCzJGCA7+IWhBsM4zywDPwEp4vCjx2DzBH2ODAfsDb33Ps6dQwJgAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'freeleech', + 'default': 0, + 'type': 'bool', + 'description': 'Only search for [FreeLeech] torrents.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py new file mode 100644 index
00000000..730bb608 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/kickasstorrents.py @@ -0,0 +1,182 @@ +import re +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider + + +log = CPLog(__name__) + + +class Base(TorrentMagnetProvider): + + urls = { + 'detail': '%s/%s', + 'search': '%s/%s-i%s/', + } + + cat_ids = [ + (['cam'], ['cam']), + (['telesync'], ['ts', 'tc']), + (['screener', 'tvrip'], ['screener']), + (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']), + (['dvdrip'], ['dvdrip']), + (['dvd'], ['dvdr']), + ] + + http_time_between_calls = 1 # Seconds + cat_backup_id = None + + proxy_list = [ + 'https://kickass.to', + 'http://kickass.pw', + 'http://kickassto.come.in', + 'http://katproxy.ws', + 'http://www.kickassunblock.info', + 'http://www.kickassproxy.info', + 'http://katph.eu', + ] + + def _search(self, media, quality, results): + + data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', getIdentifier(media).replace('tt', ''))) + + if data: + + cat_ids = self.getCatId(quality) + table_order = ['name', 'size', None, 'age', 'seeds', 'leechers'] + + try: + html = BeautifulSoup(data) + resultdiv = html.find('div', attrs = {'class': 'tabs'}) + for result in resultdiv.find_all('div', recursive = False): + if result.get('id').lower().replace('tab-', '') not in cat_ids: + continue + + try: + for temp in result.find_all('tr'): + if temp['class'] == 'firstr' or not temp.get('id'): + continue + + new = {} + + nr = 0 + for td in temp.find_all('td'): + column_name = table_order[nr] + if column_name: + + if column_name == 'name': + link = td.find('div', {'class': 'torrentname'}).find_all('a')[2] + new['id'] = temp.get('id')[-7:] + new['name'] = link.text + new['url'] = td.find('a', 'imagnet')['href'] + new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:]) + new['verified'] = True if td.find('a', 'iverify') else False + new['score'] = 100 if new['verified'] else 0 + elif column_name == 'size': + new['size'] = self.parseSize(td.text) + elif column_name == 'age': + new['age'] = self.ageToDays(td.text) + elif column_name == 'seeds': + new['seeders'] = tryInt(td.text) + elif column_name == 'leechers': + new['leechers'] = tryInt(td.text) + + nr += 1 + + # Only store verified torrents + if self.conf('only_verified') and not new['verified']: + continue + + results.append(new) + except: + log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc()) + + except AttributeError: + log.debug('No search results found.') + + def ageToDays(self, age_str): + age = 0 + age_str = age_str.replace('&nbsp;', ' ') + + regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+' + matches = re.findall(regex, age_str) + for match in matches: + nr, size = match + mult = 1 + if size == 'week': + mult = 7 + elif size == 'month': + mult = 30.5 + elif size == 'year': + mult = 365 + + age += tryInt(nr) * mult + + return tryInt(age) + + def isEnabled(self): + return super(Base, self).isEnabled() and self.getDomain() + + def correctProxy(self, data): + return 'search query' in data.lower() + + +config = [{ + 'name': 'kickasstorrents', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'KickAssTorrents', + 'description': 'KickAssTorrents', + 'wizard': True, + 'icon':
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAACD0lEQVR42pXK20uTcRjA8d/fsJsuap0orBuFlm3hir3JJvQOVmuwllN20Lb2isI2nVHKjBqrCWYaNnNuBrkSWxglhDVJOkBdSWUOq5FgoiOrMdRJ2xPPxW+8OUf1ge/FcyCUSVe2qedK5U/OxNTTXRNXEQ52Glb4O6dNEfK1auJkvRY7+/zxnQbA/D596laXcY3OWOiaIX2393SGznUmxkUo/YkDgqHemuzobQ7+NV+reo5Q1mqp68GABdY3+/EloO+JeN4tEqiFU8f3CwhyWo9E7wfMgI0ELTDx0AvjIxcgvZoC9P7NMN7yMmrFeoKa68rfDfmrARsNN0Ihr55cx59ctZWSiwS5bLKpwW4dYJH+M/B6/CYszE0BFZ+egG+Ln+HRoBN/cpl1pV6COIMkOnBVA/w+fXgGKJVM4LxhumMleoL06hJ3wKcCfl+/TAKKx17gnFePRwkqxR4BQSpFkbCrrQJueI7mWpyfATQ9OQY43+uv/+PutBycJ3y2qn2x7jY50GJvnwLKZjOwspyE5I8F4N+1yr1uwqcs3ym63Hwo29EiAyzUWQVr6WVAS4lZCPutQG/2GtES2YiW3d3XflYKtL72kzAcdEDHeSa3czeIMyyz/TApRKvcFfE0isHbJMnrHCf6xTLb1ORvWNlWo91cvHrJUQo0o6ZoRi7dIiT/g2WEDi27Iyov21xMCvgNfXvtwIACfHwAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True, + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests, keep empty to let CouchPotato pick.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'only_verified', + 'advanced': True, + 'type': 'bool', + 'default': False, + 'description': 'Only search for verified releases.' + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/providers/torrent/passthepopcorn/main.py b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py similarity index 60% rename from couchpotato/core/providers/torrent/passthepopcorn/main.py rename to couchpotato/core/media/_base/providers/torrent/passthepopcorn.py index 57a36c27..609ef2d4 100644 --- a/couchpotato/core/providers/torrent/passthepopcorn/main.py +++ b/couchpotato/core/media/_base/providers/torrent/passthepopcorn.py @@ -1,19 +1,21 @@ -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -from dateutil.parser import parse import htmlentitydefs import json import re import time import traceback + +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getTitle, tryInt, mergeDicts, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +from dateutil.parser import parse import six + log = CPLog(__name__) -class PassThePopcorn(TorrentProvider): +class Base(TorrentProvider): urls = { 'domain': 'https://tls.passthepopcorn.me', @@ -26,43 +28,15 @@ class PassThePopcorn(TorrentProvider): http_time_between_calls = 2 - quality_search_params = { - 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, - '1080p': {'resolution': '1080p'}, - '720p': {'resolution': '720p'}, - 'brrip': {'media': 'Blu-ray'}, - 'dvdr': {'resolution': 'anysd'}, - 'dvdrip': {'media': 'DVD'}, - 'scr': {'media': 'DVD-Screener'}, - 'r5': {'media': 'R5'}, - 'tc': {'media': 'TC'}, - 'ts': {'media': 'TS'}, - 'cam': {'media': 'CAM'} - } + def _search(self, media, quality, results): - 
post_search_filters = { - 'bd50': {'Codec': ['BD50']}, - '1080p': {'Resolution': ['1080p']}, - '720p': {'Resolution': ['720p']}, - 'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']}, - 'dvdr': {'Codec': ['DVD5', 'DVD9']}, - 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, - 'scr': {'Source': ['DVD-Screener']}, - 'r5': {'Source': ['R5']}, - 'tc': {'Source': ['TC']}, - 'ts': {'Source': ['TS']}, - 'cam': {'Source': ['CAM']} - } - - def _search(self, movie, quality, results): - - movie_title = getTitle(movie['library']) + movie_title = getTitle(media) quality_id = quality['identifier'] params = mergeDicts(self.quality_search_params[quality_id].copy(), { 'order_by': 'relevance', 'order_way': 'descending', - 'searchstr': movie['library']['identifier'] + 'searchstr': getIdentifier(media) }) url = '%s?json=noredirect&%s' % (self.urls['torrent'], tryUrlencode(params)) @@ -162,23 +136,23 @@ class PassThePopcorn(TorrentProvider): def htmlToUnicode(self, text): def fixup(m): - text = m.group(0) - if text[:2] == "&#": + txt = m.group(0) + if txt[:2] == "&#": # character reference try: - if text[:3] == "&#x": - return unichr(int(text[3:-1], 16)) + if txt[:3] == "&#x": + return unichr(int(txt[3:-1], 16)) else: - return unichr(int(text[2:-1])) + return unichr(int(txt[2:-1])) except ValueError: pass else: # named entity try: - text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]) + txt = unichr(htmlentitydefs.name2codepoint[txt[1:-1]]) except KeyError: pass - return text # leave as is + return txt # leave as is return re.sub("&#?\w+;", fixup, six.u('%s') % text) def unicodeToASCII(self, text): @@ -190,11 +164,11 @@ class PassThePopcorn(TorrentProvider): def getLoginParams(self): return { - 'username': self.conf('username'), - 'password': self.conf('password'), - 'passkey': self.conf('passkey'), - 'keeplogged': '1', - 'login': 'Login' + 'username': self.conf('username'), + 'password': self.conf('password'), + 'passkey': self.conf('passkey'), + 'keeplogged': '1', + 'login': 'Login' } def loginSuccess(self, output): @@ -204,3 +178,90 @@ class PassThePopcorn(TorrentProvider): return False loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'passthepopcorn', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'PassThePopcorn', + 'description': 'PassThePopcorn.me', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAARklEQVQoz2NgIAP8BwMiGWRpIN1JNWn/t6T9f532+W8GkNt7vzz9UkfarZVpb68BuWlbnqW1nU7L2DMx7eCoBlpqGOppCQB83zIgIg+wWQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests (HTTPS only!), keep empty to use default (tls.passthepopcorn.me).', + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'passkey', + 'default': '', + }, + { + 'name': 'prefer_golden', + 'advanced': True, + 'type': 'bool', + 'label': 'Prefer golden', + 'default': 1, + 'description': 'Favors Golden Popcorn-releases over all other releases.' + }, + { + 'name': 'prefer_scene', + 'advanced': True, + 'type': 'bool', + 'label': 'Prefer scene', + 'default': 0, + 'description': 'Favors scene-releases over non-scene releases.' 
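# Editorial sketch (not part of the patch) of what htmlToUnicode() above produces
# for the entity forms fixup() handles, assuming Python 2 semantics as in the rest
# of this file:
#
#     htmlToUnicode('&#x26;')  # -> u'&'  (hex character reference)
#     htmlToUnicode('&#38;')   # -> u'&'  (decimal character reference)
#     htmlToUnicode('&amp;')   # -> u'&'  (named entity via htmlentitydefs)
#
# Anything fixup() cannot decode is returned unchanged.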
+ }, + { + 'name': 'require_approval', + 'advanced': True, + 'type': 'bool', + 'label': 'Require approval', + 'default': 0, + 'description': 'Require staff-approval for releases to be accepted.' + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/sceneaccess.py b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py new file mode 100644 index 00000000..e172f6ad --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/sceneaccess.py @@ -0,0 +1,135 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://www.sceneaccess.eu/', + 'login': 'https://www.sceneaccess.eu/login', + 'login_check': 'https://www.sceneaccess.eu/inbox', + 'detail': 'https://www.sceneaccess.eu/details?id=%s', + 'search': 'https://www.sceneaccess.eu/browse?c%d=%d', + 'archive': 'https://www.sceneaccess.eu/archive?&c%d=%d', + 'download': 'https://www.sceneaccess.eu/%s', + } + + http_time_between_calls = 1 # Seconds + + def _searchOnTitle(self, title, media, quality, results): + + url = self.buildUrl(title, media, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + resultsTable = html.find('table', attrs = {'id': 'torrents-table'}) + if resultsTable is None: + return + + entries = resultsTable.find_all('tr', attrs = {'class': 'tt_row'}) + for result in entries: + + link = result.find('td', attrs = {'class': 'ttr_name'}).find('a') + url = result.find('td', attrs = {'class': 'td_dl'}).find('a') + leechers = result.find('td', attrs = {'class': 'ttr_leechers'}).find('a') + torrent_id = link['href'].replace('details?id=', '') + + results.append({ + 'id': torrent_id, + 'name': link['title'], + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['detail'] % torrent_id, + 'size': self.parseSize(result.find('td', attrs = {'class': 'ttr_size'}).contents[0]), + 'seeders': tryInt(result.find('td', attrs = {'class': 'ttr_seeders'}).find('a').string), + 'leechers': tryInt(leechers.string) if leechers else 0, + 'get_more_info': self.getMoreInfo, + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def getMoreInfo(self, item): + full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) + html = BeautifulSoup(full_description) + nfo_pre = html.find('div', attrs = {'id': 'details_table'}) + description = toUnicode(nfo_pre.text) if nfo_pre else '' + + item['description'] = description + return item + + # Login + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'submit': 'come on in', + } + + def loginSuccess(self, output): + 
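# Editorial note (not part of the patch): like most trackers in this set, SceneAccess
# exposes no login-status endpoint, so success is inferred from the returned markup --
# only a logged-in page links to '/inbox' (other providers key on 'logout.php').
# Illustrative example: loginSuccess('<a href="/inbox">Inbox</a>') -> True.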
return '/inbox' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'sceneaccess', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'SceneAccess', + 'description': 'SceneAccess', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAACT0lEQVR4AYVQS0sbURidO3OTmajJ5FElTTOkPmZ01GhHrIq0aoWAj1Vc+A/cuRMXbl24V9SlCGqrLhVFCrooEhCp2BAx0mobTY2kaR7qmOm87EXL1EWxh29xL+c7nPMdgGHYO5bF/gdbefnr6WlbWRnxluMwAB4Z0uEgXa7nwaDL7+/RNPzxbYvb/XJ0FBYVfd/ayh0fQ4qCGEHcm0KLRZUk7Pb2YRJPRwcsKMidnKD3t9VVT3s7BDh+z5FOZ3Vfn3h+Hltfx00mRRSRWFcUmmVNhYVqPn8dj3va2oh+txvcQRVF9ebm1fi4k+dRFbosY5rm4Hk7xxULQnJnx93S4g0EIEEQRoDLo6PrWEw8Pc0eHLwYGopMTDirqlJ7eyhYYGHhfgfHCcKYksZGVB/NcXI2mw6HhZERqrjYTNPHi4tFPh8aJIYIhgPlcCRDoZLW1s75+Z/7+59nZ/OJhLWigqAoKZX6Mjf3dXkZ3pydGYLc4aEoCCkInzQ1fRobS2xuvllaonkedfArnY5OTdGVldBkOADgqq2Nr6z8CIWaJietDHOhKB+HhwFKC6Gnq4ukKJvP9zcSbjYDXbeVlkKzuZBhnnV3e3t6UOmaJO0ODibW1hB1GYkg8R/gup7Z3TVZLJ5AILW9LcZiVpYtYBhw16O3t7cauckyeF9Tgz0ATpL2+nopmWycmbnY2LiKRjFk6/d7+/vRJfl4HGzV1T0UIM43MGBvaIBWK/YvwM5w+IMgGH8tkyEgvIpE7M3Nt6qqZrNyOq1kMmouh455Ggz+BhKY4GEc2CfwAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/providers/torrent/thepiratebay/main.py b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py similarity index 50% rename from couchpotato/core/providers/torrent/thepiratebay/main.py rename to couchpotato/core/media/_base/providers/torrent/thepiratebay.py index 6ef5123a..57bcfbdc 100644 --- a/couchpotato/core/providers/torrent/thepiratebay/main.py +++ b/couchpotato/core/media/_base/providers/torrent/thepiratebay.py @@ -1,54 +1,55 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentMagnetProvider import re import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider import six + log = CPLog(__name__) -class ThePirateBay(TorrentMagnetProvider): +class Base(TorrentMagnetProvider): urls = { - 'detail': '%s/torrent/%s', - 'search': '%s/search/%s/%s/7/%s' + 'detail': '%s/torrent/%s', + 'search': '%s/search/%%s/%%s/7/%%s' } - cat_ids = [ - ([207], ['720p', '1080p']), - ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), - ([201, 207], ['brrip']), - ([202], ['dvdr']) - ] - cat_backup_id = 200 disable_provider = False http_time_between_calls = 0 proxy_list = [ - 'https://tpb.ipredator.se', + 'https://nobay.net', + 'https://thebay.al', 
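# Editorial aside (not part of the patch): this proxy_list feeds getDomain() in the
# torrent base class. Roughly, mirrors are re-checked at most once per 86400 s (the
# base.py hunk earlier in this diff tightened that window from two weeks, 1209600 s),
# and a mirror is kept only if its markup passes correctProxy() -- for ThePirateBay,
# a page containing 'title="Pirate Search"'.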
'https://thepiratebay.se', - 'http://pirateproxy.ca', - 'http://tpb.al', + 'http://thepiratebay.cd', + 'http://thebootlegbay.com', 'http://www.tpb.gr', - 'http://nl.tpb.li', - 'http://proxybay.eu', - 'https://www.getpirate.com', - 'http://piratebay.io', + 'http://tpbproxy.co.uk', + 'http://pirateproxy.in', + 'http://www.getpirate.com', + 'http://piratebay.io', + 'http://bayproxy.li', + 'http://proxybay.pw', ] - def _searchOnTitle(self, title, movie, quality, results): + def _search(self, media, quality, results): page = 0 total_pages = 1 - cats = self.getCatId(quality['identifier']) + cats = self.getCatId(quality) + + base_search_url = self.urls['search'] % self.getDomain() while page < total_pages: - search_url = self.urls['search'] % (self.getDomain(), tryUrlencode('"%s" %s' % (title, movie['library']['year'])), page, ','.join(str(x) for x in cats)) + search_url = base_search_url % self.buildUrl(media, page, cats) + page += 1 data = self.getHTMLData(search_url) @@ -67,7 +68,7 @@ class ThePirateBay(TorrentMagnetProvider): pass entries = results_table.find_all('tr') - for result in entries[2:]: + for result in entries[1:]: link = result.find(href = re.compile('torrent\/\d+\/')) download = result.find(href = re.compile('magnet:')) @@ -88,7 +89,7 @@ class ThePirateBay(TorrentMagnetProvider): results.append({ 'id': re.search('/(?P<id>\d+)/', link['href']).group('id'), - 'name': link.string, + 'name': six.text_type(link.string), 'url': download['href'], 'detail_url': self.getDomain(link['href']), 'size': self.parseSize(size), @@ -102,7 +103,7 @@ log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) def isEnabled(self): - return super(ThePirateBay, self).isEnabled() and self.getDomain() + return super(Base, self).isEnabled() and self.getDomain() def correctProxy(self, data): return 'title="Pirate Search"' in data @@ -111,7 +112,61 @@ full_description = self.getCache('tpb.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) html = BeautifulSoup(full_description) nfo_pre = html.find('div', attrs = {'class': 'nfo'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' + description = '' + try: + description = toUnicode(nfo_pre.text) + except: + pass item['description'] = description return item + + +config = [{ + 'name': 'thepiratebay', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'ThePirateBay', + 'description': 'The world\'s largest bittorrent tracker.
ThePirateBay', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAAAAAA6mKC9AAAA3UlEQVQY02P4DwT/YADIZvj//7qnozMYODmtAAusZoCDELDAegYGViZhAWZmRoYoqIDupfhNN1M3dTBEggXWMZg9jZRXV77YxhAOFpjDwMAPMoCXmcHsF1SAQZ6bQY2VgUEbKHClcAYzg3mINEO8jSCD478/DPsZmvqWblu1bOmStes3Pp0ezVDF4Gif0Hfx9///74/ObRZ2YNiZ47C8XIRBxFJR0jbSSUud4f9zAQWn8NTuziAt2zy5xIMM/z8LFX0E+fD/x0MRDCeA1v7Z++Y/FDzyvAtyBxIA+h8A8ZKLeT+lJroAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests, keep empty to let CouchPotato pick.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/providers/torrent/torrentbytes/main.py b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py similarity index 51% rename from couchpotato/core/providers/torrent/torrentbytes/main.py rename to couchpotato/core/media/_base/providers/torrent/torrentbytes.py index 603da6e0..156243eb 100644 --- a/couchpotato/core/providers/torrent/torrentbytes/main.py +++ b/couchpotato/core/media/_base/providers/torrent/torrentbytes.py @@ -1,14 +1,16 @@ +import traceback + from bs4 import BeautifulSoup from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + log = CPLog(__name__) -class TorrentBytes(TorrentProvider): +class Base(TorrentProvider): urls = { 'test': 'https://www.torrentbytes.net/', @@ -29,19 +31,19 @@ class TorrentBytes(TorrentProvider): ([20], ['dvdr']), ] - http_time_between_calls = 1 #seconds + http_time_between_calls = 1 # Seconds cat_backup_id = None def _searchOnTitle(self, title, movie, quality, results): - url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0]) + url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['info']['year'])), self.getCatId(quality)[0]) data = self.getHTMLData(url) if data: html = BeautifulSoup(data) try: - result_table = html.find('table', attrs = {'border' : '1'}) + result_table = html.find('table', attrs = {'border': '1'}) if not result_table: return @@ -50,7 +52,7 @@ class TorrentBytes(TorrentProvider): for result in entries[1:]: cells = result.find_all('td') - link = cells[1].find('a', attrs = {'class' : 'index'}) + link = cells[1].find('a', attrs = {'class': 'index'}) full_id = link['href'].replace('details.php?id=', '') torrent_id = full_id[:6] @@ -80,3 +82,55 @@ class TorrentBytes(TorrentProvider): loginCheckSuccess = loginSuccess + +config = [{ + 'name': 'torrentbytes', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentBytes', + 'description': 'TorrentBytes', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAMAAAAoLQ9TAAAAeFBMVEUAAAAAAEQAA1QAEmEAKnQALHYAMoEAOokAQpIASYsASZgAS5UATZwATosATpgAVJ0AWZwAYZ4AZKAAaZ8Ab7IAcbMAfccAgcQAgcsAhM4AiscAjMkAmt0AoOIApecAp/EAqvQAs+kAt+wA3P8A4f8A//8VAAAfDbiaAl08AAAAjUlEQVQYGQXBO04DQRAFwHqz7Z8sECIl5f73ISRD5GBs7UxTlWfg9vYXnvJRQJqOL88D6BAwJtMMumHUVCl60aa6H93IrIv0b+157f1lpk+fm87lMWrZH0vncKbXdRUQrRmrh9C6Iwkq6rg4PXZcyXmbizzeV/g+rDra0rGve8jPKLSOJNi2AQAwAGjwD7ApPkEHdtPQAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentday.py b/couchpotato/core/media/_base/providers/torrent/torrentday.py new file mode 100644 index 00000000..a3e9b78c --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentday.py @@ -0,0 +1,114 @@ +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'http://www.td.af/', + 'login': 'http://www.td.af/torrents/', + 'login_check': 'http://www.torrentday.com/userdetails.php', + 'detail': 'http://www.td.af/details.php?id=%s', + 'search': 'http://www.td.af/V3/API/API.php', + 'download': 'http://www.td.af/download.php/%s/%s', + } + + http_time_between_calls = 1 # Seconds + + def _searchOnTitle(self, title, media, quality, results): + + query = '"%s" %s' % (title, media['info']['year']) + + data = { + '/browse.php?': None, + 'cata': 'yes', + 'jxt': 8, + 'jxw': 'b', + 'search': query, + } + + data = self.getJsonData(self.urls['search'], data = data) + try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', []) + except: return + + for torrent in torrents: + results.append({ + 'id': torrent['id'], + 'name': torrent['name'], + 'url': self.urls['download'] % (torrent['id'], torrent['fname']), + 'detail_url': self.urls['detail'] % torrent['id'], + 'size': self.parseSize(torrent.get('size')), + 'seeders': tryInt(torrent.get('seed')), + 'leechers': tryInt(torrent.get('leech')), + }) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'submit.x': 18, + 'submit.y': 11, + 'submit': 'submit', + } + + def loginSuccess(self, output): + return 'Password not correct' not in output + + def loginCheckSuccess(self, output): + return 'logout.php' in output.lower() + + +config = [{ + 'name': 'torrentday', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentDay', + 'description': 'TorrentDay', + 'wizard': True, + 'icon': 
'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAC5ElEQVQ4y12TXUgUURTH//fO7Di7foeQJH6gEEEIZZllVohfSG/6UA+RSFAQQj74VA8+Bj30lmAlRVSEvZRfhNhaka5ZUG1paKaW39tq5O6Ou+PM3M4o6m6X+XPPzD3zm/+dcy574r515WfIW8CZBM4YAA5Gc/aQC3yd7oXYEONcsISE5dTDh91HS0t7FEWhBUAeN9ynV/d9qJAgE4AECURAcVsGlCCnly26LMA0IQwTa52dje3d3e3hcPi8qqrrMjcVYI3EHCQZlkFOHBwR2QHh2ASAAIJxWGAQEDxjePhs3527XjJwnb37OHBq0T+Tyyjh+9KnEzNJ7nouc1Q/3A3HGsOvnJy+PSUlj81w2Lny9WuJ6+3AmTjD4HOcrdR2dWXLRQePvyaSLfQOPMPC8mC9iHCsOxSyzJCelzdSXlNzD5ujpb25Wbfc/XXJemTXF4+nnCNq+AMLe50uFfEJTiw4GXSFtiHL0SnIq66+p0kSArqO+eH3RdsAv9+f5vW7L7GICq6rmM8XBCAXlBw90rOyxibn5yzfkg/L09M52/jxqdESaIrBXHYZZbB1GX8cEpySxKIB8S5XcOnvqpli1zuwmrTtoLjw5LOK/eeuWsE4JH5IRPaPZKiKigmPp+5pa+u1aEjIMhEgrRkmi9mgxGUhM7LNJSzOzsE3+cOeExovXOjdytE0LV4zqNZUtV0uZzAGoGkhDH/2YHZiErmv4uyWQnZZWc+hoqL3WzlTExN5hhA8IEwkZWZOxwB++30YG/9GkYCPvqAaHAW5uWPROW86OmqCprUR7z1yZDAGQNuCvkoB/baIKUBWMTYymv+gra3eJNvjXu+B562tFyXqTJ6YuHK8rKwvBmC3vR7cOCPQLWFz8LnfXWUrJo9U19BwMyUlJRjTSMJ2ENxUiGxq9KXQfwqYlnWstvbR5aamG9g0uzM8Q4OFt++3NNixQ2NgYmeN03FOTUv7XVpV9aKisvLl1vN/WVhNc/Fi1NEAAAAASUVORK5CYII=', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentleech.py b/couchpotato/core/media/_base/providers/torrent/torrentleech.py new file mode 100644 index 00000000..5f59dab7 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentleech.py @@ -0,0 +1,126 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'http://www.torrentleech.org/', + 'login': 'http://www.torrentleech.org/user/account/login/', + 'login_check': 'http://torrentleech.org/user/messages', + 'detail': 'http://www.torrentleech.org/torrent/%s', + 'search': 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d', + 'download': 'http://www.torrentleech.org%s', + } + + http_time_between_calls = 1 # Seconds + cat_backup_id = None + + def _searchOnTitle(self, title, media, quality, results): + + url = self.urls['search'] % self.buildUrl(title, media, quality) + + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'id': 'torrenttable'}) + if not result_table: + return + + entries = result_table.find_all('tr') + + for result in entries[1:]: + + link = result.find('td', attrs = {'class': 'name'}).find('a') + url = result.find('td', attrs = {'class': 'quickdownload'}).find('a') + details = result.find('td', attrs = {'class': 'name'}).find('a') + + results.append({ + 'id': link['href'].replace('/torrent/', ''), + 'name': six.text_type(link.string), + 'url': 
self.urls['download'] % url['href'], + 'detail_url': self.urls['download'] % details['href'], + 'size': self.parseSize(result.find_all('td')[4].string), + 'seeders': tryInt(result.find('td', attrs = {'class': 'seeders'}).string), + 'leechers': tryInt(result.find('td', attrs = {'class': 'leechers'}).string), + }) + + except: + log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'remember_me': 'on', + 'login': 'submit', + } + + def loginSuccess(self, output): + return '/user/account/logout' in output.lower() or 'welcome back' in output.lower() + + loginCheckSuccess = loginSuccess + + +config = [{ + 'name': 'torrentleech', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentLeech', + 'description': 'TorrentLeech', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACHUlEQVR4AZVSO48SYRSdGTCBEMKzILLAWiybkKAGMZRUUJEoDZX7B9zsbuQPYEEjNLTQkYgJDwsoSaxspEBsCITXjjNAIKi8AkzceXgmbHQ1NJ5iMufmO9/9zrmXlCSJ+B8o75J8Pp/NZj0eTzweBy0Wi4PBYD6f12q1r9ebTCZx+22HcrnMsuxms7m6urTZ7LPZDMVYLBZ8ZV3yo8aq9Pq0wzCMTqe77dDv9y8uLyAWBH6xWOyL0K/56fcb+rrPgPZ6PZfLRe1fsl6vCUmGKIqoqNXqdDr9Dbjps9znUV0uTqdTjuPkDoVCIfcuJ4gizjMMm8u9vW+1nr04czqdK56c37CbKY9j2+1WEARZ0Gq1RFHAz2q1qlQqXxoN69HRcDjUarW8ZD6QUigUOnY8uKYH8N1sNkul9yiGw+F6vS4Rxn8EsodEIqHRaOSnq9T7ajQazWQycEIR1AEBYDabSZJyHDucJyegwWBQr9ebTCaKvHd4cCQANUU9evwQ1Ofz4YvUKUI43GE8HouSiFiNRhOowWBIpVLyHITJkuW3PwgAEf3pgIwxF5r+OplMEsk3CPT5szCMnY7EwUdhwUh/CXiej0Qi3idPz89fdrpdbsfBzH7S3Q9K5pP4c0sAKpVKoVAQGO1ut+t0OoFAQHkH2Da/3/+but3uarWK0ZMQoNdyucRutdttmqZxMTzY7XaYxsrgtUjEZrNhkSwWyy/0NCatZumrNQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 20, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/providers/torrent/torrentpotato/main.py b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py similarity index 50% rename from couchpotato/core/providers/torrent/torrentpotato/main.py rename to couchpotato/core/media/_base/providers/torrent/torrentpotato.py index eaaf8d2c..d1426765 100644 --- a/couchpotato/core/providers/torrent/torrentpotato/main.py +++ b/couchpotato/core/media/_base/providers/torrent/torrentpotato.py @@ -1,45 +1,40 @@ -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import ResultList -from couchpotato.core.providers.torrent.base import TorrentProvider from urlparse import urlparse import re import traceback +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import splitString, tryInt, tryFloat +from couchpotato.core.logger import CPLog +from 
couchpotato.core.media._base.providers.base import ResultList +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider + + log = CPLog(__name__) -class TorrentPotato(TorrentProvider): +class Base(TorrentProvider): urls = {} limits_reached = {} http_time_between_calls = 1 # Seconds - def search(self, movie, quality): + def search(self, media, quality): hosts = self.getHosts() - results = ResultList(self, movie, quality, imdb_results = True) + results = ResultList(self, media, quality, imdb_results = True) for host in hosts: if self.isDisabled(host): continue - self._searchOnHost(host, movie, quality, results) + self._searchOnHost(host, media, quality, results) return results - def _searchOnHost(self, host, movie, quality, results): + def _searchOnHost(self, host, media, quality, results): - arguments = tryUrlencode({ - 'user': host['name'], - 'passkey': host['pass_key'], - 'imdbid': movie['library']['identifier'] - }) - url = '%s?%s' % (host['host'], arguments) - - torrents = self.getJsonData(url, cache_timeout = 1800) + torrents = self.getJsonData(self.buildUrl(media, host), cache_timeout = 1800) if torrents: try: @@ -75,7 +70,7 @@ class TorrentPotato(TorrentProvider): pass_keys = splitString(self.conf('pass_key'), clean = False) extra_score = splitString(self.conf('extra_score'), clean = False) - list = [] + host_list = [] for nr in range(len(hosts)): try: key = pass_keys[nr] @@ -93,7 +88,7 @@ class TorrentPotato(TorrentProvider): try: seed_time = seed_times[nr] except: seed_time = '' - list.append({ + host_list.append({ 'use': uses[nr], 'host': host, 'name': name, @@ -103,14 +98,14 @@ class TorrentPotato(TorrentProvider): 'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0 }) - return list + return host_list def belongsTo(self, url, provider = None, host = None): hosts = self.getHosts() for host in hosts: - result = super(TorrentPotato, self).belongsTo(url, host = host['host'], provider = provider) + result = super(Base, self).belongsTo(url, host = host['host'], provider = provider) if result: return result @@ -127,3 +122,67 @@ class TorrentPotato(TorrentProvider): return False return TorrentProvider.isEnabled(self) and host['host'] and host['pass_key'] and int(host['use']) + + +config = [{ + 'name': 'torrentpotato', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentPotato', + 'order': 10, + 'description': 'CouchPotato torrent provider. 
Check out the wiki page about this provider for more info.', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAAABnRSTlMAAAAAAABupgeRAAABSElEQVR4AZ2Nz0oCURTGv8t1YMpqUxt9ARFxoQ/gQtppgvUKcu/sxB5iBJkogspaBC6iVUplEC6kv+oiiKDNhAtt16roP0HQgdsMLgaxfvy4nHP4Pi48qE2g4v91JOqT1CH/UnA7w7icUlLawyEdj+ZI/7h6YluWbRiddHonHh9M70aj7VTKzuXuikUMci/EO/ACnAI15599oAk8AR/AgxBQNCzreD7bmpl+FOIVuAHqQDUcJo+AK+CZFKLt95/MpSmMt0TiW9POxse6UvYZ6zB2wFgjFiNpOGesR0rZ0PVPXf8KhUCl22CwClz4eN8weoZBb9c0bdPsOWvHx/cYu9Y0CoNoZTJrwAbn5DrnZc6XOV+igVbnsgo0IxEomlJuA1vUIYGyq3PZBChwmExCUSmVZgMBDIUCK4UCFIv5vHIhm/XUDeAf/ADbcpd5+aXSWQAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'use', + 'default': '' + }, + { + 'name': 'host', + 'default': '', + 'description': 'The url path of your TorrentPotato provider.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'default': '0', + 'description': 'Starting score for each release found via this provider.', + }, + { + 'name': 'name', + 'label': 'Username', + 'default': '', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'default': '1', + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'default': '40', + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'pass_key', + 'default': ',', + 'label': 'Pass Key', + 'description': 'Can be found on your profile page.', + 'type': 'combined', + 'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentshack.py b/couchpotato/core/media/_base/providers/torrent/torrentshack.py new file mode 100644 index 00000000..0cfa04d1 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentshack.py @@ -0,0 +1,132 @@ +import traceback + +from bs4 import BeautifulSoup +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentProvider): + + urls = { + 'test': 'https://torrentshack.net/', + 'login': 'https://torrentshack.net/login.php', + 'login_check': 'https://torrentshack.net/inbox.php', + 'detail': 'https://torrentshack.net/torrent/%s', + 'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1', + 'download': 'https://torrentshack.net/%s', + } + + http_time_between_calls = 1 # Seconds + + def _search(self, media, quality, results): + + url = self.urls['search'] % self.buildUrl(media, quality) + data = self.getHTMLData(url) + + if data: + html = BeautifulSoup(data) + + try: + result_table = html.find('table', attrs = {'id': 'torrent_table'}) + if not result_table: + return + + entries = result_table.find_all('tr', attrs = {'class': 'torrent'}) + + for result in entries: + + link = result.find('span', attrs = {'class': 'torrent_name_link'}).parent + url = result.find('td', attrs = {'class': 'torrent_td'}).find('a') + + results.append({ + 'id': link['href'].replace('torrents.php?torrentid=', ''), + 'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}), + 'url': self.urls['download'] % url['href'], + 'detail_url': self.urls['download'] % link['href'], + 'size': self.parseSize(result.find_all('td')[4].string), + 'seeders': 
tryInt(result.find_all('td')[6].string), + 'leechers': tryInt(result.find_all('td')[7].string), + }) + + except: + log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc())) + + def getLoginParams(self): + return { + 'username': self.conf('username'), + 'password': self.conf('password'), + 'keeplogged': '1', + 'login': 'Login', + } + + def loginSuccess(self, output): + return 'logout.php' in output.lower() + + loginCheckSuccess = loginSuccess + + def getSceneOnly(self): + return '1' if self.conf('scene_only') else '' + + +config = [{ + 'name': 'torrentshack', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'TorrentShack', + 'description': 'TorrentShack', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAABmElEQVQoFQXBzY2cVRiE0afqvd84CQiAnxWWtyxsS6ThINBYg2Dc7mZBMEjE4mzs6e9WcY5+ePNuVFJJodQAoLo+SaWCy9rcV8cmjah3CI6iYu7oRU30kE5xxELRfamklY3k1NL19sSm7vPzP/ZdNZzKVDaY2sPZJBh9fv5ITrmG2+Vp4e1sPchVqTCQZJnVXi+/L4uuAJGly1+Pw8CprLbi8Om7tbT19/XRqJUk11JP9uHj9ulxhXbvJbI9qJvr5YkGXFG2IBT8tXczt+sfzDZCp3765f3t9tHEHGEDACma77+8o4oATKk+/PfW9YmHruRFjWoVSFsVsGu1YSKq6Oc37+n98unPZSRlY7vsKDqN+92X3yR9+PdXee3iJNKMStqdcZqoTJbUSi5JOkpfRlhSI0mSpEmCFKoU7FqSNOLAk54uGwCStMUCgLrVic62g7oDoFmmdI+P3S0pDe1xvDqb6XrZqbtzShWNoh9fv/XQHaDdM9OqrZi2M7M3UrB2vlkPS1IbdEBk7UiSoD6VlZ6aKWer4aH4f/AvKoHUTjuyAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False, + }, + { + 'name': 'username', + 'default': '', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'scene_only', + 'type': 'bool', + 'default': False, + 'description': 'Only allow scene releases.' 
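# 'scene_only' feeds getSceneOnly() above, which returns '1' when ticked and ''
# otherwise; that value fills the scene=%s slot of the search url template.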
+ }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + }, + ], +}] diff --git a/couchpotato/core/media/_base/providers/torrent/torrentz.py b/couchpotato/core/media/_base/providers/torrent/torrentz.py new file mode 100644 index 00000000..8a5455c9 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/torrentz.py @@ -0,0 +1,129 @@ +import re +import traceback + +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider +import six + + +log = CPLog(__name__) + + +class Base(TorrentMagnetProvider, RSS): + + urls = { + 'detail': 'https://torrentz.eu/%s', + 'search': 'https://torrentz.eu/feed?q=%s', + 'verified_search': 'https://torrentz.eu/feed_verified?q=%s' + } + + http_time_between_calls = 0 + + def _search(self, media, quality, results): + + search_url = self.urls['verified_search'] if self.conf('verified_only') else self.urls['search'] + + # Create search parameters + search_params = self.buildUrl(media) + + smin = quality.get('size_min') + smax = quality.get('size_max') + if smin and smax: + search_params += ' size %sm - %sm' % (smin, smax) + + min_seeds = tryInt(self.conf('minimal_seeds')) + if min_seeds: + search_params += ' seed > %s' % (min_seeds - 1) + + rss_data = self.getRSSData(search_url % search_params) + + if rss_data: + try: + + for result in rss_data: + + name = self.getTextElement(result, 'title') + detail_url = self.getTextElement(result, 'link') + description = self.getTextElement(result, 'description') + + magnet = splitString(detail_url, '/')[-1] + magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), tryUrlencode(name), tryUrlencode('udp://tracker.openbittorrent.com/announce')) + + reg = re.search('Size: (?P<size>\d+) MB Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description)) + size = reg.group('size') + seeds = reg.group('seeds').replace(',', '') + peers = reg.group('peers').replace(',', '') + + results.append({ + 'id': magnet, + 'name': six.text_type(name), + 'url': magnet_url, + 'detail_url': detail_url, + 'size': tryInt(size), + 'seeders': tryInt(seeds), + 'leechers': tryInt(peers), + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + +config = [{ + 'name': 'torrentz', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Torrentz', + 'description': 'Torrentz is a free, fast and powerful meta-search engine. 
Torrentz', + 'wizard': True, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAYAAAAf8/9hAAAAQklEQVQ4y2NgAALjtJn/ycEMlGiGG0IVAxiwAKzOxaKGARcgxgC8YNSAwWoAzuRMjgsIugqfAUR5CZcBRIcHsWEAADSA96Ig020yAAAAAElFTkSuQmCC', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': True + }, + { + 'name': 'verified_only', + 'type': 'bool', + 'default': True, + 'advanced': True, + 'description': 'Only search verified releases', + }, + { + 'name': 'minimal_seeds', + 'type': 'int', + 'default': 1, + 'advanced': True, + 'description': 'Only return releases with minimal X seeds', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/media/_base/providers/torrent/yify.py b/couchpotato/core/media/_base/providers/torrent/yify.py new file mode 100644 index 00000000..0daf20a0 --- /dev/null +++ b/couchpotato/core/media/_base/providers/torrent/yify.py @@ -0,0 +1,120 @@ +import traceback + +from couchpotato.core.helpers.variable import tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.base import TorrentMagnetProvider + + +log = CPLog(__name__) + + +class Base(TorrentMagnetProvider): + + urls = { + 'test': '%s/api', + 'search': '%s/api/list.json?keywords=%s&quality=%s', + 'detail': '%s/api/movie.json?id=%s' + } + + http_time_between_calls = 1 # seconds + + proxy_list = [ + 'http://yify.unlocktorrent.com', + 'http://yify-torrents.com.come.in', + 'http://yts.re', + 'http://yts.im', + 'http://yify-torrents.im', + ] + + def search(self, movie, quality): + + if not quality.get('hd', False): + return [] + + return super(Base, self).search(movie, quality) + + def _search(self, movie, quality, results): + + domain = self.getDomain() + if not domain: + return + + search_url = self.urls['search'] % (domain, getIdentifier(movie), quality['identifier']) + + data = self.getJsonData(search_url) + + if data and data.get('MovieList'): + try: + for result in data.get('MovieList'): + + if result['Quality'] and result['Quality'] not in result['MovieTitle']: + title = result['MovieTitle'] + ' BrRip ' + result['Quality'] + else: + title = result['MovieTitle'] + ' BrRip' + + results.append({ + 'id': result['MovieID'], + 'name': title, + 'url': result['TorrentMagnetUrl'], + 'detail_url': self.urls['detail'] % (domain, result['MovieID']), + 'size': self.parseSize(result['Size']), + 'seeders': tryInt(result['TorrentSeeds']), + 'leechers': tryInt(result['TorrentPeers']), + }) + + except: + log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) + + def correctProxy(self, data): + data = data.lower() + return 'yify' in data and 'yts' in data + + +config = [{ + 'name': 'yify', + 'groups': [ + { + 'tab': 'searcher', + 'list': 'torrent_providers', + 'name': 'Yify', + 'description': 'Free provider, less accurate. 
Small HD movies, encoded by Yify.', + 'wizard': False, + 'icon': 'iVBORw0KGgoAAAANSUhEUgAAABAAAAAQCAIAAACQkWg2AAACL0lEQVR4AS1SPW/UQBAd23fxne/Ld2dvzvHuzPocEBAKokCBqGiQ6IgACYmvUKRBFEQgKKGg4BAlUoggggYUEQpSHOI7CIEoQs/fYcbLaU/efTvvvZlnA1qydoxU5kcxX0CkgmQZtPy0hCUjvK+WgEByOZ5dns1O5bzna8fRVkgsxH8B0YouIvBhdD5T11NiVOoKrsttyUcpRW0InUrFnwe9HzuP2uaQZYhF2LQ76TTXw2RVMTK8mYYbjfh+zNquMVCrqn93aArLSixPxnafdGDLaz1tjY5rmNa8z5BczEQOxQfCl1GyoqoWxYRN1bkh7ELw3q/vhP6HIL4TG9KumpjgvwuyM7OsjSj98E/vszMfZ7xvPtMaWxGO5crwIumKCR5HxDtJ0AWKGG204RfUd/3smJYqwem/Q7BTS1ZGfM4LNpVwuKAz6cMeROst0S2EwNE7GjTehO2H3dxqIpdkydat15G3F8SXBi4GlpBNlSz012L/k2+W0CLLk/jbcf13rf41yJeMQ8QWUZiHCfCA9ad+81nEKPtoS9mJOf9v0NmMJHgUT6xayheK9EIK7JJeU/AF4scDF7Y5SPlJrRcxJ+um4ibNEdObxLiIwJim+eT2AL5D9CIcnZ5zvSJi9eIlNHVVtZ831dk5svPgvjPWTq+ktWkd/kD0qtm71x+sDQe3kt6DXnM7Ct+GajmTxKlkAokWljyAKSm5oWa2w+BH4P2UuVub7eTyiGOQYapY/wEztHduSDYz5gAAAABJRU5ErkJggg==', + 'options': [ + { + 'name': 'enabled', + 'type': 'enabler', + 'default': False + }, + { + 'name': 'domain', + 'advanced': True, + 'label': 'Proxy server', + 'description': 'Domain for requests, keep empty to let CouchPotato pick.', + }, + { + 'name': 'seed_ratio', + 'label': 'Seed ratio', + 'type': 'float', + 'default': 1, + 'description': 'Will not be (re)moved until this seed ratio is met.', + }, + { + 'name': 'seed_time', + 'label': 'Seed time', + 'type': 'int', + 'default': 40, + 'description': 'Will not be (re)moved until this seed time (in hours) is met.', + }, + { + 'name': 'extra_score', + 'advanced': True, + 'label': 'Extra Score', + 'type': 'int', + 'default': 0, + 'description': 'Starting score for each release found via this provider.', + } + ], + } + ] +}] diff --git a/couchpotato/core/providers/metadata/__init__.py b/couchpotato/core/media/_base/providers/userscript/__init__.py similarity index 100% rename from couchpotato/core/providers/metadata/__init__.py rename to couchpotato/core/media/_base/providers/userscript/__init__.py diff --git a/couchpotato/core/providers/userscript/base.py b/couchpotato/core/media/_base/providers/userscript/base.py similarity index 99% rename from couchpotato/core/providers/userscript/base.py rename to couchpotato/core/media/_base/providers/userscript/base.py index 531510b0..6491ac34 100644 --- a/couchpotato/core/providers/userscript/base.py +++ b/couchpotato/core/media/_base/providers/userscript/base.py @@ -1,9 +1,11 @@ +from urlparse import urlparse + from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import getImdb, md5 from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from urlparse import urlparse + log = CPLog(__name__) diff --git a/couchpotato/core/media/_base/search/__init__.py b/couchpotato/core/media/_base/search/__init__.py index 09bc84ef..c23fdb72 100644 --- a/couchpotato/core/media/_base/search/__init__.py +++ b/couchpotato/core/media/_base/search/__init__.py @@ -1,7 +1,5 @@ from .main import Search -def start(): +def autoload(): return Search() - -config = [] diff --git a/couchpotato/core/media/_base/search/main.py b/couchpotato/core/media/_base/search/main.py index 81897b5f..1d0603cb 100644 --- a/couchpotato/core/media/_base/search/main.py +++ b/couchpotato/core/media/_base/search/main.py @@ -1,6 +1,6 @@ from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.variable import mergeDicts +from couchpotato.core.helpers.variable import mergeDicts, 
getImdb from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin @@ -35,12 +35,21 @@ class Search(Plugin): elif isinstance(types, (list, tuple, set)): types = list(types) + imdb_identifier = getImdb(q) + if not types: - result = fireEvent('info.search', q = q, merge = True) + if imdb_identifier: + result = fireEvent('movie.info', identifier = imdb_identifier, merge = True) + result = {result['type']: [result]} + else: + result = fireEvent('info.search', q = q, merge = True) else: result = {} for media_type in types: - result[media_type] = fireEvent('%s.search' % media_type) + if imdb_identifier: + result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier) + else: + result[media_type] = fireEvent('%s.search' % media_type, q = q) return mergeDicts({ 'success': True, diff --git a/couchpotato/core/media/_base/search/static/search.css b/couchpotato/core/media/_base/search/static/search.css index 57210d68..4f7b77bf 100644 --- a/couchpotato/core/media/_base/search/static/search.css +++ b/couchpotato/core/media/_base/search/static/search.css @@ -6,12 +6,10 @@ top: 0; text-align: right; height: 100%; - border-bottom: 4px solid transparent; transition: all .4s cubic-bezier(0.9,0,0.1,1); - position: absolute; z-index: 20; - border: 1px solid transparent; - border-width: 0 0 4px; + border: 0 solid transparent; + border-bottom-width: 4px; } .search_form:hover { border-color: #047792; @@ -22,19 +20,19 @@ right: 44px; } } - + .search_form.focused, .search_form.shown { border-color: #04bce6; } - + .search_form .input { height: 100%; overflow: hidden; width: 45px; transition: all .4s cubic-bezier(0.9,0,0.1,1); } - + .search_form.focused .input, .search_form.shown .input { width: 380px; @@ -49,7 +47,6 @@ color: #FFF; font-size: 25px; height: 100%; - padding: 10px; width: 100%; opacity: 0; padding: 0 40px 0 10px; @@ -59,23 +56,23 @@ .search_form.shown .input input { opacity: 1; } - + .search_form input::-ms-clear { width : 0; height: 0; } - + @media all and (max-width: 480px) { .search_form .input input { font-size: 15px; } - + .search_form.focused .input, .search_form.shown .input { width: 277px; } } - + .search_form .input a { position: absolute; top: 0; @@ -89,7 +86,7 @@ font-size: 15px; color: #FFF; } - + .search_form .input a:after { content: "\e03e"; } @@ -97,7 +94,7 @@ .search_form.shown.filled .input a:after { content: "\e04e"; } - + @media all and (max-width: 480px) { .search_form .input a { line-height: 44px; @@ -167,13 +164,13 @@ .media_result .options select[name=title] { width: 170px; } .media_result .options select[name=profile] { width: 90px; } .media_result .options select[name=category] { width: 80px; } - + @media all and (max-width: 480px) { - + .media_result .options select[name=title] { width: 90px; } .media_result .options select[name=profile] { width: 50px; } .media_result .options select[name=category] { width: 50px; } - + } .media_result .options .button { @@ -227,14 +224,14 @@ right: 7px; vertical-align: middle; } - + .media_result .info h2 { margin: 0; font-weight: normal; font-size: 20px; padding: 0; } - + .search_form .info h2 { position: absolute; width: 100%; @@ -247,12 +244,12 @@ overflow: hidden; white-space: nowrap; } - + .search_form .info h2 .title { position: absolute; width: 88%; } - + .media_result .info h2 .year { padding: 0 5px; text-align: center; @@ -260,14 +257,14 @@ width: 12%; right: 0; } - + @media all and (max-width: 480px) { - + .search_form .info h2 .year { font-size: 12px; margin-top: 7px; } - + } 
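The search API change in search/main.py above short-circuits free-text search when the query is already an IMDB id or url: getImdb() pulls the id out of the query string, and the endpoint then fires '<type>.info' with that identifier instead of '<type>.search'. A minimal sketch of the detection step, using a simplified stand-in for the real getImdb() helper from couchpotato.core.helpers.variable:

import re

def get_imdb(q):
    # Simplified stand-in: the real helper also strips stray characters
    # and zero-fills short ids before returning them.
    match = re.search(r'(?P<id>tt\d+)', q)
    return match.group('id') if match else False

# An imdb url or plain id bypasses provider search and goes straight to '<type>.info'
assert get_imdb('http://www.imdb.com/title/tt0133093/') == 'tt0133093'
assert get_imdb('the matrix') is False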
.search_form .mask, @@ -277,4 +274,4 @@ width: 100%; left: 0; top: 0; -} \ No newline at end of file +} diff --git a/couchpotato/core/media/_base/search/static/search.js b/couchpotato/core/media/_base/search/static/search.js index 470dcf0b..a7cd3640 100644 --- a/couchpotato/core/media/_base/search/static/search.js +++ b/couchpotato/core/media/_base/search/static/search.js @@ -13,10 +13,13 @@ Block.Search = new Class({ self.input = new Element('input', { 'placeholder': 'Search & add a new media', 'events': { + 'input': self.keyup.bind(self), + 'paste': self.keyup.bind(self), + 'change': self.keyup.bind(self), 'keyup': self.keyup.bind(self), 'focus': function(){ if(focus_timer) clearTimeout(focus_timer); - self.el.addClass('focused') + self.el.addClass('focused'); if(this.get('value')) self.hideResults(false) }, @@ -57,17 +60,17 @@ Block.Search = new Class({ (e).preventDefault(); if(self.last_q === ''){ - self.input.blur() + self.input.blur(); self.last_q = null; } else { self.last_q = ''; self.input.set('value', ''); - self.input.focus() + self.input.focus(); - self.media = {} - self.results.empty() + self.media = {}; + self.results.empty(); self.el.removeClass('filled') } @@ -92,16 +95,16 @@ Block.Search = new Class({ self.hidden = bool; }, - keyup: function(e){ + keyup: function(){ var self = this; - self.el[self.q() ? 'addClass' : 'removeClass']('filled') + self.el[self.q() ? 'addClass' : 'removeClass']('filled'); if(self.q() != self.last_q){ if(self.api_request && self.api_request.isRunning()) self.api_request.cancel(); - if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer) + if(self.autocomplete_timer) clearTimeout(self.autocomplete_timer); self.autocomplete_timer = self.autocomplete.delay(300, self) } @@ -111,7 +114,7 @@ Block.Search = new Class({ var self = this; if(!self.q()){ - self.hideResults(true) + self.hideResults(true); return } @@ -139,7 +142,7 @@ Block.Search = new Class({ }) } else - self.fill(q, cache) + self.fill(q, cache); self.last_q = q; @@ -148,31 +151,31 @@ Block.Search = new Class({ fill: function(q, json){ var self = this; - self.cache[q] = json + self.cache[q] = json; - self.media = {} - self.results.empty() - - Object.each(json, function(media, type){ + self.media = {}; + self.results.empty(); + + Object.each(json, function(media){ if(typeOf(media) == 'array'){ Object.each(media, function(m){ - + var m = new Block.Search[m.type.capitalize() + 'Item'](m); - $(m).inject(self.results) - self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m - + $(m).inject(self.results); + self.media[m.imdb || 'r-'+Math.floor(Math.random()*10000)] = m; + if(q == m.imdb) m.showOptions() - + }); } - }) + }); // Calculate result heights var w = window.getSize(), rc = self.result_container.getCoordinates(); - self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px') + self.results.setStyle('max-height', (w.y - rc.top - 50) + 'px'); self.mask.fade('out') }, @@ -185,4 +188,4 @@ Block.Search = new Class({ return this.input.get('value').trim(); } -}); \ No newline at end of file +}); diff --git a/couchpotato/core/media/_base/searcher/__init__.py b/couchpotato/core/media/_base/searcher/__init__.py index 72c7d6ef..bf69b950 100644 --- a/couchpotato/core/media/_base/searcher/__init__.py +++ b/couchpotato/core/media/_base/searcher/__init__.py @@ -1,7 +1,7 @@ from .main import Searcher -def start(): +def autoload(): return Searcher() config = [{ diff --git a/couchpotato/core/media/_base/searcher/main.py b/couchpotato/core/media/_base/searcher/main.py index 
e7209b60..4e8dae2e 100644 --- a/couchpotato/core/media/_base/searcher/main.py +++ b/couchpotato/core/media/_base/searcher/main.py @@ -1,20 +1,24 @@ +import datetime +import re + from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import splitString, removeEmpty, removeDuplicate from couchpotato.core.logger import CPLog from couchpotato.core.media._base.searcher.base import SearcherBase -import datetime -import re + log = CPLog(__name__) class Searcher(SearcherBase): + # noinspection PyMissingConstructor def __init__(self): addEvent('searcher.protocols', self.getSearchProtocols) addEvent('searcher.contains_other_quality', self.containsOtherQuality) + addEvent('searcher.correct_3d', self.correct3D) addEvent('searcher.correct_year', self.correctYear) addEvent('searcher.correct_name', self.correctName) addEvent('searcher.correct_words', self.correctWords) @@ -48,7 +52,7 @@ class Searcher(SearcherBase): results = [] for search_protocol in protocols: - protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media['type']), media, quality, merge = True) + protocol_results = fireEvent('provider.search.%s.%s' % (search_protocol, media.get('type')), media, quality, merge = True) if protocol_results: results += protocol_results @@ -83,31 +87,23 @@ class Searcher(SearcherBase): def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None): if not preferred_quality: preferred_quality = {} - name = nzb['name'] - size = nzb.get('size', 0) - nzb_words = re.split('\W+', simplifyString(name)) - - qualities = fireEvent('quality.all', single = True) - found = {} - for quality in qualities: - # Main in words - if quality['identifier'] in nzb_words: - found[quality['identifier']] = True - - # Alt in words - if list(set(nzb_words) & set(quality['alternative'])): - found[quality['identifier']] = True # Try guessing via quality tags - guess = fireEvent('quality.guess', [nzb.get('name')], single = True) + guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True) if guess: found[guess['identifier']] = True # Hack for older movies that don't contain quality tag + name = nzb['name'] + size = nzb.get('size', 0) + year_name = fireEvent('scanner.name_year', name, single = True) if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None): - if size > 3000: # Assume dvdr + if size > 20000: # Assume bd50 + log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size) + found['bd50'] = True + elif size > 3000: # Assume dvdr log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size) found['dvdr'] = True else: # Assume dvdrip @@ -119,7 +115,21 @@ class Searcher(SearcherBase): if found.get(allowed): del found[allowed] - return not (found.get(preferred_quality['identifier']) and len(found) == 1) + if found.get(preferred_quality['identifier']) and len(found) == 1: + return False + + return found + + def correct3D(self, nzb, preferred_quality = None): + if not preferred_quality: preferred_quality = {} + if not preferred_quality.get('custom'): return + + threed = preferred_quality['custom'].get('3d') + + # Try guessing via quality tags + guess = fireEvent('quality.guess', [nzb.get('name')], single = True) + + return threed == guess.get('is_3d') def correctYear(self, haystack, year, year_range): @@ 
-181,7 +191,7 @@ class Searcher(SearcherBase): req = splitString(req_set, '&') req_match += len(list(set(rel_words) & set(req))) == len(req) - if len(required_words) > 0 and req_match == 0: + if len(required_words) > 0 and req_match == 0: log.info2('Wrong: Required word missing: %s', rel_name) return False diff --git a/couchpotato/core/media/movie/_base/__init__.py b/couchpotato/core/media/movie/_base/__init__.py index 22211332..14720463 100644 --- a/couchpotato/core/media/movie/_base/__init__.py +++ b/couchpotato/core/media/movie/_base/__init__.py @@ -1,7 +1,5 @@ from .main import MovieBase -def start(): +def autoload(): return MovieBase() - -config = [] diff --git a/couchpotato/core/media/movie/_base/main.py b/couchpotato/core/media/movie/_base/main.py index a7ecf2d2..07c47514 100644 --- a/couchpotato/core/media/movie/_base/main.py +++ b/couchpotato/core/media/movie/_base/main.py @@ -1,13 +1,17 @@ +import os import traceback -from couchpotato import get_session +import time + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, fireEventAsync, addEvent from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import splitString, tryInt, getTitle +from couchpotato.core.helpers.variable import splitString, getTitle, getImdb, getIdentifier from couchpotato.core.logger import CPLog from couchpotato.core.media.movie import MovieTypeBase -from couchpotato.core.settings.model import Media -import time +import six + log = CPLog(__name__) @@ -42,16 +46,21 @@ class MovieBase(MovieTypeBase): }) addEvent('movie.add', self.add) + addEvent('movie.update_info', self.updateInfo) + addEvent('movie.update_release_dates', self.updateReleaseDate) - def add(self, params = None, force_readd = True, search_after = True, update_library = False, status_id = None): + def add(self, params = None, force_readd = True, search_after = True, update_after = True, notify_after = True, status = None): if not params: params = {} + # Make sure it's a correct zero filled imdb id + params['identifier'] = getImdb(params.get('identifier', '')) + if not params.get('identifier'): msg = 'Can\'t add movie without imdb identifier.' 
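# Media documents are keyed as 'imdb-<id>' (see the db.get('media', 'imdb-%s' ...)
# lookup below), so without a parsed imdb identifier there is nothing stable
# to store the movie under.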
log.error(msg) fireEvent('notify.frontend', type = 'movie.is_tvshow', message = msg) return False - else: + elif not params.get('info'): try: is_movie = fireEvent('movie.is_movie', identifier = params.get('identifier'), single = True) if not is_movie: @@ -62,90 +71,135 @@ class MovieBase(MovieTypeBase): except: pass - library = fireEvent('library.add.movie', single = True, attrs = params, update_after = update_library) + info = params.get('info') + if not info or (info and len(info.get('titles', [])) == 0): + info = fireEvent('movie.info', merge = True, extended = False, identifier = params.get('identifier')) - # Status - status_active, snatched_status, ignored_status, done_status, downloaded_status = \ - fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True) + # Set default title + default_title = toUnicode(info.get('title')) + titles = info.get('titles', []) + counter = 0 + def_title = None + for title in titles: + if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title): + def_title = toUnicode(title) + break + counter += 1 - default_profile = fireEvent('profile.default', single = True) + if not def_title: + def_title = toUnicode(titles[0]) + + # Default profile and category + default_profile = {} + if (not params.get('profile_id') and status != 'done') or params.get('ignore_previous', False): + default_profile = fireEvent('profile.default', single = True) cat_id = params.get('category_id') try: - db = get_session() - m = db.query(Media).filter_by(library_id = library.get('id')).first() + db = get_db() + + media = { + '_t': 'media', + 'type': 'movie', + 'title': def_title, + 'identifiers': { + 'imdb': params.get('identifier') + }, + 'status': status if status else 'active', + 'profile_id': params.get('profile_id') or default_profile.get('_id'), + 'category_id': cat_id if cat_id is not None and len(cat_id) > 0 and cat_id != '-1' else None, + } + + # Update movie info + try: del info['in_wanted'] + except: pass + try: del info['in_library'] + except: pass + media['info'] = info + + new = False + previous_profile = None + try: + m = db.get('media', 'imdb-%s' % params.get('identifier'), with_doc = True)['doc'] + + try: + db.get('id', m.get('profile_id')) + previous_profile = m.get('profile_id') + except RecordNotFound: + pass + except: + log.error('Failed getting previous profile: %s', traceback.format_exc()) + except: + new = True + m = db.insert(media) + + # Update dict to be usable + m.update(media) + added = True do_search = False search_after = search_after and self.conf('search_on_add', section = 'moviesearcher') - if not m: - m = Media( - library_id = library.get('id'), - profile_id = params.get('profile_id', default_profile.get('id')), - status_id = status_id if status_id else status_active.get('id'), - category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None, - ) - db.add(m) - db.commit() + onComplete = None - onComplete = None + if new: if search_after: - onComplete = self.createOnComplete(m.id) - - fireEventAsync('library.update.movie', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete) + onComplete = self.createOnComplete(m['_id']) search_after = False elif force_readd: # Clean snatched history - for release in m.releases: - if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]: + for 
release in fireEvent('release.for_media', m['_id'], single = True): + if release.get('status') in ['downloaded', 'snatched', 'seeding', 'done']: if params.get('ignore_previous', False): - release.status_id = ignored_status.get('id') + release['status'] = 'ignored' + db.update(release) else: - fireEvent('release.delete', release.id, single = True) + fireEvent('release.delete', release['_id'], single = True) - m.profile_id = params.get('profile_id', default_profile.get('id')) - m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else (m.category_id or None) + m['profile_id'] = (params.get('profile_id') or default_profile.get('_id')) if not previous_profile else previous_profile + m['category_id'] = cat_id if cat_id is not None and len(cat_id) > 0 else (m.get('category_id') or None) + m['last_edit'] = int(time.time()) + m['tags'] = [] + + do_search = True + db.update(m) else: + try: del params['info'] + except: pass log.debug('Movie already exists, not updating: %s', params) added = False - if force_readd: - m.status_id = status_id if status_id else status_active.get('id') - m.last_edit = int(time.time()) - do_search = True - - db.commit() + # Trigger update info + if added and update_after: + # Do full update to get images etc + fireEventAsync('movie.update_info', m['_id'], default_title = params.get('title'), on_complete = onComplete) # Remove releases - available_status = fireEvent('status.get', 'available', single = True) - for rel in m.releases: - if rel.status_id is available_status.get('id'): + for rel in fireEvent('release.for_media', m['_id'], single = True): + if rel['status'] is 'available': db.delete(rel) - db.commit() - movie_dict = m.to_dict(self.default_dict) + movie_dict = fireEvent('media.get', m['_id'], single = True) if do_search and search_after: - onComplete = self.createOnComplete(m.id) + onComplete = self.createOnComplete(m['_id']) onComplete() - if added: + if added and notify_after: + if params.get('title'): message = 'Successfully added "%s" to your wanted list.' % params.get('title', '') else: - title = getTitle(m.library) + title = getTitle(m) if title: message = 'Successfully added "%s" to your wanted list.' % title else: - message = 'Succesfully added to your wanted list.' + message = 'Successfully added to your wanted list.' 
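# movie_dict is the full media document fetched via 'media.get' above;
# hand it to the frontend along with the status message.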
fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = message) return movie_dict except: - log.error('Failed deleting media: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() + log.error('Failed adding media: %s', traceback.format_exc()) def addView(self, **kwargs): add_dict = self.add(params = kwargs) @@ -158,50 +212,169 @@ class MovieBase(MovieTypeBase): def edit(self, id = '', **kwargs): try: - db = get_session() - - available_status = fireEvent('status.get', 'available', single = True) + db = get_db() ids = splitString(id) for media_id in ids: - m = db.query(Media).filter_by(id = media_id).first() - if not m: - continue + try: + m = db.get('id', media_id) + m['profile_id'] = kwargs.get('profile_id') - m.profile_id = kwargs.get('profile_id') + cat_id = kwargs.get('category_id') + if cat_id is not None: + m['category_id'] = cat_id if len(cat_id) > 0 else None - cat_id = kwargs.get('category_id') - if cat_id is not None: - m.category_id = tryInt(cat_id) if tryInt(cat_id) > 0 else None + # Remove releases + for rel in fireEvent('release.for_media', m['_id'], single = True): + if rel['status'] is 'available': + db.delete(rel) - # Remove releases - for rel in m.releases: - if rel.status_id is available_status.get('id'): - db.delete(rel) - db.commit() + # Default title + if kwargs.get('default_title'): + m['title'] = kwargs.get('default_title') - # Default title - if kwargs.get('default_title'): - for title in m.library.titles: - title.default = toUnicode(kwargs.get('default_title', '')).lower() == toUnicode(title.title).lower() + db.update(m) - db.commit() + fireEvent('media.restatus', m['_id']) - fireEvent('media.restatus', m.id) + m = db.get('id', media_id) - movie_dict = m.to_dict(self.default_dict) - fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id)) + movie_dict = fireEvent('media.get', m['_id'], single = True) + fireEventAsync('movie.searcher.single', movie_dict, on_complete = self.createNotifyFront(media_id)) + + except: + log.error('Can\'t edit non-existing media') return { 'success': True, } except: - log.error('Failed deleting media: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() + log.error('Failed editing media: %s', traceback.format_exc()) return { 'success': False, } + + def updateInfo(self, media_id = None, identifier = None, default_title = None, extended = False): + """ + Update movie information inside media['doc']['info'] + + @param media_id: document id + @param default_title: default title, if empty, use first one or existing one + @param extended: update with extended info (parses more info, actors, images from some info providers) + @return: dict, with media + """ + + if self.shuttingDown(): + return + + try: + db = get_db() + + if media_id: + media = db.get('id', media_id) + else: + media = db.get('media', 'imdb-%s' % identifier, with_doc = True)['doc'] + + info = fireEvent('movie.info', merge = True, extended = extended, identifier = getIdentifier(media)) + + # Don't need those here + try: del info['in_wanted'] + except: pass + try: del info['in_library'] + except: pass + + if not info or len(info) == 0: + log.error('Could not update, no movie info to work with: %s', identifier) + return False + + # Update basic info + media['info'] = info + + titles = info.get('titles', []) + log.debug('Adding titles: %s', titles) + + # Define default title + if default_title: + def_title = None + if default_title: + counter = 0 + for title in titles: + if 
title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title): + def_title = toUnicode(title) + break + counter += 1 + + if not def_title: + def_title = toUnicode(titles[0]) + + media['title'] = def_title + + # Files + images = info.get('images', []) + media['files'] = media.get('files', {}) + for image_type in ['poster']: + + # Remove non-existing files + file_type = 'image_%s' % image_type + existing_files = list(set(media['files'].get(file_type, []))) + for ef in media['files'].get(file_type, []): + if not os.path.isfile(ef): + existing_files.remove(ef) + + # Replace new files list + media['files'][file_type] = existing_files + if len(existing_files) == 0: + del media['files'][file_type] + + # Loop over type + for image in images.get(image_type, []): + if not isinstance(image, (str, unicode)): + continue + + if file_type not in media['files'] or len(media['files'].get(file_type, [])) == 0: + file_path = fireEvent('file.download', url = image, single = True) + if file_path: + media['files'][file_type] = [file_path] + break + else: + break + + db.update(media) + + return media + except: + log.error('Failed update media: %s', traceback.format_exc()) + + return {} + + def updateReleaseDate(self, media_id): + """ + Update release_date (eta) info only + + @param media_id: document id + @return: dict, with dates dvd, theater, bluray, expires + """ + + try: + db = get_db() + + media = db.get('id', media_id) + + if not media.get('info'): + media = self.updateInfo(media_id) + dates = media.get('info', {}).get('release_date') + else: + dates = media.get('info').get('release_date') + + if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates: + dates = fireEvent('movie.info.release_date', identifier = getIdentifier(media), merge = True) + media['info'].update({'release_date': dates}) + db.update(media) + + return dates + except: + log.error('Failed updating release dates: %s', traceback.format_exc()) + + return {} diff --git a/couchpotato/core/media/movie/_base/static/list.js b/couchpotato/core/media/movie/_base/static/list.js index 85dee2e5..83ac4ede 100644 --- a/couchpotato/core/media/movie/_base/static/list.js +++ b/couchpotato/core/media/movie/_base/static/list.js @@ -26,7 +26,7 @@ var MovieList = new Class({ self.filter = self.options.filter || { 'starts_with': null, 'search': null - } + }; self.el = new Element('div.movies').adopt( self.title = self.options.title ? 
new Element('h2', { @@ -52,18 +52,18 @@ var MovieList = new Class({ self.getMovies(); - App.on('movie.added', self.movieAdded.bind(self)) + App.on('movie.added', self.movieAdded.bind(self)); App.on('movie.deleted', self.movieDeleted.bind(self)) }, movieDeleted: function(notification){ var self = this; - if(self.movies_added[notification.data.id]){ + if(self.movies_added[notification.data._id]){ self.movies.each(function(movie){ - if(movie.get('id') == notification.data.id){ + if(movie.get('_id') == notification.data._id){ movie.destroy(); - delete self.movies_added[notification.data.id]; + delete self.movies_added[notification.data._id]; self.setCounter(self.counter_count-1); self.total_movies--; } @@ -77,7 +77,7 @@ var MovieList = new Class({ var self = this; self.fireEvent('movieAdded', notification); - if(self.options.add_new && !self.movies_added[notification.data.id] && notification.data.status.identifier == self.options.status){ + if(self.options.add_new && !self.movies_added[notification.data._id] && notification.data.status == self.options.status){ window.scroll(0,0); self.createMovie(notification.data, 'top'); self.setCounter(self.counter_count+1); @@ -96,7 +96,7 @@ var MovieList = new Class({ if(self.options.load_more) self.scrollspy = new ScrollSpy({ min: function(){ - var c = self.load_more.getCoordinates() + var c = self.load_more.getCoordinates(); return c.top - window.document.getSize().y - 300 }, onEnter: self.loadMore.bind(self) @@ -179,15 +179,15 @@ var MovieList = new Class({ m.fireEvent('injected'); - self.movies.include(m) - self.movies_added[movie.id] = true; + self.movies.include(m); + self.movies_added[movie._id] = true; }, createNavigation: function(){ var self = this; var chars = '#ABCDEFGHIJKLMNOPQRSTUVWXYZ'; - self.el.addClass('with_navigation') + self.el.addClass('with_navigation'); self.navigation = new Element('div.alph_nav').adopt( self.mass_edit_form = new Element('div.mass_edit_form').adopt( @@ -242,7 +242,7 @@ var MovieList = new Class({ this.addClass(a); el.inject(el.getParent(), 'top'); - el.getSiblings().hide() + el.getSiblings().hide(); setTimeout(function(){ el.getSiblings().setStyle('display', null); }, 100) @@ -259,8 +259,8 @@ var MovieList = new Class({ self.mass_edit_select_class = new Form.Check(self.mass_edit_select); Quality.getActiveProfiles().each(function(profile){ new Element('option', { - 'value': profile.id ? profile.id : profile.data.id, - 'text': profile.label ? 
profile.label : profile.data.label + 'value': profile.get('_id'), + 'text': profile.get('label') }).inject(self.mass_edit_quality) }); @@ -286,9 +286,9 @@ var MovieList = new Class({ 'status': self.options.status }, self.filter), 'onSuccess': function(json){ - available_chars = json.chars + available_chars = json.chars; - json.chars.split('').each(function(c){ + available_chars.each(function(c){ self.letters[c.capitalize()].addClass('available') }) @@ -300,7 +300,7 @@ var MovieList = new Class({ self.navigation_alpha = new Element('ul.numbers', { 'events': { 'click:relay(li.available)': function(e, el){ - self.activateLetter(el.get('data-letter')) + self.activateLetter(el.get('data-letter')); self.getMovies(true) } } @@ -318,7 +318,7 @@ var MovieList = new Class({ // All self.letters['all'] = new Element('li.letter_all.available.active', { - 'text': 'ALL', + 'text': 'ALL' }).inject(self.navigation_alpha); // Chars @@ -334,7 +334,7 @@ var MovieList = new Class({ if (self.options.menu.length > 0) self.options.menu.each(function(menu_item){ self.navigation_menu.addLink(menu_item); - }) + }); else self.navigation_menu.hide(); @@ -347,15 +347,15 @@ var MovieList = new Class({ movies = self.movies.length; self.movies.each(function(movie){ selected += movie.isSelected() ? 1 : 0 - }) + }); var indeterminate = selected > 0 && selected < movies, checked = selected == movies && selected > 0; - self.mass_edit_select.set('indeterminate', indeterminate) + self.mass_edit_select.set('indeterminate', indeterminate); - self.mass_edit_select_class[checked ? 'check' : 'uncheck']() - self.mass_edit_select_class.element[indeterminate ? 'addClass' : 'removeClass']('indeterminate') + self.mass_edit_select_class[checked ? 'check' : 'uncheck'](); + self.mass_edit_select_class.element[indeterminate ? 
'addClass' : 'removeClass']('indeterminate'); self.mass_edit_selected.set('text', selected); }, @@ -371,8 +371,9 @@ var MovieList = new Class({ 'events': { 'click': function(e){ (e).preventDefault(); - this.set('text', 'Deleting..') + this.set('text', 'Deleting..'); Api.request('media.delete', { + 'method': 'post', 'data': { 'id': ids.join(','), 'delete_from': self.options.identifier @@ -383,7 +384,7 @@ var MovieList = new Class({ var erase_movies = []; self.movies.each(function(movie){ if (movie.isSelected()){ - $(movie).destroy() + $(movie).destroy(); erase_movies.include(movie); } }); @@ -410,9 +411,10 @@ var MovieList = new Class({ changeQualitySelected: function(){ var self = this; - var ids = self.getSelectedMovies() + var ids = self.getSelectedMovies(); Api.request('movie.edit', { + 'method': 'post', 'data': { 'id': ids.join(','), 'profile_id': self.mass_edit_quality.get('value') @@ -423,11 +425,12 @@ var MovieList = new Class({ refreshSelected: function(){ var self = this; - var ids = self.getSelectedMovies() + var ids = self.getSelectedMovies(); Api.request('media.refresh', { + 'method': 'post', 'data': { - 'id': ids.join(','), + 'id': ids.join(',') } }); }, @@ -435,10 +438,10 @@ var MovieList = new Class({ getSelectedMovies: function(){ var self = this; - var ids = [] + var ids = []; self.movies.each(function(movie){ if (movie.isSelected()) - ids.include(movie.get('id')) + ids.include(movie.get('_id')) }); return ids @@ -459,15 +462,15 @@ var MovieList = new Class({ reset: function(){ var self = this; - self.movies = [] + self.movies = []; if(self.mass_edit_select) - self.calculateSelected() + self.calculateSelected(); if(self.navigation_alpha) - self.navigation_alpha.getElements('.active').removeClass('active') + self.navigation_alpha.getElements('.active').removeClass('active'); self.offset = 0; if(self.scrollspy){ - self.load_more.show(); + //self.load_more.show(); self.scrollspy.start(); } }, @@ -475,7 +478,7 @@ var MovieList = new Class({ activateLetter: function(letter){ var self = this; - self.reset() + self.reset(); self.letters[letter || 'all'].addClass('active'); self.filter.starts_with = letter; @@ -487,7 +490,7 @@ var MovieList = new Class({ self.el .removeClass(self.current_view+'_list') - .addClass(new_view+'_list') + .addClass(new_view+'_list'); self.current_view = new_view; Cookie.write(self.options.identifier+'_view2', new_view, {duration: 1000}); @@ -504,9 +507,9 @@ var MovieList = new Class({ if(self.search_timer) clearTimeout(self.search_timer); self.search_timer = (function(){ var search_value = self.navigation_search_input.get('value'); - if (search_value == self.last_search_value) return + if (search_value == self.last_search_value) return; - self.reset() + self.reset(); self.activateLetter(); self.filter.search = search_value; @@ -552,7 +555,7 @@ var MovieList = new Class({ Api.request(self.options.api_call || 'media.list', { 'data': Object.merge({ - 'type': 'movie', + 'type': self.options.type || 'movie', 'status': self.options.status, 'limit_offset': self.options.limit ? self.options.limit + ',' + self.offset : null }, self.filter), @@ -563,7 +566,7 @@ var MovieList = new Class({ if(self.loader_first){ var lf = self.loader_first; - self.loader_first.addClass('hide') + self.loader_first.addClass('hide'); self.loader_first = null; setTimeout(function(){ lf.destroy(); @@ -603,10 +606,10 @@ var MovieList = new Class({ var is_empty = self.movies.length == 0 && (self.total_movies == 0 || self.total_movies === undefined); if(self.title) - self.title[is_empty ? 
'hide' : 'show']() + self.title[is_empty ? 'hide' : 'show'](); if(self.description) - self.description.setStyle('display', [is_empty ? 'none' : '']) + self.description.setStyle('display', [is_empty ? 'none' : '']); if(is_empty && self.options.on_empty_element){ self.options.on_empty_element.inject(self.loader_first || self.title || self.movie_list, 'after'); @@ -629,4 +632,4 @@ var MovieList = new Class({ return this.el; } -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/page/manage.js b/couchpotato/core/media/movie/_base/static/manage.js similarity index 90% rename from couchpotato/static/scripts/page/manage.js rename to couchpotato/core/media/movie/_base/static/manage.js index ed9120fa..e8618999 100644 --- a/couchpotato/static/scripts/page/manage.js +++ b/couchpotato/core/media/movie/_base/static/manage.js @@ -2,6 +2,7 @@ Page.Manage = new Class({ Extends: PageBase, + order: 20, name: 'manage', title: 'Do stuff to your existing movies!', @@ -28,7 +29,9 @@ Page.Manage = new Class({ self.list = new MovieList({ 'identifier': 'manage', 'filter': { - 'release_status': 'done' + 'status': 'done', + 'release_status': 'done', + 'status_or': 1 }, 'actions': [MA.IMDB, MA.Trailer, MA.Files, MA.Readd, MA.Edit, MA.Delete], 'menu': [self.refresh_button, self.refresh_quick], @@ -103,7 +106,7 @@ Page.Manage = new Class({ } else { // Capture progress so we can use it in our *each* closure - var progress = json.progress + var progress = json.progress; // Don't add loader when page is loading still if(!self.list.navigation) @@ -114,15 +117,15 @@ Page.Manage = new Class({ self.progress_container.empty(); - var sorted_table = self.parseProgress(json.progress) + var sorted_table = self.parseProgress(json.progress); sorted_table.each(function(folder){ - var folder_progress = progress[folder] + var folder_progress = progress[folder]; new Element('div').adopt( new Element('span.folder', {'text': folder + (folder_progress.eta > 0 ? ', ' + new Date ().increment('second', folder_progress.eta).timeDiffInWords().replace('from now', 'to go') : '') }), - new Element('span.percentage', {'text': folder_progress.total ? (((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100).round() + '%' : '0%'}) + new Element('span.percentage', {'text': folder_progress.total ? Math.round(((folder_progress.total-folder_progress.to_go)/folder_progress.total)*100) + '%' : '0%'}) ).inject(self.progress_container) }); diff --git a/couchpotato/core/media/movie/_base/static/movie.actions.js b/couchpotato/core/media/movie/_base/static/movie.actions.js index 66c84c68..ff71f31d 100644 --- a/couchpotato/core/media/movie/_base/static/movie.actions.js +++ b/couchpotato/core/media/movie/_base/static/movie.actions.js @@ -60,22 +60,6 @@ var MovieAction = new Class({ 'z-index': '1' } }).inject(self.movie, 'top').fade('hide'); - //self.positionMask(); - }, - - positionMask: function(){ - var self = this, - movie = $(self.movie), - s = movie.getSize() - - return; - - return self.mask.setStyles({ - 'width': s.x, - 'height': s.y - }).position({ - 'relativeTo': movie - }) }, toElement: function(){ @@ -94,7 +78,7 @@ MA.IMDB = new Class({ create: function(){ var self = this; - self.id = self.movie.get('imdb') || self.movie.get('identifier'); + self.id = self.movie.getIdentifier ? 
self.movie.getIdentifier() : self.get('imdb'); self.el = new Element('a.imdb', { 'title': 'Go to the IMDB page of ' + self.getTitle(), @@ -121,13 +105,13 @@ MA.Release = new Class({ } }); - if(self.movie.data.releases.length == 0) - self.el.hide() + if(!self.movie.data.releases || self.movie.data.releases.length == 0) + self.el.hide(); else self.showHelper(); App.on('movie.searcher.ended', function(notification){ - if(self.movie.data.id != notification.data.id) return; + if(self.movie.data._id != notification.data._id) return; self.releases = null; if(self.options_container){ @@ -143,30 +127,7 @@ MA.Release = new Class({ if(e) (e).preventDefault(); - if(self.releases) - self.createReleases(); - else { - - self.movie.busy(true); - - Api.request('release.for_movie', { - 'data': { - 'id': self.movie.data.id - }, - 'onComplete': function(json){ - self.movie.busy(false, 1); - - if(json && json.releases){ - self.releases = json.releases; - self.createReleases(); - } - else - alert('Something went wrong, check the logs.'); - } - }); - - } - + self.createReleases(); }, @@ -187,106 +148,102 @@ MA.Release = new Class({ new Element('span.age', {'text': 'Age'}), new Element('span.score', {'text': 'Score'}), new Element('span.provider', {'text': 'Provider'}) - ).inject(self.release_container) + ).inject(self.release_container); - self.releases.each(function(release){ + if(self.movie.data.releases) + self.movie.data.releases.each(function(release){ - var status = Status.get(release.status_id), - quality = Quality.getProfile(release.quality_id) || {}, - info = release.info, - provider = self.get(release, 'provider') + (release.info['provider_extra'] ? self.get(release, 'provider_extra') : ''); - release.status = status; + var quality = Quality.getQuality(release.quality) || {}, + info = release.info || {}, + provider = self.get(release, 'provider') + (info['provider_extra'] ? self.get(release, 'provider_extra') : ''); - var release_name = self.get(release, 'name'); - if(release.files && release.files.length > 0){ - try { - var movie_file = release.files.filter(function(file){ - var type = File.Type.get(file.type_id); - return type && type.identifier == 'movie' - }).pick(); - release_name = movie_file.path.split(Api.getOption('path_sep')).getLast(); - } - catch(e){} - } - - // Create release - var item = new Element('div', { - 'class': 'item '+status.identifier, - 'id': 'release_'+release.id - }).adopt( - new Element('span.name', {'text': release_name, 'title': release_name}), - new Element('span.status', {'text': status.identifier, 'class': 'release_status '+status.identifier}), - new Element('span.quality', {'text': quality.get('label') || 'n/a'}), - new Element('span.size', {'text': release.info['size'] ? Math.floor(self.get(release, 'size')) : 'n/a'}), - new Element('span.age', {'text': self.get(release, 'age')}), - new Element('span.score', {'text': self.get(release, 'score')}), - new Element('span.provider', { 'text': provider, 'title': provider }), - release.info['detail_url'] ? 
new Element('a.info.icon2', { - 'href': release.info['detail_url'], - 'target': '_blank' - }) : new Element('a'), - new Element('a.download.icon2', { - 'events': { - 'click': function(e){ - (e).preventDefault(); - if(!this.hasClass('completed')) - self.download(release); - } + var release_name = self.get(release, 'name'); + if(release.files && release.files.length > 0){ + try { + var movie_file = release.files.filter(function(file){ + var type = File.Type.get(file.type_id); + return type && type.identifier == 'movie' + }).pick(); + release_name = movie_file.path.split(Api.getOption('path_sep')).getLast(); } - }), - new Element('a.delete.icon2', { - 'events': { - 'click': function(e){ - (e).preventDefault(); - self.ignore(release); - } - } - }) - ).inject(self.release_container); - release['el'] = item; - - if(status.identifier == 'ignored' || status.identifier == 'failed' || status.identifier == 'snatched'){ - if(!self.last_release || (self.last_release && self.last_release.status.identifier != 'snatched' && status.identifier == 'snatched')) - self.last_release = release; - } - else if(!self.next_release && status.identifier == 'available'){ - self.next_release = release; - } - - var update_handle = function(notification) { - if(notification.data.id != release.id) return; - - var q = self.movie.quality.getElement('.q_id' + release.quality_id), - status = Status.get(release.status_id), - new_status = Status.get(notification.data.status_id); - - release.status_id = new_status.id - release.el.set('class', 'item ' + new_status.identifier); - - var status_el = release.el.getElement('.release_status'); - status_el.set('class', 'release_status ' + new_status.identifier); - status_el.set('text', new_status.identifier); - - if(!q && (new_status.identifier == 'snatched' || new_status.identifier == 'seeding' || new_status.identifier == 'done')) - var q = self.addQuality(release.quality_id); - - if(new_status && q && !q.hasClass(new_status.identifier)) { - q.removeClass(status.identifier).addClass(new_status.identifier); - q.set('title', q.get('title').replace(status.label, new_status.label)); + catch(e){} } - } - App.on('release.update_status', update_handle); + // Create release + release['el'] = new Element('div', { + 'class': 'item '+release.status, + 'id': 'release_'+release._id + }).adopt( + new Element('span.name', {'text': release_name, 'title': release_name}), + new Element('span.status', {'text': release.status, 'class': 'release_status '+release.status}), + new Element('span.quality', {'text': quality.label + (release.is_3d ? ' 3D' : '') || 'n/a'}), + new Element('span.size', {'text': info['size'] ? Math.floor(self.get(release, 'size')) : 'n/a'}), + new Element('span.age', {'text': self.get(release, 'age')}), + new Element('span.score', {'text': self.get(release, 'score')}), + new Element('span.provider', { 'text': provider, 'title': provider }), + info['detail_url'] ? 
new Element('a.info.icon2', { + 'href': info['detail_url'], + 'target': '_blank' + }) : new Element('a'), + new Element('a.download.icon2', { + 'events': { + 'click': function(e){ + (e).preventDefault(); + if(!this.hasClass('completed')) + self.download(release); + } + } + }), + new Element('a.delete.icon2', { + 'events': { + 'click': function(e){ + (e).preventDefault(); + self.ignore(release); + } + } + }) + ).inject(self.release_container); - }); + if(release.status == 'ignored' || release.status == 'failed' || release.status == 'snatched'){ + if(!self.last_release || (self.last_release && self.last_release.status != 'snatched' && release.status == 'snatched')) + self.last_release = release; + } + else if(!self.next_release && release.status == 'available'){ + self.next_release = release; + } + + var update_handle = function(notification) { + if(notification.data._id != release._id) return; + + var q = self.movie.quality.getElement('.q_' + release.quality), + new_status = notification.data.status; + + release.el.set('class', 'item ' + new_status); + + var status_el = release.el.getElement('.release_status'); + status_el.set('class', 'release_status ' + new_status); + status_el.set('text', new_status); + + if(!q && (new_status == 'snatched' || new_status == 'seeding' || new_status == 'done')) + q = self.addQuality(release.quality_id); + + if(q && !q.hasClass(new_status)) { + q.removeClass(release.status).addClass(new_status); + q.set('title', q.get('title').replace(release.status, new_status)); + } + }; + + App.on('release.update_status', update_handle); + + }); if(self.last_release) - self.release_container.getElements('#release_'+self.last_release.id).addClass('last_release'); + self.release_container.getElements('#release_'+self.last_release._id).addClass('last_release'); if(self.next_release) - self.release_container.getElements('#release_'+self.next_release.id).addClass('next_release'); + self.release_container.getElements('#release_'+self.next_release._id).addClass('next_release'); - if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status.identifier) === false)){ + if(self.next_release || (self.last_release && ['ignored', 'failed'].indexOf(self.last_release.status) === false)){ self.trynext_container = new Element('div.buttons.try_container').inject(self.release_container, 'top'); @@ -295,7 +252,7 @@ MA.Release = new Class({ self.trynext_container.adopt( new Element('span.or', { - 'text': 'This movie is snatched, if anything went wrong, download' + 'text': 'If anything went wrong, download' }), lr ? new Element('a.button.orange', { 'text': 'the same release again', @@ -341,18 +298,17 @@ MA.Release = new Class({ var has_available = false, has_snatched = false; - self.movie.data.releases.each(function(release){ - if(has_available && has_snatched) return; + if(self.movie.data.releases) + self.movie.data.releases.each(function(release){ + if(has_available && has_snatched) return; - var status = Status.get(release.status_id); + if(['snatched', 'downloaded', 'seeding', 'done'].contains(release.status)) + has_snatched = true; - if(['snatched', 'downloaded', 'seeding'].contains(status.identifier)) - has_snatched = true; + if(['available'].contains(release.status)) + has_available = true; - if(['available'].contains(status.identifier)) - has_available = true; - - }); + }); if(has_available || has_snatched){ @@ -385,13 +341,13 @@ MA.Release = new Class({ }, get: function(release, type){ - return release.info[type] !== undefined ? 
release.info[type] : 'n/a' + return (release.info && release.info[type] !== undefined) ? release.info[type] : 'n/a' }, download: function(release){ var self = this; - var release_el = self.release_container.getElement('#release_'+release.id), + var release_el = self.release_container.getElement('#release_'+release._id), icon = release_el.getElement('.download.icon2'); if(icon) @@ -399,7 +355,7 @@ MA.Release = new Class({ Api.request('release.manual_download', { 'data': { - 'id': release.id + 'id': release._id }, 'onComplete': function(json){ if(icon) @@ -418,12 +374,11 @@ MA.Release = new Class({ }, ignore: function(release){ - var self = this; Api.request('release.ignore', { 'data': { - 'id': release.id - }, + 'id': release._id + } }) }, @@ -433,7 +388,7 @@ MA.Release = new Class({ Api.request('media.delete', { 'data': { - 'id': self.movie.get('id'), + 'id': self.movie.get('_id'), 'delete_from': 'wanted' }, 'onComplete': function(){ @@ -455,7 +410,7 @@ MA.Release = new Class({ Api.request('movie.searcher.try_next', { 'data': { - 'id': self.movie.get('id') + 'media_id': self.movie.get('_id') } }); @@ -483,7 +438,7 @@ MA.Trailer = new Class({ watch: function(offset){ var self = this; - var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18' + var data_url = 'https://gdata.youtube.com/feeds/videos?vq="{title}" {year} trailer&max-results=1&alt=json-in-script&orderby=relevance&sortorder=descending&format=5&fmt=18'; var url = data_url.substitute({ 'title': encodeURI(self.getTitle()), 'year': self.get('year'), @@ -542,7 +497,7 @@ MA.Trailer = new Class({ } } - } + }; self.player.addEventListener('onStateChange', change_quality); } @@ -559,7 +514,7 @@ MA.Trailer = new Class({ $(self.movie).setStyle('height', null); setTimeout(function(){ - self.container.destroy() + self.container.destroy(); self.close_button.destroy(); }, 1800) } @@ -610,13 +565,13 @@ MA.Edit = new Class({ ) ).inject(self.movie, 'top'); - Array.each(self.movie.data.library.titles, function(alt){ + Array.each(self.movie.data.info.titles, function(title){ new Element('option', { - 'text': alt.title + 'text': title }).inject(self.title_select); - if(alt['default']) - self.title_select.set('value', alt.title); + if(title == self.movie.data.title) + self.title_select.set('value', title); }); @@ -629,14 +584,14 @@ MA.Edit = new Class({ self.category_select.show(); categories.each(function(category){ - var category_id = category.data.id; + var category_id = category.data._id; new Element('option', { 'value': category_id, 'text': category.data.label }).inject(self.category_select); - if(self.movie.category && self.movie.category.data && self.movie.category.data.id == category_id) + if(self.movie.category && self.movie.category.data && self.movie.category.data._id == category_id) self.category_select.set('value', category_id); }); @@ -649,7 +604,7 @@ MA.Edit = new Class({ profiles.each(function(profile){ - var profile_id = profile.id ? 
profile.id : profile.data.id; + var profile_id = profile.get('_id'); new Element('option', { 'value': profile_id, @@ -672,7 +627,7 @@ MA.Edit = new Class({ Api.request('movie.edit', { 'data': { - 'id': self.movie.get('id'), + 'id': self.movie.get('_id'), 'default_title': self.title_select.get('value'), 'profile_id': self.profile_select.get('value'), 'category_id': self.category_select.get('value') @@ -688,7 +643,7 @@ MA.Edit = new Class({ self.movie.slide('out'); } -}) +}); MA.Refresh = new Class({ @@ -712,7 +667,7 @@ MA.Refresh = new Class({ Api.request('media.refresh', { 'data': { - 'id': self.movie.get('id') + 'id': self.movie.get('_id') } }); } @@ -726,10 +681,10 @@ MA.Readd = new Class({ create: function(){ var self = this; - var movie_done = Status.get(self.movie.data.status_id).identifier == 'done'; - if(!movie_done) + var movie_done = self.movie.data.status == 'done'; + if(self.movie.data.releases && !movie_done) var snatched = self.movie.data.releases.filter(function(release){ - return release.status && (release.status.identifier == 'snatched' || release.status.identifier == 'downloaded' || release.status.identifier == 'done'); + return release.status && (release.status == 'snatched' || release.status == 'seeding' || release.status == 'downloaded' || release.status == 'done'); }).length; if(movie_done || snatched && snatched > 0) @@ -748,7 +703,7 @@ MA.Readd = new Class({ Api.request('movie.add', { 'data': { - 'identifier': self.movie.get('identifier'), + 'identifier': self.movie.getIdentifier(), 'ignore_previous': 1 } }); @@ -823,7 +778,7 @@ MA.Delete = new Class({ function(){ Api.request('media.delete', { 'data': { - 'id': self.movie.get('id'), + 'id': self.movie.get('_id'), 'delete_from': self.movie.list.options.identifier }, 'onComplete': function(){ @@ -852,46 +807,17 @@ MA.Files = new Class({ create: function(){ var self = this; - self.el = new Element('a.directory', { - 'title': 'Available files', - 'events': { - 'click': self.show.bind(self) - } - }); - - }, - - show: function(e){ - var self = this; - (e).preventDefault(); - - if(self.releases) - self.showFiles(); - else { - - self.movie.busy(true); - - Api.request('release.for_movie', { - 'data': { - 'id': self.movie.data.id - }, - 'onComplete': function(json){ - self.movie.busy(false, 1); - - if(json && json.releases){ - self.releases = json.releases; - self.showFiles(); - } - else - alert('Something went wrong, check the logs.'); + if(self.movie.data.releases && self.movie.data.releases.length > 0) + self.el = new Element('a.directory', { + 'title': 'Available files', + 'events': { + 'click': self.show.bind(self) } }); - } - }, - showFiles: function(){ + show: function(){ var self = this; if(!self.options_container){ @@ -902,26 +828,26 @@ MA.Files = new Class({ // Header new Element('div.item.head').adopt( new Element('span.name', {'text': 'File'}), - new Element('span.type', {'text': 'Type'}), - new Element('span.is_available', {'text': 'Available'}) - ).inject(self.files_container) + new Element('span.type', {'text': 'Type'}) + ).inject(self.files_container); - Array.each(self.releases, function(release){ + if(self.movie.data.releases) + Array.each(self.movie.data.releases, function(release){ + var rel = new Element('div.release').inject(self.files_container); - var rel = new Element('div.release').inject(self.files_container); - - Array.each(release.files, function(file){ - new Element('div.file.item').adopt( - new Element('span.name', {'text': file.path}), - new Element('span.type', {'text': 
File.Type.get(file.type_id).name}), - new Element('span.available', {'text': file.available}) - ).inject(rel) + Object.each(release.files, function(files, type){ + Array.each(files, function(file){ + new Element('div.file.item').adopt( + new Element('span.name', {'text': file}), + new Element('span.type', {'text': type}) + ).inject(rel) + }); + }); }); - }); } self.movie.slide('in', self.options_container); - }, + } -}); \ No newline at end of file +}); diff --git a/couchpotato/core/media/movie/_base/static/movie.css b/couchpotato/core/media/movie/_base/static/movie.css index a88a2077..311b111d 100644 --- a/couchpotato/core/media/movie/_base/static/movie.css +++ b/couchpotato/core/media/movie/_base/static/movie.css @@ -123,15 +123,18 @@ .movies.thumbs_list .movie { width: 16.66667%; height: auto; + min-height: 200px; display: inline-block; margin: 0; padding: 0; vertical-align: top; + line-height: 0; } @media all and (max-width: 800px) { .movies.thumbs_list .movie { width: 25%; + min-height: 100px; } } @@ -343,6 +346,7 @@ top: auto; right: auto; color: #FFF; + line-height: 18px; } .touch_enabled .movies.list_list .movie .info .year { @@ -353,18 +357,40 @@ top: 30px; clear: both; bottom: 30px; - overflow: hidden; position: absolute; } - .movies .data:hover .description { - overflow: auto; - } .movies.list_list .movie:not(.details_view) .info .description, .movies.mass_edit_list .info .description, .movies.thumbs_list .info .description { display: none; } + .movies .data .eta { + display: none; + } + + .movies.details_list .data .eta { + position: absolute; + bottom: 0; + right: 0; + display: block; + min-height: 20px; + text-align: right; + font-style: italic; + opacity: .8; + font-size: 11px; + } + + .movies.details_list .movie:hover .data .eta { + display: none; + } + + .movies.thumbs_list .data .eta { + display: block; + position: absolute; + bottom: 40px; + } + .movies .data .quality { position: absolute; bottom: 2px; @@ -396,7 +422,6 @@ .movies .data .quality span { padding: 2px 3px; - font-weight: bold; opacity: 0.5; font-size: 10px; height: 16px; @@ -449,7 +474,6 @@ right: 20px; line-height: 0; top: 0; - display: block; width: auto; opacity: 0; display: none; @@ -831,7 +855,6 @@ } .movies .alph_nav .search input { - padding: 6px 5px; width: 100%; height: 44px; display: inline-block; @@ -839,7 +862,6 @@ background: none; color: #444; font-size: 14px; - padding: 10px; padding: 0 10px 0 30px; border-bottom: 1px solid rgba(0,0,0,.08); } @@ -1041,7 +1063,6 @@ } .movies .progress > div .percentage { - font-weight: bold; display: inline-block; text-transform: uppercase; font-weight: normal; diff --git a/couchpotato/core/media/movie/_base/static/movie.js b/couchpotato/core/media/movie/_base/static/movie.js index 3ca1912c..47880089 100644 --- a/couchpotato/core/media/movie/_base/static/movie.js +++ b/couchpotato/core/media/movie/_base/static/movie.js @@ -23,46 +23,46 @@ var Movie = new Class({ addEvents: function(){ var self = this; - self.global_events = {} + self.global_events = {}; // Do refresh with new data self.global_events['movie.update'] = function(notification){ - if(self.data.id != notification.data.id) return; + if(self.data._id != notification.data._id) return; self.busy(false); self.removeView(); self.update.delay(2000, self, notification); - } + }; App.on('movie.update', self.global_events['movie.update']); // Add spinner on load / search ['media.busy', 'movie.searcher.started'].each(function(listener){ self.global_events[listener] = function(notification){ - 
if(notification.data && (self.data.id == notification.data.id || (typeOf(notification.data.id) == 'array' && notification.data.id.indexOf(self.data.id) > -1))) + if(notification.data && (self.data._id == notification.data._id || (typeOf(notification.data._id) == 'array' && notification.data._id.indexOf(self.data._id) > -1))) self.busy(true); - } + }; App.on(listener, self.global_events[listener]); - }) + }); // Remove spinner self.global_events['movie.searcher.ended'] = function(notification){ - if(notification.data && self.data.id == notification.data.id) + if(notification.data && self.data._id == notification.data._id) self.busy(false) - } + }; App.on('movie.searcher.ended', self.global_events['movie.searcher.ended']); // Reload when releases have updated self.global_events['release.update_status'] = function(notification){ - var data = notification.data - if(data && self.data.id == data.movie_id){ + var data = notification.data; + if(data && self.data._id == data.movie_id){ if(!self.data.releases) self.data.releases = []; - self.data.releases.push({'quality_id': data.quality_id, 'status_id': data.status_id}); + self.data.releases.push({'quality': data.quality, 'status': data.status}); self.updateReleases(); } - } + }; App.on('release.update_status', self.global_events['release.update_status']); @@ -73,7 +73,7 @@ var Movie = new Class({ self.el.destroy(); delete self.list.movies_added[self.get('id')]; - self.list.movies.erase(self) + self.list.movies.erase(self); self.list.checkIfEmpty(); @@ -117,18 +117,6 @@ var Movie = new Class({ }).inject(self.el, 'top').fade('hide'); }, - positionMask: function(){ - var self = this, - s = self.el.getSize() - - return self.mask.setStyles({ - 'width': s.x, - 'height': s.y - }).position({ - 'relativeTo': self.el - }) - }, - update: function(notification){ var self = this; @@ -146,8 +134,22 @@ var Movie = new Class({ create: function(){ var self = this; - var s = Status.get(self.get('status_id')); - self.el.addClass('status_'+s.identifier); + self.el.addClass('status_'+self.get('status')); + + var eta = null, + eta_date = null, + now = Math.round(+new Date()/1000); + + if(self.data.info.release_date) + [self.data.info.release_date.dvd, self.data.info.release_date.theater].each(function(timestamp){ + if (timestamp > 0 && (eta == null || Math.abs(timestamp - now) < Math.abs(eta - now))) + eta = timestamp; + }); + + if(eta){ + eta_date = new Date(eta * 1000); + eta_date = eta_date.toLocaleString('en-us', { month: "long" }) + ' ' + eta_date.getFullYear(); + } self.el.adopt( self.select_checkbox = new Element('input[type=checkbox].inlay', { @@ -157,7 +159,10 @@ var Movie = new Class({ } } }), - self.thumbnail = File.Select.single('poster', self.data.library.files), + self.thumbnail = (self.data.files && self.data.files.image_poster) ? 
new Element('img', { + 'class': 'type_image poster', + 'src': Api.createUrl('file.cache') + self.data.files.image_poster[0].split(Api.getOption('path_sep')).pop() + }): null, self.data_container = new Element('div.data.inlay.light').adopt( self.info_container = new Element('div.info').adopt( new Element('div.title').adopt( @@ -165,12 +170,16 @@ var Movie = new Class({ 'text': self.getTitle() || 'n/a' }), self.year = new Element('div.year', { - 'text': self.data.library.year || 'n/a' + 'text': self.data.info.year || 'n/a' }) ), - self.description = new Element('div.description', { - 'text': self.data.library.plot + self.description = new Element('div.description.tiny_scroll', { + 'text': self.data.info.plot }), + self.eta = eta_date && (now+8035200 > eta) ? new Element('div.eta', { + 'text': eta_date, + 'title': 'ETA' + }) : null, self.quality = new Element('div.quality', { 'events': { 'click': function(e){ @@ -185,7 +194,7 @@ var Movie = new Class({ ) ); - if(self.thumbnail.empty) + if(!self.thumbnail) self.el.addClass('no_thumbnail'); //self.changeView(self.view); @@ -195,7 +204,7 @@ var Movie = new Class({ if(self.profile.data) self.profile.getTypes().each(function(type){ - var q = self.addQuality(type.quality_id || type.get('quality_id')); + var q = self.addQuality(type.get('quality'), type.get('3d')); if((type.finish == true || type.get('finish')) && !q.hasClass('finish')){ q.addClass('finish'); q.set('title', q.get('title') + ' Will finish searching for this movie if this quality is found.') @@ -207,7 +216,7 @@ var Movie = new Class({ self.updateReleases(); Object.each(self.options.actions, function(action, key){ - self.action[key.toLowerCase()] = action = new self.options.actions[key](self) + self.action[key.toLowerCase()] = action = new self.options.actions[key](self); if(action.el) self.actions.adopt(action) }); @@ -220,27 +229,27 @@ var Movie = new Class({ self.data.releases.each(function(release){ - var q = self.quality.getElement('.q_id'+ release.quality_id), - status = Status.get(release.status_id); + var q = self.quality.getElement('.q_'+ release.quality+(release.is_3d ? '.is_3d' : ':not(.is_3d)')), + status = release.status; - if(!q && (status.identifier == 'snatched' || status.identifier == 'seeding' || status.identifier == 'done')) - var q = self.addQuality(release.quality_id) + if(!q && (status == 'snatched' || status == 'seeding' || status == 'done')) + q = self.addQuality(release.quality, release.is_3d || false); - if (status && q && !q.hasClass(status.identifier)){ - q.addClass(status.identifier); - q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status.label) + if (q && !q.hasClass(status)){ + q.addClass(status); + q.set('title', (q.get('title') ? q.get('title') : '') + ' status: '+ status) } }); }, - addQuality: function(quality_id){ + addQuality: function(quality, is_3d){ var self = this; - var q = Quality.getQuality(quality_id); + var q = Quality.getQuality(quality); return new Element('span', { - 'text': q.label, - 'class': 'q_'+q.identifier + ' q_id' + q.id, + 'text': q.label + (is_3d ? ' 3D' : ''), + 'class': 'q_'+q.identifier + (is_3d ? 
' is_3d' : ''), 'title': '' }).inject(self.quality); @@ -249,16 +258,10 @@ var Movie = new Class({ getTitle: function(){ var self = this; - var titles = self.data.library.titles; - - var title = titles.filter(function(title){ - return title['default'] - }).pop() - - if(title) - return self.getUnprefixedTitle(title.title) - else if(titles.length > 0) - return self.getUnprefixedTitle(titles[0].title) + if(self.data.title) + return self.getUnprefixedTitle(self.data.title); + else if(self.data.info.titles.length > 0) + return self.getUnprefixedTitle(self.data.info.titles[0]); return 'Unknown movie' }, @@ -266,6 +269,10 @@ var Movie = new Class({ getUnprefixedTitle: function(t){ if(t.substr(0, 4).toLowerCase() == 'the ') t = t.substr(4) + ', The'; + else if(t.substr(0, 3).toLowerCase() == 'an ') + t = t.substr(3) + ', An'; + else if(t.substr(0, 2).toLowerCase() == 'a ') + t = t.substr(2) + ', A'; return t; }, @@ -279,12 +286,12 @@ var Movie = new Class({ self.el.addEvent('outerClick', function(){ self.removeView(); self.slide('out') - }) + }); el.show(); self.data_container.addClass('hide_right'); } else { - self.el.removeEvents('outerClick') + self.el.removeEvents('outerClick'); setTimeout(function(){ if(self.el) @@ -301,7 +308,7 @@ var Movie = new Class({ if(self.el) self.el .removeClass(self.view+'_view') - .addClass(new_view+'_view') + .addClass(new_view+'_view'); self.view = new_view; }, @@ -312,8 +319,19 @@ var Movie = new Class({ self.el.removeClass(self.view+'_view') }, + getIdentifier: function(){ + var self = this; + + try { + return self.get('identifiers').imdb; + } + catch (e){ } + + return self.get('imdb'); + }, + get: function(attr){ - return this.data[attr] || this.data.library[attr] + return this.data[attr] || this.data.info[attr] }, select: function(bool){ diff --git a/couchpotato/core/media/movie/_base/static/search.js b/couchpotato/core/media/movie/_base/static/search.js index e04167f0..3b426762 100644 --- a/couchpotato/core/media/movie/_base/static/search.js +++ b/couchpotato/core/media/movie/_base/static/search.js @@ -41,7 +41,7 @@ Block.Search.MovieItem = new Class({ ) ) ) - ) + ); if(info.titles) info.titles.each(function(title){ @@ -132,19 +132,19 @@ Block.Search.MovieItem = new Class({ if(!self.options_el.hasClass('set')){ - if(self.info.in_library){ + if(info.in_library){ var in_library = []; - self.info.in_library.releases.each(function(release){ - in_library.include(release.quality.label) + (info.in_library.releases || []).each(function(release){ + in_library.include(release.quality) }); } self.options_el.grab( new Element('div', { - 'class': self.info.in_wanted && self.info.in_wanted.profile_id || in_library ? 'in_library_wanted' : '' + 'class': info.in_wanted && info.in_wanted.profile_id || in_library ? 'in_library_wanted' : '' }).adopt( - self.info.in_wanted && self.info.in_wanted.profile_id ? new Element('span.in_wanted', { - 'text': 'Already in wanted list: ' + Quality.getProfile(self.info.in_wanted.profile_id).get('label') + info.in_wanted && info.in_wanted.profile_id ? new Element('span.in_wanted', { + 'text': 'Already in wanted list: ' + Quality.getProfile(info.in_wanted.profile_id).get('label') }) : (in_library ? 
new Element('span.in_library', { 'text': 'Already in library: ' + in_library.join(', ') }) : null), @@ -172,7 +172,7 @@ Block.Search.MovieItem = new Class({ new Element('option', { 'text': alt.title }).inject(self.title_select) - }) + }); // Fill categories @@ -184,7 +184,7 @@ Block.Search.MovieItem = new Class({ self.category_select.show(); categories.each(function(category){ new Element('option', { - 'value': category.data.id, + 'value': category.data._id, 'text': category.data.label }).inject(self.category_select); }); @@ -197,8 +197,8 @@ Block.Search.MovieItem = new Class({ profiles.each(function(profile){ new Element('option', { - 'value': profile.id ? profile.id : profile.data.id, - 'text': profile.label ? profile.label : profile.data.label + 'value': profile.get('_id'), + 'text': profile.get('label') }).inject(self.profile_select) }); @@ -215,9 +215,9 @@ Block.Search.MovieItem = new Class({ loadingMask: function(){ var self = this; - self.mask = new Element('div.mask').inject(self.el).fade('hide') + self.mask = new Element('div.mask').inject(self.el).fade('hide'); - createSpinner(self.mask) + createSpinner(self.mask); self.mask.fade('in') }, diff --git a/couchpotato/static/scripts/page/wanted.js b/couchpotato/core/media/movie/_base/static/wanted.js similarity index 93% rename from couchpotato/static/scripts/page/wanted.js rename to couchpotato/core/media/movie/_base/static/wanted.js index 6a3d4cbc..461a64e1 100644 --- a/couchpotato/static/scripts/page/wanted.js +++ b/couchpotato/core/media/movie/_base/static/wanted.js @@ -2,6 +2,7 @@ Page.Wanted = new Class({ Extends: PageBase, + order: 10, name: 'wanted', title: 'Gimmy gimmy gimmy!', folder_browser: null, @@ -73,7 +74,7 @@ Page.Wanted = new Class({ } else { var progress = json.movie; - self.manual_search.set('text', 'Searching.. (' + (((progress.total-progress.to_go)/progress.total)*100).round() + '%)'); + self.manual_search.set('text', 'Searching.. 
(' + Math.round(((progress.total-progress.to_go)/progress.total)*100) + '%)');
 				}
 			}
 		});
 
@@ -87,7 +88,7 @@ Page.Wanted = new Class({
 		var self = this;
 		var options = {
 			'name': 'Scan_folder'
-		}
+		};
 
 		if(!self.folder_browser){
 			self.folder_browser = new Option['Directory']("Scan", "folder", "", options);
@@ -96,9 +97,9 @@
 			var folder = self.folder_browser.getValue();
 			Api.request('renamer.scan', {
 				'data': {
-					'base_folder': folder,
-				},
-			});
+					'base_folder': folder
+				}
+			});
 		};
 
 		self.folder_browser.inject(self.el, 'top');
diff --git a/couchpotato/core/media/movie/charts/__init__.py b/couchpotato/core/media/movie/charts/__init__.py
new file mode 100644
index 00000000..361da51a
--- /dev/null
+++ b/couchpotato/core/media/movie/charts/__init__.py
@@ -0,0 +1,48 @@
+from .main import Charts
+
+
+def autoload():
+    return Charts()
+
+
+config = [{
+    'name': 'charts',
+    'groups': [
+        {
+            'label': 'Charts',
+            'description': 'Displays selected charts on the home page',
+            'type': 'list',
+            'name': 'charts_providers',
+            'tab': 'display',
+            'options': [
+                {
+                    'name': 'max_items',
+                    'default': 5,
+                    'type': 'int',
+                    'description': 'Maximum number of items displayed from each chart.',
+                },
+                {
+                    'name': 'update_interval',
+                    'default': 12,
+                    'type': 'int',
+                    'advanced': True,
+                    'description': '(hours)',
+                },
+                {
+                    'name': 'hide_wanted',
+                    'default': False,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Hide the chart movies that are already in your wanted list.',
+                },
+                {
+                    'name': 'hide_library',
+                    'default': False,
+                    'type': 'bool',
+                    'advanced': True,
+                    'description': 'Hide the chart movies that are already in your library.',
+                },
+            ],
+        },
+    ],
+}]
diff --git a/couchpotato/core/media/movie/charts/main.py b/couchpotato/core/media/movie/charts/main.py
new file mode 100644
index 00000000..fe6ddc0f
--- /dev/null
+++ b/couchpotato/core/media/movie/charts/main.py
@@ -0,0 +1,61 @@
+import time
+
+from couchpotato import tryInt
+from couchpotato.core.logger import CPLog
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEvent
+from couchpotato.core.plugins.base import Plugin
+
+
+log = CPLog(__name__)
+
+
+class Charts(Plugin):
+
+    update_in_progress = False
+
+    def __init__(self):
+        addApiView('charts.view', self.automationView)
+        addEvent('app.load', self.setCrons)
+
+    def setCrons(self):
+        fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.conf('update_interval', default = 12))
+
+    def automationView(self, force_update = False, **kwargs):
+
+        if force_update:
+            charts = self.updateViewCache()
+        else:
+            charts = self.getCache('charts_cached')
+            if not charts:
+                charts = self.updateViewCache()
+
+        return {
+            'success': True,
+            'count': len(charts),
+            'charts': charts
+        }
+
+    def updateViewCache(self):
+
+        if self.update_in_progress:
+            while self.update_in_progress:
+                time.sleep(1)
+            cached_charts = self.getCache('charts_cached')
+            if cached_charts:
+                return cached_charts
+
+        charts = []
+        try:
+            self.update_in_progress = True
+            charts = fireEvent('automation.get_chart_list', merge = True)
+            for chart in charts:
+                chart['hide_wanted'] = self.conf('hide_wanted')
+                chart['hide_library'] = self.conf('hide_library')
+            self.setCache('charts_cached', charts, timeout = 7200 * tryInt(self.conf('update_interval', default = 12)))
+        except:
+            log.error('Failed refreshing charts')
+
+        self.update_in_progress = False
+
+        return charts
diff --git a/couchpotato/core/media/movie/charts/static/charts.css 
b/couchpotato/core/media/movie/charts/static/charts.css new file mode 100644 index 00000000..261169f6 --- /dev/null +++ b/couchpotato/core/media/movie/charts/static/charts.css @@ -0,0 +1,266 @@ +.charts { + clear: both; + margin-bottom: 30px; +} + + .charts > h2 { + height: 40px; + } + + .charts .chart { + display: inline-block; + width: 50%; + vertical-align: top; + max-height: 510px; + scrollbar-base-color: #4e5969; + } + + .charts .chart .media_result.hidden { + display: none; + } + +.charts .refresh { + clear:both; + position: relative; +} + + .charts .refresh .refreshing { + display: block; + padding: 20px; + font-size: 20px; + text-align:center; + } + + .charts .refresh a { + text-align: center; + padding: 0; + display: none; + width: 30px; + height: 30px; + position: absolute; + right: 10px; + top: -40px; + opacity: .7; + } + + .charts .refresh a:hover { + opacity: 1; + } + + .charts p.no_charts_enabled { + padding: 0.7em 1em; + display: none; + } + + .charts .chart h3 a { + color: #fff; + } + + +.charts .chart .media_result { + display: inline-block; + width: 100%; + height: 150px; +} + +@media all and (max-width: 960px) { + .charts .chart { + width: 50%; + } +} + +@media all and (max-width: 600px) { + .charts .chart { + width: 100%; + } +} + +.charts .chart .media_result .data { + left: 150px; + background: #4e5969; + border: none; +} + + .charts .chart .media_result .data .info { + top: 10px; + left: 15px; + right: 15px; + bottom: 10px; + overflow: hidden; + } + + .charts .chart .media_result .data .info h2 { + white-space: normal; + max-height: 120px; + font-size: 18px; + line-height: 18px; + } + + .charts .chart .media_result .data .info .rating, + .charts .chart .media_result .data .info .genres, + .charts .chart .media_result .data .info .year { + position: static; + display: block; + padding: 0; + opacity: .6; + } + + .charts .chart .media_result .data .info .year { + margin: 10px 0 0; + } + + .charts .chart .media_result .data .info .rating { + font-size: 20px; + float: right; + margin-top: -20px; + } + .charts .chart .media_result .data .info .rating:before { + content: "\e031"; + font-family: 'Elusive-Icons'; + font-size: 14px; + margin: 0 5px 0 0; + vertical-align: bottom; + } + + .charts .chart .media_result .data .info .genres { + font-size: 11px; + font-style: italic; + text-align: right; + } + + .charts .chart .media_result .data .info .plot { + display: block; + font-size: 11px; + overflow: hidden; + text-align: justify; + height: 100%; + z-index: 2; + top: 64px; + position: absolute; + background: #4e5969; + cursor: pointer; + transition: all .4s ease-in-out; + padding: 0 3px 10px 0; + } + .charts .chart .media_result .data:before { + content: ''; + display: block; + height: 10px; + right: 0; + left: 0; + bottom: 10px; + position: absolute; + background: linear-gradient( + 0deg, + rgba(78, 89, 105, 1) 0%, + rgba(78, 89, 105, 0) 100% + ); + z-index: 3; + pointer-events: none; + } + + .charts .chart .media_result .data .info .plot.full { + top: 0; + overflow: auto; + } + +.charts .chart .media_result .data { + cursor: default; +} + +.charts .chart .media_result .options { + left: 150px; +} + .charts .chart .media_result .options select[name=title] { width: 100%; } + .charts .chart .media_result .options select[name=profile] { width: 100%; } + .charts .chart .media_result .options select[name=category] { width: 100%; } + + .charts .chart .media_result .button { + position: absolute; + margin: 2px 0 0 0; + right: 15px; + bottom: 15px; + } + + +.charts .chart 
.media_result .thumbnail { + width: 100px; + position: absolute; + left: 50px; +} + +.charts .chart .media_result .chart_number { + color: white; + position: absolute; + top: 0; + padding: 10px; + font: bold 2em/1em Helvetica, Sans-Serif; + width: 50px; + height: 100%; + text-align: center; + border-left: 8px solid transparent; +} + + .charts .chart .media_result.chart_in_wanted .chart_number { + border-color: rgba(0, 255, 40, 0.3); + } + + .charts .chart .media_result.chart_in_library .chart_number { + border-color: rgba(0, 202, 32, 0.3); + } + + +.charts .chart .media_result .actions { + position: absolute; + top: 10px; + right: 10px; + display: none; + width: 90px; +} + .charts .chart .media_result:hover .actions { + display: block; + } + .charts .chart .media_result:hover h2 .title { + opacity: 0; + } + .charts .chart .media_result .data.open .actions { + display: none; + } + + .charts .chart .media_result .actions a { + margin-left: 10px; + vertical-align: middle; + } + + +.toggle_menu { + height: 50px; +} + +.toggle_menu a { + display: block; + width: 50%; + float: left; + color: rgba(255,255,255,.6); + border-bottom: 1px solid rgba(255, 255, 255, 0.0666667); +} + +.toggle_menu a:hover { + border-color: #047792; + border-width: 4px; + color: #fff; +} + +.toggle_menu a.active { + border-bottom: 4px solid #04bce6; + color: #fff; +} + +.toggle_menu a:last-child { + float: right; +} + +.toggle_menu h2 { + height: 40px; +} + diff --git a/couchpotato/core/media/movie/charts/static/charts.js b/couchpotato/core/media/movie/charts/static/charts.js new file mode 100644 index 00000000..a04e248f --- /dev/null +++ b/couchpotato/core/media/movie/charts/static/charts.js @@ -0,0 +1,172 @@ +var Charts = new Class({ + + Implements: [Options, Events], + + initialize: function(options){ + var self = this; + self.setOptions(options); + + self.create(); + }, + + create: function(){ + var self = this; + + self.el = new Element('div.charts').adopt( + self.el_no_charts_enabled = new Element('p.no_charts_enabled', { + 'html': 'Hey, it looks like you have no charts enabled at the moment. If you\'d like some great movie suggestions you can go to settings and turn on some charts of your choice.' + }), + self.el_refresh_container = new Element('div.refresh').adopt( + self.el_refresh_link = new Element('a.refresh.icon2', { + 'href': '#', + 'events': { + 'click': function(e) { + e.preventDefault(); + + self.el.getElements('.chart').destroy(); + self.el_refreshing_text.show(); + self.el_refresh_link.hide(); + + self.api_request = Api.request('charts.view', { + 'data': { 'force_update': 1 }, + 'onComplete': self.fill.bind(self) + }); + } + } + }), + self.el_refreshing_text = new Element('span.refreshing', { + 'text': 'Refreshing charts...' 
+ }) + ) + ); + + if( Cookie.read('suggestions_charts_menu_selected') === 'charts') + self.el.show(); + else + self.el.hide(); + + self.api_request = Api.request('charts.view', { + 'onComplete': self.fill.bind(self) + }); + + self.fireEvent.delay(0, self, 'created'); + + }, + + fill: function(json){ + + var self = this; + + self.el_refreshing_text.hide(); + self.el_refresh_link.show(); + + if(!json || json.count == 0){ + self.el_no_charts_enabled.show(); + self.el_refresh_link.show(); + self.el_refreshing_text.hide(); + } + else { + self.el_no_charts_enabled.hide(); + + json.charts.sort(function(a, b) { + return a.order - b.order; + }); + + Object.each(json.charts, function(chart){ + + var c = new Element('div.chart.tiny_scroll').grab( + new Element('h3').grab( new Element('a', { + 'text': chart.name, + 'href': chart.url + })) + ); + + var it = 1; + + Object.each(chart.list, function(movie){ + + var m = new Block.Search.MovieItem(movie, { + 'onAdded': function(){ + self.afterAdded(m, movie) + } + }); + + var in_database_class = (chart.hide_wanted && movie.in_wanted) ? 'hidden' : (movie.in_wanted ? 'chart_in_wanted' : ((chart.hide_library && movie.in_library) ? 'hidden': (movie.in_library ? 'chart_in_library' : ''))), + in_database_title = movie.in_wanted ? 'Movie in wanted list' : (movie.in_library ? 'Movie in library' : ''); + + m.el + .addClass(in_database_class) + .grab( + new Element('div.chart_number', { + 'text': it++, + 'title': in_database_title + }) + ); + + m.data_container.grab( + new Element('div.actions').adopt( + new Element('a.add.icon2', { + 'title': 'Add movie with your default quality', + 'data-add': movie.imdb, + 'events': { + 'click': m.showOptions.bind(m) + } + }), + $(new MA.IMDB(m)), + $(new MA.Trailer(m, { + 'height': 150 + })) + ) + ); + m.data_container.removeEvents('click'); + + var plot = false; + if(m.info.plot && m.info.plot.length > 0) + plot = m.info.plot; + + // Add rating + m.info_container.adopt( + m.rating = m.info.rating && m.info.rating.imdb && m.info.rating.imdb.length == 2 && parseFloat(m.info.rating.imdb[0]) > 0 ? new Element('span.rating', { + 'text': parseFloat(m.info.rating.imdb[0]), + 'title': parseInt(m.info.rating.imdb[1]) + ' votes' + }) : null, + m.genre = m.info.genres && m.info.genres.length > 0 ? new Element('span.genres', { + 'text': m.info.genres.slice(0, 3).join(', ') + }) : null, + m.plot = plot ? 
new Element('span.plot', { + 'text': plot, + 'events': { + 'click': function(){ + this.toggleClass('full') + } + } + }) : null + ); + + $(m).inject(c); + + }); + + c.inject(self.el); + + }); + + } + + self.fireEvent('loaded'); + + }, + + afterAdded: function(m){ + + $(m).getElement('div.chart_number') + .addClass('chart_in_wanted') + .set('title', 'Movie in wanted list'); + + }, + + toElement: function(){ + return this.el; + } + +}); diff --git a/couchpotato/core/media/movie/library.py b/couchpotato/core/media/movie/library.py new file mode 100644 index 00000000..28cb1b46 --- /dev/null +++ b/couchpotato/core/media/movie/library.py @@ -0,0 +1,32 @@ +from couchpotato.core.event import addEvent +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.library.base import LibraryBase + + +log = CPLog(__name__) + +autoload = 'MovieLibraryPlugin' + + +class MovieLibraryPlugin(LibraryBase): + + def __init__(self): + addEvent('library.query', self.query) + + def query(self, media, first = True, include_year = True, **kwargs): + if media.get('type') != 'movie': + return + + default_title = getTitle(media) + titles = media['info'].get('titles', []) + titles.insert(0, default_title) + + # Add year identifier to titles + if include_year: + titles = [title + (' %s' % str(media['info']['year'])) for title in titles] + + if first: + return titles[0] if titles else None + + return titles diff --git a/couchpotato/core/media/movie/library/movie/__init__.py b/couchpotato/core/media/movie/library/movie/__init__.py deleted file mode 100644 index 98ed54c0..00000000 --- a/couchpotato/core/media/movie/library/movie/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import MovieLibraryPlugin - - -def start(): - return MovieLibraryPlugin() - -config = [] diff --git a/couchpotato/core/media/movie/library/movie/main.py b/couchpotato/core/media/movie/library/movie/main.py deleted file mode 100644 index 034a8fb0..00000000 --- a/couchpotato/core/media/movie/library/movie/main.py +++ /dev/null @@ -1,200 +0,0 @@ -from couchpotato import get_session -from couchpotato.core.event import addEvent, fireEventAsync, fireEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString -from couchpotato.core.logger import CPLog -from couchpotato.core.media._base.library import LibraryBase -from couchpotato.core.settings.model import Library, LibraryTitle, File -from string import ascii_letters -import time -import traceback -import six - -log = CPLog(__name__) - - -class MovieLibraryPlugin(LibraryBase): - - default_dict = {'titles': {}, 'files': {}} - - def __init__(self): - addEvent('library.add.movie', self.add) - addEvent('library.update.movie', self.update) - addEvent('library.update.movie.release_date', self.updateReleaseDate) - - def add(self, attrs = None, update_after = True): - if not attrs: attrs = {} - - primary_provider = attrs.get('primary_provider', 'imdb') - - try: - db = get_session() - - l = db.query(Library).filter_by(identifier = attrs.get('identifier')).first() - if not l: - status = fireEvent('status.get', 'needs_update', single = True) - l = Library( - year = attrs.get('year'), - identifier = attrs.get('identifier'), - plot = toUnicode(attrs.get('plot')), - tagline = toUnicode(attrs.get('tagline')), - status_id = status.get('id'), - info = {} - ) - - title = LibraryTitle( - title = toUnicode(attrs.get('title')), - simple_title = self.simplifyTitle(attrs.get('title')), - ) - - l.titles.append(title) - - db.add(l) - 
db.commit() - - # Update library info - if update_after is not False: - handle = fireEventAsync if update_after is 'async' else fireEvent - handle('library.update.movie', identifier = l.identifier, default_title = toUnicode(attrs.get('title', ''))) - - library_dict = l.to_dict(self.default_dict) - return library_dict - except: - log.error('Failed adding media: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() - - return {} - - def update(self, identifier, default_title = '', extended = False): - - if self.shuttingDown(): - return - - try: - db = get_session() - - library = db.query(Library).filter_by(identifier = identifier).first() - done_status = fireEvent('status.get', 'done', single = True) - - info = fireEvent('movie.info', merge = True, extended = extended, identifier = identifier) - - # Don't need those here - try: del info['in_wanted'] - except: pass - try: del info['in_library'] - except: pass - - if not info or len(info) == 0: - log.error('Could not update, no movie info to work with: %s', identifier) - return False - - # Main info - library.plot = toUnicode(info.get('plot', '')) - library.tagline = toUnicode(info.get('tagline', '')) - library.year = info.get('year', 0) - library.status_id = done_status.get('id') - library.info.update(info) - db.commit() - - # Titles - [db.delete(title) for title in library.titles] - db.commit() - - titles = info.get('titles', []) - log.debug('Adding titles: %s', titles) - counter = 0 - - def_title = None - for title in titles: - if (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == six.u('') and toUnicode(titles[0]) == title): - def_title = toUnicode(title) - break - counter += 1 - - if not def_title: - def_title = toUnicode(titles[0]) - - for title in titles: - if not title: - continue - title = toUnicode(title) - t = LibraryTitle( - title = title, - simple_title = self.simplifyTitle(title), - default = title == def_title - ) - library.titles.append(t) - - db.commit() - - # Files - images = info.get('images', []) - for image_type in ['poster']: - for image in images.get(image_type, []): - if not isinstance(image, (str, unicode)): - continue - - file_path = fireEvent('file.download', url = image, single = True) - if file_path: - file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True) - try: - file_obj = db.query(File).filter_by(id = file_obj.get('id')).one() - library.files.append(file_obj) - db.commit() - - break - except: - log.debug('Failed to attach to library: %s', traceback.format_exc()) - db.rollback() - - library_dict = library.to_dict(self.default_dict) - return library_dict - except: - log.error('Failed update media: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() - - return {} - - def updateReleaseDate(self, identifier): - - try: - db = get_session() - library = db.query(Library).filter_by(identifier = identifier).first() - - if not library.info: - library_dict = self.update(identifier) - dates = library_dict.get('info', {}).get('release_date') - else: - dates = library.info.get('release_date') - - if dates and (dates.get('expires', 0) < time.time() or dates.get('expires', 0) > time.time() + (604800 * 4)) or not dates: - dates = fireEvent('movie.release_date', identifier = identifier, merge = True) - library.info.update({'release_date': dates}) - db.commit() - - return dates - except: - log.error('Failed updating release dates: %s', 
traceback.format_exc()) - db.rollback() - finally: - db.close() - - return {} - - - def simplifyTitle(self, title): - - title = toUnicode(title) - - nr_prefix = '' if title[0] in ascii_letters else '#' - title = simplifyString(title) - - for prefix in ['the ']: - if prefix == title[:len(prefix)]: - title = title[len(prefix):] - break - - return nr_prefix + title diff --git a/couchpotato/core/providers/trailer/__init__.py b/couchpotato/core/media/movie/providers/__init__.py similarity index 100% rename from couchpotato/core/providers/trailer/__init__.py rename to couchpotato/core/media/movie/providers/__init__.py diff --git a/couchpotato/core/providers/automation/__init__.py b/couchpotato/core/media/movie/providers/automation/__init__.py similarity index 100% rename from couchpotato/core/providers/automation/__init__.py rename to couchpotato/core/media/movie/providers/automation/__init__.py diff --git a/couchpotato/core/providers/automation/base.py b/couchpotato/core/media/movie/providers/automation/base.py similarity index 84% rename from couchpotato/core/providers/automation/base.py rename to couchpotato/core/media/movie/providers/automation/base.py index e57f5c63..1a8d981f 100644 --- a/couchpotato/core/providers/automation/base.py +++ b/couchpotato/core/media/movie/providers/automation/base.py @@ -1,16 +1,19 @@ +import time + from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import Provider +from couchpotato.core.media._base.providers.automation.base import AutomationBase from couchpotato.environment import Env from couchpotato.core.helpers.variable import splitString -import time + log = CPLog(__name__) -class Automation(Provider): +class Automation(AutomationBase): enabled_option = 'automation_enabled' + chart_enabled_option = 'chart_display_enabled' http_time_between_calls = 2 interval = 1800 @@ -18,6 +21,7 @@ class Automation(Provider): def __init__(self): addEvent('automation.get_movies', self._getMovies) + addEvent('automation.get_chart_list', self._getChartList) def _getMovies(self): @@ -32,6 +36,13 @@ class Automation(Provider): return self.getIMDBids() + def _getChartList(self): + + if not (self.conf(self.chart_enabled_option) or self.conf(self.chart_enabled_option) is None): + return + + return self.getChartList() + def search(self, name, year = None, imdb_only = False): prop_name = 'automation.cached.%s.%s' % (name, year) @@ -92,5 +103,9 @@ class Automation(Provider): def getIMDBids(self): return [] + def getChartList(self): + # Example return: [ {'name': 'Display name of list', 'url': 'http://example.com/', 'order': 1, 'list': []} ] + return + def canCheck(self): return time.time() > self.last_checked + self.interval diff --git a/couchpotato/core/media/movie/providers/automation/bluray.py b/couchpotato/core/media/movie/providers/automation/bluray.py new file mode 100644 index 00000000..0501c601 --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/bluray.py @@ -0,0 +1,158 @@ +from bs4 import BeautifulSoup +from couchpotato import fireEvent +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'Bluray' + + +class Bluray(Automation, RSS): + + interval = 1800 + rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml' + backlog_url = 
'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s' + display_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases' + chart_order = 1 + + def getIMDBids(self): + + movies = [] + + if self.conf('backlog'): + + page = 0 + while True: + page += 1 + + url = self.backlog_url % page + data = self.getHTMLData(url) + soup = BeautifulSoup(data) + + try: + # Stop if the release year is before the minimal year + page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1] + if tryInt(page_year) < self.getMinimal('year'): + break + + for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]: + name = table.h3.get_text().lower().split('blu-ray')[0].strip() + year = table.small.get_text().split('|')[1].strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + if tryInt(year) < self.getMinimal('year'): + continue + + imdb = self.search(name, year) + + if imdb: + if self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + except: + log.debug('Error loading page: %s', page) + break + + self.conf('backlog', value = False) + + rss_movies = self.getRSSData(self.rss_url) + + for movie in rss_movies: + name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() + year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + if tryInt(year) < self.getMinimal('year'): + continue + + imdb = self.search(name, year) + + if imdb: + if self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + + return movies + + + def getChartList(self): + # Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id) + movie_list = {'name': 'Blu-ray.com - New Releases', 'url': self.display_url, 'order': self.chart_order, 'list': []} + movie_ids = [] + max_items = int(self.conf('max_items', section='charts', default=5)) + rss_movies = self.getRSSData(self.rss_url) + + for movie in rss_movies: + name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() + year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() + + if not name.find('/') == -1: # make sure it is not a double movie release + continue + + movie = self.search(name, year) + + if movie: + + if movie.get('imdb') in movie_ids: + continue + + is_movie = fireEvent('movie.is_movie', identifier = movie.get('imdb'), single = True) + if not is_movie: + continue + + movie_ids.append(movie.get('imdb')) + movie_list['list'].append( movie ) + if len(movie_list['list']) >= max_items: + break + + if not movie_list['list']: + return + + return [ movie_list ] + + +config = [{ + 'name': 'bluray', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'bluray_automation', + 'label': 'Blu-ray.com', + 'description': 'Imports movies from blu-ray.com.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'backlog', + 'advanced': True, + 'description': 'Parses the history until the minimum movie year is reached. 
(Will be disabled once it has completed)', + 'default': False, + 'type': 'bool', + }, + ], + }, + { + 'tab': 'display', + 'list': 'charts_providers', + 'name': 'bluray_charts_display', + 'label': 'Blu-ray.com', + 'description': 'Display new releases from Blu-ray.com', + 'options': [ + { + 'name': 'chart_display_enabled', + 'default': True, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/flixster/main.py b/couchpotato/core/media/movie/providers/automation/flixster.py similarity index 54% rename from couchpotato/core/providers/automation/flixster/main.py rename to couchpotato/core/media/movie/providers/automation/flixster.py index f07ecd6b..53a06d76 100644 --- a/couchpotato/core/providers/automation/flixster/main.py +++ b/couchpotato/core/media/movie/providers/automation/flixster.py @@ -1,9 +1,11 @@ from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation +from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) +autoload = 'Flixster' + class Flixster(Automation): @@ -48,3 +50,34 @@ class Flixster(Automation): }) return movies + + +config = [{ + 'name': 'flixster', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'flixster_automation', + 'label': 'Flixster', + 'description': 'Import movies from any public Flixster watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_ids_use', + 'label': 'Use', + }, + { + 'name': 'automation_ids', + 'label': 'User ID', + 'type': 'combined', + 'combine': ['automation_ids_use', 'automation_ids'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/goodfilms/main.py b/couchpotato/core/media/movie/providers/automation/goodfilms.py similarity index 56% rename from couchpotato/core/providers/automation/goodfilms/main.py rename to couchpotato/core/media/movie/providers/automation/goodfilms.py index e668a4fb..483cf029 100644 --- a/couchpotato/core/providers/automation/goodfilms/main.py +++ b/couchpotato/core/media/movie/providers/automation/goodfilms.py @@ -1,9 +1,11 @@ from bs4 import BeautifulSoup from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation +from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) +autoload = 'Goodfilms' + class Goodfilms(Automation): @@ -44,7 +46,10 @@ class Goodfilms(Automation): break for movie in this_watch_list: - movies.append({ 'title': movie['data-film-title'], 'year': movie['data-film-year'] }) + movies.append({ + 'title': movie['data-film-title'], + 'year': movie['data-film-year'] + }) if not 'next page' in data.lower(): break @@ -52,3 +57,28 @@ class Goodfilms(Automation): page += 1 return movies + + +config = [{ + 'name': 'goodfilms', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'goodfilms_automation', + 'label': 'Goodfilms', + 'description': 'import movies from your Goodfilms queue', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_username', + 'label': 'Username', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/imdb.py b/couchpotato/core/media/movie/providers/automation/imdb.py new file mode 100644 index 00000000..783e8f50 --- /dev/null 
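# Note: the chart providers in this diff (Bluray above, IMDB below) feed
# Automation._getChartList, which expects getChartList() to return a list of
# chart dicts or None. A hedged sketch of that contract; the provider name
# and URL here are placeholders.
class ExampleChartProvider(object):

    chart_order = 99
    display_url = 'http://example.com/newreleases'

    def getChartList(self):
        chart = {
            'name': 'Example - New Releases',  # display name of the list
            'url': self.display_url,           # link shown next to the chart
            'order': self.chart_order,         # sort position among charts
            'list': [],                        # full movie info dicts, not bare IMDB ids
        }
        # ...append movie data here, capped at the charts.max_items setting
        # the way Bluray.getChartList does...
        return [chart] if chart['list'] else None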
+++ b/couchpotato/core/media/movie/providers/automation/imdb.py @@ -0,0 +1,328 @@ +import traceback +import re + +from bs4 import BeautifulSoup +from couchpotato import fireEvent +from couchpotato.core.helpers.encoding import ss +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import getImdb, splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.base import MultiProvider +from couchpotato.core.media.movie.providers.automation.base import Automation + + +log = CPLog(__name__) + +autoload = 'IMDB' + + +class IMDB(MultiProvider): + + def getTypes(self): + return [IMDBWatchlist, IMDBAutomation] + + +class IMDBBase(Automation, RSS): + + interval = 1800 + + def getInfo(self, imdb_id): + return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True) + + def getFromURL(self, url): + log.debug('Getting IMDBs from: %s', url) + html = self.getHTMLData(url) + + try: + split = splitString(html, split_on = "
")[1] + html = splitString(split, split_on = "
")[0] + except: + try: + split = splitString(html, split_on = "
") + + if len(split) < 2: + log.error('Failed parsing IMDB page "%s", unexpected html.', url) + return [] + + html = BeautifulSoup(split[1]) + for x in ['list compact', 'lister', 'list detail sub-list']: + html2 = html.find('div', attrs = { + 'class': x + }) + + if html2: + html = html2.contents + html = ''.join([str(x) for x in html]) + break + except: + log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc())) + + html = ss(html) + imdbs = getImdb(html, multiple = True) if html else [] + + return imdbs + + +class IMDBWatchlist(IMDBBase): + + enabled_option = 'automation_enabled' + + def getIMDBids(self): + + movies = [] + + watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] + watchlist_urls = splitString(self.conf('automation_urls')) + + index = -1 + for watchlist_url in watchlist_urls: + + try: + # Get list ID + ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url) + if len(ids) == 1: + watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0] + # Try find user id with watchlist + else: + userids = re.findall('(ur\d{7,9})', watchlist_url) + if len(userids) == 1: + watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0] + except: + log.error('Failed getting id from watchlist: %s', traceback.format_exc()) + + index += 1 + if not watchlist_enablers[index]: + continue + + start = 0 + while True: + try: + + w_url = '%s&start=%s' % (watchlist_url, start) + imdbs = self.getFromURL(w_url) + + for imdb in imdbs: + if imdb not in movies: + movies.append(imdb) + + if self.shuttingDown(): + break + + log.debug('Found %s movies on %s', (len(imdbs), w_url)) + + if len(imdbs) < 225: + break + + start = len(movies) + + except: + log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc())) + break + + return movies + + +class IMDBAutomation(IMDBBase): + + enabled_option = 'automation_providers_enabled' + + charts = { + 'theater': { + 'order': 1, + 'name': 'IMDB - Movies in Theaters', + 'url': 'http://www.imdb.com/movies-in-theaters/', + }, + 'boxoffice': { + 'order': 2, + 'name': 'IMDB - Box Office', + 'url': 'http://www.imdb.com/boxoffice/', + }, + 'rentals': { + 'order': 3, + 'name': 'IMDB - Top DVD rentals', + 'url': 'http://www.imdb.com/boxoffice/rentals', + 'type': 'json', + }, + 'top250': { + 'order': 4, + 'name': 'IMDB - Top 250 Movies', + 'url': 'http://www.imdb.com/chart/top', + }, + } + + def getIMDBids(self): + + movies = [] + + for name in self.charts: + chart = self.charts[name] + url = chart.get('url') + + if self.conf('automation_charts_%s' % name): + imdb_ids = self.getFromURL(url) + + try: + for imdb_id in imdb_ids: + info = self.getInfo(imdb_id) + if info and self.isMinimalMovie(info): + movies.append(imdb_id) + + if self.shuttingDown(): + break + + except: + log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) + + return movies + + def getChartList(self): + + # Nearly identical to 'getIMDBids', but we don't care about minimalMovie and return all movie data (not just id) + movie_lists = [] + max_items = int(self.conf('max_items', section = 'charts', default=5)) + + for name in self.charts: + chart = self.charts[name].copy() + url = chart.get('url') + + if self.conf('chart_display_%s' % name): + + chart['list'] = [] + + imdb_ids = self.getFromURL(url) + + try: + for imdb_id in imdb_ids[0:max_items]: + + is_movie = fireEvent('movie.is_movie', identifier = imdb_id, single = 
True) + if not is_movie: + continue + + info = self.getInfo(imdb_id) + chart['list'].append(info) + + if self.shuttingDown(): + break + except: + log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) + + if chart['list']: + movie_lists.append(chart) + + + return movie_lists + + +config = [{ + 'name': 'imdb', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'imdb_automation_watchlist', + 'label': 'IMDB', + 'description': 'From any public IMDB watchlists.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'imdb_automation_charts', + 'label': 'IMDB', + 'description': 'Import movies from IMDB Charts', + 'options': [ + { + 'name': 'automation_providers_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_charts_theater', + 'type': 'bool', + 'label': 'In Theaters', + 'description': 'New Movies In-Theaters chart', + 'default': True, + }, + { + 'name': 'automation_charts_rentals', + 'type': 'bool', + 'label': 'DVD Rentals', + 'description': 'Top DVD rentals chart', + 'default': True, + }, + { + 'name': 'automation_charts_top250', + 'type': 'bool', + 'label': 'TOP 250', + 'description': 'IMDB TOP 250 chart', + 'default': False, + }, + { + 'name': 'automation_charts_boxoffice', + 'type': 'bool', + 'label': 'Box office TOP 10', + 'description': 'IMDB Box office TOP 10 chart', + 'default': True, + }, + ], + }, + { + 'tab': 'display', + 'list': 'charts_providers', + 'name': 'imdb_charts_display', + 'label': 'IMDB', + 'description': 'Display movies from IMDB Charts', + 'options': [ + { + 'name': 'chart_display_enabled', + 'default': True, + 'type': 'enabler', + }, + { + 'name': 'chart_display_theater', + 'type': 'bool', + 'label': 'In Theaters', + 'description': 'New Movies In-Theaters chart', + 'default': False, + }, + { + 'name': 'chart_display_top250', + 'type': 'bool', + 'label': 'TOP 250', + 'description': 'IMDB TOP 250 chart', + 'default': False, + }, + { + 'name': 'chart_display_rentals', + 'type': 'bool', + 'label': 'DVD Rentals', + 'description': 'Top DVD rentals chart', + 'default': True, + }, + { + 'name': 'chart_display_boxoffice', + 'type': 'bool', + 'label': 'Box office TOP 10', + 'description': 'IMDB Box office TOP 10 chart', + 'default': True, + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/itunes/main.py b/couchpotato/core/media/movie/providers/automation/itunes.py similarity index 61% rename from couchpotato/core/providers/automation/itunes/main.py rename to couchpotato/core/media/movie/providers/automation/itunes.py index 086c981d..fcb20b12 100644 --- a/couchpotato/core/providers/automation/itunes/main.py +++ b/couchpotato/core/media/movie/providers/automation/itunes.py @@ -1,14 +1,18 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import md5, splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation from xml.etree.ElementTree import QName import datetime import traceback import xml.etree.ElementTree as XMLTree +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import md5, splitString, tryInt 
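# Note: how the IMDBAutomation.charts dict above maps onto the config options
# defined below it; each chart key is gated twice, once per feature.
def imdb_chart_option_names(name):
    return {
        'automation': 'automation_charts_%s' % name,  # checked in getIMDBids()
        'display': 'chart_display_%s' % name,         # checked in getChartList()
    }

# imdb_chart_option_names('top250')
# -> {'automation': 'automation_charts_top250', 'display': 'chart_display_top250'}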
+from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + log = CPLog(__name__) +autoload = 'ITunes' + class ITunes(Automation, RSS): @@ -22,7 +26,7 @@ class ITunes(Automation, RSS): urls = splitString(self.conf('automation_urls')) namespace = 'http://www.w3.org/2005/Atom' - namespace_im = 'https://rss.itunes.apple.com' + namespace_im = 'http://itunes.apple.com/rss' index = -1 for url in urls: @@ -58,3 +62,36 @@ class ITunes(Automation, RSS): log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc())) return movies + + +config = [{ + 'name': 'itunes', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'itunes_automation', + 'label': 'iTunes', + 'description': 'From any iTunes Store feed. Url should be the RSS link.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + 'default': ',', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + 'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/kinepolis/main.py b/couchpotato/core/media/movie/providers/automation/kinepolis.py similarity index 50% rename from couchpotato/core/providers/automation/kinepolis/main.py rename to couchpotato/core/media/movie/providers/automation/kinepolis.py index 4158d488..a0f25965 100644 --- a/couchpotato/core/providers/automation/kinepolis/main.py +++ b/couchpotato/core/media/movie/providers/automation/kinepolis.py @@ -1,10 +1,14 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation import datetime +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + log = CPLog(__name__) +autoload = 'Kinepolis' + class Kinepolis(Automation, RSS): @@ -27,3 +31,24 @@ class Kinepolis(Automation, RSS): movies.append(imdb['imdb']) return movies + + +config = [{ + 'name': 'kinepolis', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'kinepolis_automation', + 'label': 'Kinepolis', + 'description': 'Imports movies from the current top 10 of kinepolis.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/letterboxd/main.py b/couchpotato/core/media/movie/providers/automation/letterboxd.py similarity index 51% rename from couchpotato/core/providers/automation/letterboxd/main.py rename to couchpotato/core/media/movie/providers/automation/letterboxd.py index dbbf53b1..e9fc8741 100644 --- a/couchpotato/core/providers/automation/letterboxd/main.py +++ b/couchpotato/core/media/movie/providers/automation/letterboxd.py @@ -1,11 +1,15 @@ +import re + from bs4 import BeautifulSoup from couchpotato.core.helpers.variable import tryInt, splitString, removeEmpty from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -import re +from couchpotato.core.media.movie.providers.automation.base import Automation + log = CPLog(__name__) +autoload = 'Letterboxd' + class Letterboxd(Automation): @@ -46,6 +50,40 @@ class Letterboxd(Automation): for movie 
in soup.find_all('a', attrs = {'class': 'frame'}): match = removeEmpty(self.pattern.split(movie['title'])) - movies.append({'title': match[0], 'year': match[1] }) + movies.append({ + 'title': match[0], + 'year': match[1] + }) return movies + + +config = [{ + 'name': 'letterboxd', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'letterboxd_automation', + 'label': 'Letterboxd', + 'description': 'Import movies from any public Letterboxd watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'Username', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/moviemeter.py b/couchpotato/core/media/movie/providers/automation/moviemeter.py new file mode 100644 index 00000000..b06046fb --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/moviemeter.py @@ -0,0 +1,55 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'Moviemeter' + + +class Moviemeter(Automation, RSS): + + interval = 1800 + rss_url = 'http://www.moviemeter.nl/rss/cinema' + + def getIMDBids(self): + + movies = [] + + rss_movies = self.getRSSData(self.rss_url) + + for movie in rss_movies: + + title = self.getTextElement(movie, 'title') + name_year = fireEvent('scanner.name_year', title, single = True) + if name_year.get('name') and name_year.get('year'): + imdb = self.search(name_year.get('name'), name_year.get('year')) + + if imdb and self.isMinimalMovie(imdb): + movies.append(imdb['imdb']) + else: + log.error('Failed getting name and year from: %s', title) + + return movies + + +config = [{ + 'name': 'moviemeter', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'moviemeter_automation', + 'label': 'Moviemeter', + 'description': 'Imports movies from the current top 10 of moviemeter.nl.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/movies_io/main.py b/couchpotato/core/media/movie/providers/automation/movies_io.py similarity index 50% rename from couchpotato/core/providers/automation/movies_io/main.py rename to couchpotato/core/media/movie/providers/automation/movies_io.py index 0737e2e6..3b0b54f2 100644 --- a/couchpotato/core/providers/automation/movies_io/main.py +++ b/couchpotato/core/media/movie/providers/automation/movies_io.py @@ -2,10 +2,12 @@ from couchpotato.core.event import fireEvent from couchpotato.core.helpers.rss import RSS from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation +from couchpotato.core.media.movie.providers.automation.base import Automation log = CPLog(__name__) +autoload = 'MoviesIO' + class MoviesIO(Automation, RSS): @@ -37,3 +39,34 @@ class MoviesIO(Automation, RSS): movies.append(imdb) return movies + + +config = [{ + 'name': 'moviesio', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'moviesio', + 'label': 'Movies.IO', + 'description': 'Imports movies from Movies.io RSS 
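# Note: Kinepolis, Moviemeter and the other RSS-backed automations in this
# diff share one pipeline: fetch the feed, resolve each entry to an IMDB id,
# and keep it only if it meets the minimal-movie requirements. A condensed
# sketch, assuming the Automation/RSS helper methods used above:
def rss_to_imdb_ids(provider, rss_url):
    movies = []
    for item in provider.getRSSData(rss_url):
        title = provider.getTextElement(item, 'title')
        imdb = provider.search(title)  # name/year extraction varies per provider
        if imdb and provider.isMinimalMovie(imdb):
            movies.append(imdb['imdb'])
    return movies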
watchlists', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/automation/popularmovies.py b/couchpotato/core/media/movie/providers/automation/popularmovies.py new file mode 100644 index 00000000..eb46ecef --- /dev/null +++ b/couchpotato/core/media/movie/providers/automation/popularmovies.py @@ -0,0 +1,47 @@ +from couchpotato import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + +log = CPLog(__name__) + +autoload = 'PopularMovies' + + +class PopularMovies(Automation): + + interval = 1800 + url = 'https://s3.amazonaws.com/popular-movies/movies.json' + + def getIMDBids(self): + + movies = [] + retrieved_movies = self.getJsonData(self.url) + + for movie in retrieved_movies.get('movies'): + imdb_id = movie.get('imdb_id') + info = fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True) + if self.isMinimalMovie(info): + movies.append(imdb_id) + + return movies + + +config = [{ + 'name': 'popularmovies', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'popularmovies_automation', + 'label': 'Popular Movies', + 'description': 'Imports the top titles of movies that have been in theaters. Script provided by Steven Lu', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/rottentomatoes/main.py b/couchpotato/core/media/movie/providers/automation/rottentomatoes.py similarity index 54% rename from couchpotato/core/providers/automation/rottentomatoes/main.py rename to couchpotato/core/media/movie/providers/automation/rottentomatoes.py index c873a8e1..a01f76d2 100644 --- a/couchpotato/core/providers/automation/rottentomatoes/main.py +++ b/couchpotato/core/media/movie/providers/automation/rottentomatoes.py @@ -1,13 +1,17 @@ -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt, splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation from xml.etree.ElementTree import QName import datetime import re +from couchpotato.core.helpers.rss import RSS +from couchpotato.core.helpers.variable import tryInt, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.automation.base import Automation + + log = CPLog(__name__) +autoload = 'Rottentomatoes' + class Rottentomatoes(Automation, RSS): @@ -51,3 +55,42 @@ class Rottentomatoes(Automation, RSS): movies.append(imdb['imdb']) return movies + + +config = [{ + 'name': 'rottentomatoes', + 'groups': [ + { + 'tab': 'automation', + 'list': 'automation_providers', + 'name': 'rottentomatoes_automation', + 'label': 'Rottentomatoes', + 'description': 'Imports movies from rottentomatoes rss feeds specified below.', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_urls_use', + 'label': 'Use', + 'default': '1', + }, + { + 'name': 'automation_urls', + 'label': 'url', + 'type': 'combined', + 'combine': ['automation_urls_use', 'automation_urls'], + 'default': 
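# Note: the PopularMovies provider above reads a JSON document whose shape is
# inferred from its movie.get('imdb_id') calls; roughly:
import json

sample = json.loads('{"movies": [{"imdb_id": "tt1375666", "title": "Inception"}]}')
imdb_ids = [m.get('imdb_id') for m in sample.get('movies', [])]  # -> ['tt1375666']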
'http://www.rottentomatoes.com/syndication/rss/in_theaters.xml', + }, + { + 'name': 'tomatometer_percent', + 'default': '80', + 'label': 'Tomatometer', + 'description': 'Use as extra scoring requirement', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/automation/trakt/main.py b/couchpotato/core/media/movie/providers/automation/trakt.py similarity index 51% rename from couchpotato/core/providers/automation/trakt/main.py rename to couchpotato/core/media/movie/providers/automation/trakt.py index 0109daf3..440a59d8 100644 --- a/couchpotato/core/providers/automation/trakt/main.py +++ b/couchpotato/core/media/movie/providers/automation/trakt.py @@ -1,11 +1,15 @@ +import base64 + from couchpotato.core.event import addEvent from couchpotato.core.helpers.variable import sha1 from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation -import base64 +from couchpotato.core.media.movie.providers.automation.base import Automation + log = CPLog(__name__) +autoload = 'Trakt' + class Trakt(Automation): @@ -42,3 +46,38 @@ class Trakt(Automation): data = self.getJsonData(self.urls['base'] + method_url, headers = headers) return data if data else [] + + +config = [{ + 'name': 'trakt', + 'groups': [ + { + 'tab': 'automation', + 'list': 'watchlist_providers', + 'name': 'trakt_automation', + 'label': 'Trakt', + 'description': 'import movies from your own watchlist', + 'options': [ + { + 'name': 'automation_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'automation_api_key', + 'label': 'Apikey', + }, + { + 'name': 'automation_username', + 'label': 'Username', + }, + { + 'name': 'automation_password', + 'label': 'Password', + 'type': 'password', + 'description': 'When you have "Protect my data" checked on trakt.', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/base.py b/couchpotato/core/media/movie/providers/base.py new file mode 100644 index 00000000..4e80d5d3 --- /dev/null +++ b/couchpotato/core/media/movie/providers/base.py @@ -0,0 +1,5 @@ +from couchpotato.core.media._base.providers.info.base import BaseInfoProvider + + +class MovieProvider(BaseInfoProvider): + type = 'movie' diff --git a/couchpotato/core/providers/userscript/__init__.py b/couchpotato/core/media/movie/providers/info/__init__.py similarity index 100% rename from couchpotato/core/providers/userscript/__init__.py rename to couchpotato/core/media/movie/providers/info/__init__.py diff --git a/couchpotato/core/providers/info/_modifier/main.py b/couchpotato/core/media/movie/providers/info/_modifier.py similarity index 68% rename from couchpotato/core/providers/info/_modifier/main.py rename to couchpotato/core/media/movie/providers/info/_modifier.py index 88d4381c..f6a3089b 100644 --- a/couchpotato/core/providers/info/_modifier/main.py +++ b/couchpotato/core/media/movie/providers/info/_modifier.py @@ -1,14 +1,18 @@ -from couchpotato import get_session +import copy +import traceback + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.variable import mergeDicts, randomString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library -import copy -import traceback + log = CPLog(__name__) +autoload = 'MovieResultModifier' + class MovieResultModifier(Plugin): @@ -22,7 +26,14 @@ class MovieResultModifier(Plugin): 'backdrop': [], 
'poster_original': [], 'backdrop_original': [], - 'actors': {} + 'actors': {}, + 'landscape': [], + 'logo': [], + 'clear_art': [], + 'disc_art': [], + 'banner': [], + 'extra_thumbs': [], + 'extra_fanart': [] }, 'runtime': 0, 'plot': '', @@ -86,21 +97,30 @@ class MovieResultModifier(Plugin): } # Add release info from current library - db = get_session() + db = get_db() try: - l = db.query(Library).filter_by(identifier = imdb).first() - if l: - # Statuses - active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True) + media = None + try: + media = db.get('media', 'imdb-%s' % imdb, with_doc = True)['doc'] + except RecordNotFound: + pass - for movie in l.movies: - if movie.status_id == active_status['id']: - temp['in_wanted'] = fireEvent('media.get', movie.id, single = True) + if media: - for release in movie.releases: - if release.status_id == done_status['id']: - temp['in_library'] = fireEvent('media.get', movie.id, single = True) + if media.get('status') == 'active': + temp['in_wanted'] = media + + try: temp['in_wanted']['profile'] = db.get('id', media['profile_id']) + except: temp['in_wanted']['profile'] = {'label': ''} + + for release in fireEvent('release.for_media', media['_id'], single = True): + if release.get('status') == 'done': + if not temp['in_library']: + temp['in_library'] = media + temp['in_library']['releases'] = [] + + temp['in_library']['releases'].append(release) except: log.error('Tried getting more info on searched movies: %s', traceback.format_exc()) diff --git a/couchpotato/core/providers/info/couchpotatoapi/main.py b/couchpotato/core/media/movie/providers/info/couchpotatoapi.py similarity index 94% rename from couchpotato/core/providers/info/couchpotatoapi/main.py rename to couchpotato/core/media/movie/providers/info/couchpotatoapi.py index 848cbf05..4c65bf8c 100644 --- a/couchpotato/core/providers/info/couchpotatoapi/main.py +++ b/couchpotato/core/media/movie/providers/info/couchpotatoapi.py @@ -1,13 +1,17 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.info.base import MovieProvider -from couchpotato.environment import Env import base64 import time +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + + log = CPLog(__name__) +autoload = 'CouchPotatoApi' + class CouchPotatoApi(MovieProvider): @@ -25,10 +29,12 @@ class CouchPotatoApi(MovieProvider): api_version = 1 def __init__(self): - addEvent('movie.info', self.getInfo, priority = 1) + addEvent('movie.info', self.getInfo, priority = 2) + addEvent('movie.info.release_date', self.getReleaseDate) + addEvent('info.search', self.search, priority = 1) addEvent('movie.search', self.search, priority = 1) - addEvent('movie.release_date', self.getReleaseDate) + addEvent('movie.suggest', self.getSuggestions) addEvent('movie.is_movie', self.isMovie) diff --git a/couchpotato/core/media/movie/providers/info/fanarttv.py b/couchpotato/core/media/movie/providers/info/fanarttv.py new file mode 100644 index 00000000..8bfa92c8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/info/fanarttv.py @@ -0,0 +1,130 @@ +import traceback + +from couchpotato import tryInt +from couchpotato.core.event import addEvent +from couchpotato.core.logger 
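# Note: the reworked MovieResultModifier above swaps the SQL Library query
# for a CodernityDB document lookup; a sketch of just that lookup, assuming
# the 'imdb-<id>' document key scheme used in the diff:
from CodernityDB.database import RecordNotFound

def find_media_doc(db, imdb_id):
    try:
        return db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc']
    except RecordNotFound:
        return None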
import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider + + +log = CPLog(__name__) + +autoload = 'FanartTV' + + +class FanartTV(MovieProvider): + + urls = { + 'api': 'http://api.fanart.tv/webservice/movie/b28b14e9be662e027cfbc7c3dd600405/%s/JSON/all/1/2' + } + + MAX_EXTRAFANART = 20 + http_time_between_calls = 0 + + def __init__(self): + addEvent('movie.info', self.getArt, priority = 1) + + def getArt(self, identifier = None, **kwargs): + + log.debug("Getting Extra Artwork from Fanart.tv...") + if not identifier: + return {} + + images = {} + + try: + url = self.urls['api'] % identifier + fanart_data = self.getJsonData(url) + + if fanart_data: + name, resource = fanart_data.items()[0] + log.debug('Found images for %s', name) + images = self._parseMovie(resource) + + except: + log.error('Failed getting extra art for %s: %s', + (identifier, traceback.format_exc())) + return {} + + return { + 'images': images + } + + def _parseMovie(self, movie): + images = { + 'landscape': self._getMultImages(movie.get('moviethumb', []), 1), + 'logo': [], + 'disc_art': self._getMultImages(self._trimDiscs(movie.get('moviedisc', [])), 1), + 'clear_art': self._getMultImages(movie.get('hdmovieart', []), 1), + 'banner': self._getMultImages(movie.get('moviebanner', []), 1), + 'extra_fanart': [], + } + + if len(images['clear_art']) == 0: + images['clear_art'] = self._getMultImages(movie.get('movieart', []), 1) + + images['logo'] = self._getMultImages(movie.get('hdmovielogo', []), 1) + if len(images['logo']) == 0: + images['logo'] = self._getMultImages(movie.get('movielogo', []), 1) + + fanarts = self._getMultImages(movie.get('moviebackground', []), self.MAX_EXTRAFANART + 1) + + if fanarts: + images['backdrop_original'] = [fanarts[0]] + images['extra_fanart'] = fanarts[1:] + + return images + + def _trimDiscs(self, disc_images): + """ + Return a subset of discImages. Only bluray disc images will be returned. + """ + + trimmed = [] + for disc in disc_images: + if disc.get('disc_type') == 'bluray': + trimmed.append(disc) + + if len(trimmed) == 0: + return disc_images + + return trimmed + + def _getImage(self, images): + image_url = None + highscore = -1 + for image in images: + if tryInt(image.get('likes')) > highscore: + highscore = tryInt(image.get('likes')) + image_url = image.get('url') + + return image_url + + def _getMultImages(self, images, n): + """ + Chooses the best n images and returns them as a list. + If n<0, all images will be returned. 
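# Note: condensed sketch of FanartTV's image selection above; pooling the
# English-language images and repeatedly taking the most-liked one is
# equivalent to sorting the pool by likes, descending.
def pick_image_urls(images, n):
    pool = [img for img in images if img.get('lang') == 'en']
    pool.sort(key = lambda img: int(img.get('likes') or 0), reverse = True)
    return [img.get('url') for img in (pool if n < 0 else pool[:n])]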
+ """ + image_urls = [] + pool = [] + for image in images: + if image.get('lang') == 'en': + pool.append(image) + orig_pool_size = len(pool) + + while len(pool) > 0 and (n < 0 or orig_pool_size - len(pool) < n): + best = None + highscore = -1 + for image in pool: + if tryInt(image.get('likes')) > highscore: + highscore = tryInt(image.get('likes')) + best = image + image_urls.append(best.get('url')) + pool.remove(best) + + return image_urls + + def isDisabled(self): + if self.conf('api_key') == '': + log.error('No API key provided.') + return True + return False diff --git a/couchpotato/core/providers/info/omdbapi/main.py b/couchpotato/core/media/movie/providers/info/omdbapi.py old mode 100755 new mode 100644 similarity index 96% rename from couchpotato/core/providers/info/omdbapi/main.py rename to couchpotato/core/media/movie/providers/info/omdbapi.py index 8f04d3b6..d3a83b63 --- a/couchpotato/core/providers/info/omdbapi/main.py +++ b/couchpotato/core/media/movie/providers/info/omdbapi.py @@ -1,14 +1,18 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import tryUrlencode -from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.info.base import MovieProvider import json import re import traceback +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import tryInt, tryFloat, splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.media.movie.providers.base import MovieProvider + + log = CPLog(__name__) +autoload = 'OMDBAPI' + class OMDBAPI(MovieProvider): @@ -77,6 +81,9 @@ class OMDBAPI(MovieProvider): if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False': return movie_data + if movie.get('Type').lower() != 'movie': + return movie_data + tmp_movie = movie.copy() for key in tmp_movie: if tmp_movie.get(key).lower() == 'n/a': diff --git a/couchpotato/core/providers/info/themoviedb/main.py b/couchpotato/core/media/movie/providers/info/themoviedb.py similarity index 74% rename from couchpotato/core/providers/info/themoviedb/main.py rename to couchpotato/core/media/movie/providers/info/themoviedb.py index d301db2b..4a397edd 100644 --- a/couchpotato/core/providers/info/themoviedb/main.py +++ b/couchpotato/core/media/movie/providers/info/themoviedb.py @@ -1,20 +1,22 @@ +import traceback + from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import simplifyString, toUnicode, ss from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog -from couchpotato.core.providers.info.base import MovieProvider +from couchpotato.core.media.movie.providers.base import MovieProvider import tmdb3 -import traceback log = CPLog(__name__) +autoload = 'TheMovieDb' + class TheMovieDb(MovieProvider): + MAX_EXTRATHUMBS = 4 def __init__(self): - #addEvent('info.search', self.search, priority = 2) - #addEvent('movie.search', self.search, priority = 2) - addEvent('movie.info', self.getInfo, priority = 2) + addEvent('movie.info', self.getInfo, priority = 3) addEvent('movie.info_by_tmdb', self.getInfo) # Configure TMDB settings @@ -73,6 +75,7 @@ class TheMovieDb(MovieProvider): if not result: try: log.debug('Getting info: %s', cache_key) + # noinspection PyArgumentList movie = tmdb3.Movie(identifier) try: exists = movie.title is not None except: exists = False @@ -95,16 
+98,18 @@ class TheMovieDb(MovieProvider): if not movie_data: # Images - poster = self.getImage(movie, type = 'poster', size = 'poster') + poster = self.getImage(movie, type = 'poster', size = 'w154') poster_original = self.getImage(movie, type = 'poster', size = 'original') backdrop_original = self.getImage(movie, type = 'backdrop', size = 'original') + extra_thumbs = self.getMultImages(movie, type = 'backdrops', size = 'original', n = self.MAX_EXTRATHUMBS, skipfirst = True) images = { 'poster': [poster] if poster else [], #'backdrop': [backdrop] if backdrop else [], 'poster_original': [poster_original] if poster_original else [], 'backdrop_original': [backdrop_original] if backdrop_original else [], - 'actors': {} + 'actors': {}, + 'extra_thumbs': extra_thumbs } # Genres @@ -148,8 +153,10 @@ class TheMovieDb(MovieProvider): movie_data = dict((k, v) for k, v in movie_data.items() if v) # Add alternative names + if movie_data['original_title'] and movie_data['original_title'] not in movie_data['titles']: + movie_data['titles'].insert(0, movie_data['original_title']) + if extended: - movie_data['titles'].append(movie.originaltitle) for alt in movie.alternate_titles: alt_name = alt.title if alt_name and alt_name not in movie_data['titles'] and alt_name.lower() != 'none' and alt_name is not None: @@ -170,8 +177,53 @@ class TheMovieDb(MovieProvider): return image_url + def getMultImages(self, movie, type = 'backdrops', size = 'original', n = -1, skipfirst = False): + """ + If n < 0, return all images. Otherwise return n images. + If n > len(getattr(movie, type)), then return all images. + If skipfirst is True, then it will skip getattr(movie, type)[0]. This + is because backdrops[0] is typically backdrop. + """ + + image_urls = [] + try: + images = getattr(movie, type) + if n < 0 or n > len(images): + num_images = len(images) + else: + num_images = n + + for i in range(int(skipfirst), num_images + int(skipfirst)): + image_urls.append(images[i].geturl(size = size)) + + except: + log.debug('Failed getting %i %s.%s for "%s"', (n, type, size, ss(str(movie)))) + + return image_urls + def isDisabled(self): if self.conf('api_key') == '': log.error('No API key provided.') return True return False + + +config = [{ + 'name': 'themoviedb', + 'groups': [ + { + 'tab': 'providers', + 'name': 'tmdb', + 'label': 'TheMovieDB', + 'hidden': True, + 'description': 'Used for all calls to TheMovieDB.', + 'options': [ + { + 'name': 'api_key', + 'default': '9b939aee0aaafc12a65bf448e4af9543', + 'label': 'Api Key', + }, + ], + }, + ], +}] diff --git a/libs/migrate/versioning/templates/__init__.py b/couchpotato/core/media/movie/providers/metadata/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/__init__.py rename to couchpotato/core/media/movie/providers/metadata/__init__.py diff --git a/couchpotato/core/media/movie/providers/metadata/base.py b/couchpotato/core/media/movie/providers/metadata/base.py new file mode 100644 index 00000000..7968000b --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/base.py @@ -0,0 +1,187 @@ +import os +import shutil +import traceback + +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import getIdentifier, underscoreToCamel +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.metadata.base import MetaDataBase +from couchpotato.environment import Env + + +log = CPLog(__name__) + + +class MovieMetaData(MetaDataBase): 
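# Note: condensed sketch of TheMovieDb.getMultImages above: return up to n
# image URLs at the requested size, optionally skipping index 0 because
# backdrops[0] usually doubles as the main backdrop. geturl() is the tmdb3
# image accessor already used in the diff.
def mult_image_urls(images, size = 'original', n = -1, skipfirst = False):
    count = len(images) if (n < 0 or n > len(images)) else n
    start = 1 if skipfirst else 0
    return [img.geturl(size = size) for img in images[start:start + count]]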
+ + enabled_option = 'meta_enabled' + + def __init__(self): + addEvent('renamer.after', self.create) + + def create(self, message = None, group = None): + if self.isDisabled(): return + if not group: group = {} + + log.info('Creating %s metadata.', self.getName()) + + # Update library to get latest info + try: + group['media'] = fireEvent('movie.update_info', group['media'].get('_id'), identifier = getIdentifier(group['media']), extended = True, single = True) + except: + log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) + + root_name = self.getRootName(group) + meta_name = os.path.basename(root_name) + root = os.path.dirname(root_name) + + movie_info = group['media'].get('info') + + for file_type in ['nfo']: + try: + self._createType(meta_name, root, movie_info, group, file_type, 0) + except: + log.error('Unable to create %s file: %s', ('nfo', traceback.format_exc())) + + for file_type in ['thumbnail', 'fanart', 'banner', 'disc_art', 'logo', 'clear_art', 'landscape', 'extra_thumbs', 'extra_fanart']: + try: + if file_type == 'thumbnail': + num_images = len(movie_info['images']['poster_original']) + elif file_type == 'fanart': + num_images = len(movie_info['images']['backdrop_original']) + else: + num_images = len(movie_info['images'][file_type]) + + for i in range(num_images): + self._createType(meta_name, root, movie_info, group, file_type, i) + except: + log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) + + def _createType(self, meta_name, root, movie_info, group, file_type, i): # Get file path + camelcase_method = underscoreToCamel(file_type.capitalize()) + name = getattr(self, 'get' + camelcase_method + 'Name')(meta_name, root, i) + + if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None): + + # Get file content + content = getattr(self, 'get' + camelcase_method)(movie_info = movie_info, data = group, i = i) + if content: + log.debug('Creating %s file: %s', (file_type, name)) + if os.path.isfile(content): + content = sp(content) + name = sp(name) + + if not os.path.exists(os.path.dirname(name)): + os.makedirs(os.path.dirname(name)) + + shutil.copy2(content, name) + shutil.copyfile(content, name) + + # Try and copy stats seperately + try: shutil.copystat(content, name) + except: pass + else: + self.createFile(name, content) + group['renamed_files'].append(name) + + try: + os.chmod(sp(name), Env.getPermission('file')) + except: + log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc())) + + def getRootName(self, data = None): + if not data: data = {} + return os.path.join(data['destination_dir'], data['filename']) + + def getFanartName(self, name, root, i): + return + + def getThumbnailName(self, name, root, i): + return + + def getBannerName(self, name, root, i): + return + + def getClearArtName(self, name, root, i): + return + + def getLogoName(self, name, root, i): + return + + def getDiscArtName(self, name, root, i): + return + + def getLandscapeName(self, name, root, i): + return + + def getExtraThumbsName(self, name, root, i): + return + + def getExtraFanartName(self, name, root, i): + return + + def getNfoName(self, name, root, i): + return + + def getNfo(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + + def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original', i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + + # See if it is in current 
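# Note: sketch of the method dispatch in MovieMetaData._createType above; a
# file_type such as 'disc_art' resolves to the getDiscArtName/getDiscArt
# pair. The helper below stands in for underscoreToCamel.
def meta_method_names(file_type):
    camel = ''.join(part.capitalize() for part in file_type.split('_'))
    return 'get%sName' % camel, 'get%s' % camel

# meta_method_names('disc_art') -> ('getDiscArtName', 'getDiscArt')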
files + files = data['media'].get('files') + if files.get('image_' + wanted_file_type): + if os.path.isfile(files['image_' + wanted_file_type][i]): + return files['image_' + wanted_file_type][i] + + # Download using existing info + try: + images = movie_info['images'][wanted_file_type] + file_path = fireEvent('file.download', url = images[i], single = True) + return file_path + except: + pass + + def getFanart(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original', i = i) + + def getBanner(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'banner', i = i) + + def getClearArt(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'clear_art', i = i) + + def getLogo(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'logo', i = i) + + def getDiscArt(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'disc_art', i = i) + + def getLandscape(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data= data, wanted_file_type = 'landscape', i = i) + + def getExtraThumbs(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_thumbs', i = i) + + def getExtraFanart(self, movie_info = None, data = None, i = 0): + if not data: data = {} + if not movie_info: movie_info = {} + return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'extra_fanart', i = i) diff --git a/couchpotato/core/media/movie/providers/metadata/mediabrowser.py b/couchpotato/core/media/movie/providers/metadata/mediabrowser.py new file mode 100644 index 00000000..6e40e4c1 --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/mediabrowser.py @@ -0,0 +1,36 @@ +import os + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData + + +autoload = 'MediaBrowser' + + +class MediaBrowser(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return os.path.join(root, 'folder.jpg') + + def getFanartName(self, name, root, i): + return os.path.join(root, 'backdrop.jpg') + + +config = [{ + 'name': 'mediabrowser', + 'groups': [ + { + 'tab': 'renamer', + 'subtab': 'metadata', + 'name': 'mediabrowser_metadata', + 'label': 'MediaBrowser', + 'description': 'Generate folder.jpg and backdrop.jpg', + 'options': [ + { + 'name': 'meta_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/providers/metadata/ps3.py b/couchpotato/core/media/movie/providers/metadata/ps3.py new file mode 100644 index 00000000..05df0a53 --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/ps3.py @@ -0,0 +1,33 @@ +import os + +from couchpotato.core.media.movie.providers.metadata.base 
import MovieMetaData + + +autoload = 'SonyPS3' + + +class SonyPS3(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return os.path.join(root, 'cover.jpg') + + +config = [{ + 'name': 'sonyps3', + 'groups': [ + { + 'tab': 'renamer', + 'subtab': 'metadata', + 'name': 'sonyps3_metadata', + 'label': 'Sony PS3', + 'description': 'Generate cover.jpg', + 'options': [ + { + 'name': 'meta_enabled', + 'default': False, + 'type': 'enabler', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/providers/metadata/wmc/__init__.py b/couchpotato/core/media/movie/providers/metadata/wmc.py similarity index 66% rename from couchpotato/core/providers/metadata/wmc/__init__.py rename to couchpotato/core/media/movie/providers/metadata/wmc.py index 167a24d7..3cb9e3c7 100644 --- a/couchpotato/core/providers/metadata/wmc/__init__.py +++ b/couchpotato/core/media/movie/providers/metadata/wmc.py @@ -1,8 +1,16 @@ -from .main import WindowsMediaCenter +import os + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData -def start(): - return WindowsMediaCenter() +autoload = 'WindowsMediaCenter' + + +class WindowsMediaCenter(MovieMetaData): + + def getThumbnailName(self, name, root, i): + return os.path.join(root, 'folder.jpg') + config = [{ 'name': 'windowsmediacenter', diff --git a/couchpotato/core/media/movie/providers/metadata/xbmc.py b/couchpotato/core/media/movie/providers/metadata/xbmc.py new file mode 100644 index 00000000..ff0c119f --- /dev/null +++ b/couchpotato/core/media/movie/providers/metadata/xbmc.py @@ -0,0 +1,370 @@ +from xml.etree.ElementTree import Element, SubElement, tostring +import os +import re +import traceback +import xml.dom.minidom + +from couchpotato.core.media.movie.providers.metadata.base import MovieMetaData +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog + + +log = CPLog(__name__) + +autoload = 'XBMC' + + +class XBMC(MovieMetaData): + + def getFanartName(self, name, root, i): + return self.createMetaName(self.conf('meta_fanart_name'), name, root) + + def getThumbnailName(self, name, root, i): + return self.createMetaName(self.conf('meta_thumbnail_name'), name, root) + + def getNfoName(self, name, root, i): + return self.createMetaName(self.conf('meta_nfo_name'), name, root) + + def getBannerName(self, name, root, i): + return self.createMetaName(self.conf('meta_banner_name'), name, root) + + def getClearArtName(self, name, root, i): + return self.createMetaName(self.conf('meta_clear_art_name'), name, root) + + def getLogoName(self, name, root, i): + return self.createMetaName(self.conf('meta_logo_name'), name, root) + + def getDiscArtName(self, name, root, i): + return self.createMetaName(self.conf('meta_disc_art_name'), name, root) + + def getLandscapeName(self, name, root, i): + return self.createMetaName(self.conf('meta_landscape_name'), name, root) + + def getExtraThumbsName(self, name, root, i): + return self.createMetaNameMult(self.conf('meta_extra_thumbs_name'), name, root, i) + + def getExtraFanartName(self, name, root, i): + return self.createMetaNameMult(self.conf('meta_extra_fanart_name'), name, root, i) + + def createMetaName(self, basename, name, root): + return os.path.join(root, basename.replace('%s', name)) + + def createMetaNameMult(self, basename, name, root, i): + return os.path.join(root, basename.replace('%s', name).replace('', str(i + 1))) + + def getNfo(self, movie_info=None, data=None, i=0): + if not data: data = {} + if 
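# Note: sketch of the meta-filename templating in XBMC.createMetaName and
# createMetaNameMult above: '%s' becomes the movie's root name and, for the
# multi-image variants, an '<i>' placeholder becomes the 1-based image
# number (the '<i>' token is assumed from the option descriptions in the
# config below, which state it "is the image number").
import os

def meta_file_name(template, name, root, i = None):
    out = template.replace('%s', name)
    if i is not None:
        out = out.replace('<i>', str(i + 1))
    return os.path.join(root, out)

# meta_file_name('extrathumbs/thumb<i>.jpg', 'Movie (2010)', '/movies', 0)
# -> '/movies/extrathumbs/thumb1.jpg'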
not movie_info: movie_info = {}
+
+        # return imdb url only
+        if self.conf('meta_url_only'):
+            return 'http://www.imdb.com/title/%s/' % toUnicode(data['identifier'])
+
+        nfoxml = Element('movie')
+
+        # Title
+        try:
+            el = SubElement(nfoxml, 'title')
+            el.text = toUnicode(getTitle(data))
+        except:
+            pass
+
+        # IMDB id
+        try:
+            el = SubElement(nfoxml, 'id')
+            el.text = toUnicode(data['identifier'])
+        except:
+            pass
+
+        # Runtime
+        try:
+            runtime = SubElement(nfoxml, 'runtime')
+            runtime.text = '%s min' % movie_info.get('runtime')
+        except:
+            pass
+
+        # Other values
+        types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released']
+        for type in types:
+
+            if ':' in type:
+                name, type = type.split(':')
+            else:
+                name = type
+
+            try:
+                if movie_info.get(type):
+                    el = SubElement(nfoxml, name)
+                    el.text = toUnicode(movie_info.get(type, ''))
+            except:
+                pass
+
+        # Rating
+        for rating_type in ['imdb', 'rotten', 'tmdb']:
+            try:
+                r, v = movie_info['rating'][rating_type]
+                rating = SubElement(nfoxml, 'rating')
+                rating.text = str(r)
+                votes = SubElement(nfoxml, 'votes')
+                votes.text = str(v)
+                break
+            except:
+                log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc()))
+
+        # Genre
+        for genre in movie_info.get('genres', []):
+            genres = SubElement(nfoxml, 'genre')
+            genres.text = toUnicode(genre)
+
+        # Actors
+        for actor_name in movie_info.get('actor_roles', {}):
+            role_name = movie_info['actor_roles'][actor_name]
+
+            actor = SubElement(nfoxml, 'actor')
+            name = SubElement(actor, 'name')
+            name.text = toUnicode(actor_name)
+            if role_name:
+                role = SubElement(actor, 'role')
+                role.text = toUnicode(role_name)
+            if movie_info['images']['actors'].get(actor_name):
+                thumb = SubElement(actor, 'thumb')
+                thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name))
+
+        # Directors
+        for director_name in movie_info.get('directors', []):
+            director = SubElement(nfoxml, 'director')
+            director.text = toUnicode(director_name)
+
+        # Writers
+        for writer in movie_info.get('writers', []):
+            writers = SubElement(nfoxml, 'credits')
+            writers.text = toUnicode(writer)
+
+        # Sets or collections
+        collection_name = movie_info.get('collection')
+        if collection_name:
+            collection = SubElement(nfoxml, 'set')
+            collection.text = toUnicode(collection_name)
+            sorttitle = SubElement(nfoxml, 'sorttitle')
+            sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year'))
+
+        # Images
+        for image_url in movie_info['images']['poster_original']:
+            image = SubElement(nfoxml, 'thumb')
+            image.text = toUnicode(image_url)
+
+        image_types = [
+            ('fanart', 'backdrop_original'),
+            ('banner', 'banner'),
+            ('discart', 'disc_art'),
+            ('logo', 'logo'),
+            ('clearart', 'clear_art'),
+            ('landscape', 'landscape'),
+            ('extrathumb', 'extra_thumbs'),
+            ('extrafanart', 'extra_fanart'),
+        ]
+
+        for image_type in image_types:
+            sub, type = image_type
+
+            sub_element = SubElement(nfoxml, sub)
+            for image_url in movie_info['images'][type]:
+                image = SubElement(sub_element, 'thumb')
+                image.text = toUnicode(image_url)
+
+        # Add trailer if found
+        trailer_found = False
+        if data.get('renamed_files'):
+            for filename in data.get('renamed_files'):
+                if 'trailer' in filename:
+                    trailer = SubElement(nfoxml, 'trailer')
+                    trailer.text = toUnicode(filename)
+                    trailer_found = True
+        if not trailer_found and data['files'].get('trailer'):
+            trailer = SubElement(nfoxml, 'trailer')
+            trailer.text = toUnicode(data['files']['trailer'][0])
+
+        # Add file metadata
+        fileinfo = SubElement(nfoxml, 'fileinfo')
+        streamdetails = SubElement(fileinfo, 'streamdetails')
+
+        # Video data
+        if data['meta_data'].get('video'):
+            video = SubElement(streamdetails, 'video')
+            codec = SubElement(video, 'codec')
+            codec.text = toUnicode(data['meta_data']['video'])
+            aspect = SubElement(video, 'aspect')
+            aspect.text = str(data['meta_data']['aspect'])
+            width = SubElement(video, 'width')
+            width.text = str(data['meta_data']['resolution_width'])
+            height = SubElement(video, 'height')
+            height.text = str(data['meta_data']['resolution_height'])
+
+        # Audio data
+        if data['meta_data'].get('audio'):
+            audio = SubElement(streamdetails, 'audio')
+            codec = SubElement(audio, 'codec')
+            codec.text = toUnicode(data['meta_data'].get('audio'))
+            channels = SubElement(audio, 'channels')
+            channels.text = toUnicode(data['meta_data'].get('audio_channels'))
+
+        # Clean up the xml and return it
+        nfoxml = xml.dom.minidom.parseString(tostring(nfoxml))
+        xml_string = nfoxml.toprettyxml(indent = '  ')
+        text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
+        xml_string = text_re.sub('>\g<1></', xml_string)
+
+        return xml_string.encode('utf-8')
+
+
+config = [{
+    'name': 'xbmc',
+    'groups': [
+        {
+            'tab': 'renamer',
+            'subtab': 'metadata',
+            'name': 'xbmc_metadata',
+            'label': 'XBMC',
+            'options': [
+                {
+                    'name': 'meta_enabled',
+                    'default': False,
+                    'type': 'enabler',
+                },
+                {
+                    'name': 'meta_nfo',
+                    'label': 'NFO',
+                    'default': True,
+                    'type': 'bool',
+                },
+                {
+                    'name': 'meta_nfo_name',
+                    'label': 'NFO filename',
+                    'default': '%s.nfo',
+                    'advanced': True,
+                    'description': '<strong>%s</strong> is the rootname of the movie. For example "/path/to/movie cd1.mkv" will be "/path/to/movie"'
+                },
+                {
+                    'name': 'meta_url_only',
+                    'label': 'Only IMDB URL',
+                    'default': False,
+                    'advanced': True,
+                    'description': 'Create a nfo with only the IMDB url inside',
+                    'type': 'bool',
+                },
+                {
+                    'name': 'meta_fanart',
+                    'label': 'Fanart',
+                    'default': True,
+                    'type': 'bool',
+                },
+                {
+                    'name': 'meta_fanart_name',
+                    'label': 'Fanart filename',
+                    'default': '%s-fanart.jpg',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_thumbnail',
+                    'label': 'Thumbnail',
+                    'default': True,
+                    'type': 'bool',
+                },
+                {
+                    'name': 'meta_thumbnail_name',
+                    'label': 'Thumbnail filename',
+                    'default': '%s.tbn',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_banner',
+                    'label': 'Banner',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_banner_name',
+                    'label': 'Banner filename',
+                    'default': 'banner.jpg',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_clear_art',
+                    'label': 'ClearArt',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_clear_art_name',
+                    'label': 'ClearArt filename',
+                    'default': 'clearart.png',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_disc_art',
+                    'label': 'DiscArt',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_disc_art_name',
+                    'label': 'DiscArt filename',
+                    'default': 'disc.png',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_landscape',
+                    'label': 'Landscape',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_landscape_name',
+                    'label': 'Landscape filename',
+                    'default': 'landscape.jpg',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_logo',
+                    'label': 'ClearLogo',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_logo_name',
+                    'label': 'ClearLogo filename',
+                    'default': 'logo.png',
+                    'advanced': True,
+                },
+                {
+                    'name': 'meta_extra_thumbs',
+                    'label': 'Extrathumbs',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_extra_thumbs_name',
+                    'label': 'Extrathumbs filename',
+                    'description': '<i> is the image number, and must be included to have multiple images',
+                    'default': 'extrathumbs/thumb.jpg',
+                    'advanced': True
+                },
+                {
+                    'name': 'meta_extra_fanart',
+                    'label': 'Extrafanart',
+                    'default': False,
+                    'type': 'bool'
+                },
+                {
+                    'name': 'meta_extra_fanart_name',
+                    'label': 'Extrafanart filename',
+                    'default': 'extrafanart/extrafanart.jpg',
+                    'description': '<i> is the image number, and must be included to have multiple images',
+                    'advanced': True
+                }
+            ],
+        },
+    ],
+}]
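The whitespace cleanup above is a common minidom workaround: toprettyxml() puts each text node on its own indented line, and the text_re substitution folds the text back between its tags so players get clean single-line values. A minimal standalone sketch of the same idiom (the sample elements are illustrative, not part of this patch):

import re
import xml.dom.minidom
from xml.etree.ElementTree import Element, SubElement, tostring

# Build a tiny <movie> document, the same shape getNfo() produces
nfoxml = Element('movie')
title = SubElement(nfoxml, 'title')
title.text = 'Example Movie'
year = SubElement(nfoxml, 'year')
year.text = '2014'

# minidom pretty-printing puts 'Example Movie' on its own indented line...
pretty = xml.dom.minidom.parseString(tostring(nfoxml)).toprettyxml(indent = '  ')

# ...and this folds it back to <title>Example Movie</title>
text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL)
print(text_re.sub('>\g<1></', pretty))

diff --git 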
a/libs/migrate/versioning/templates/repository/__init__.py b/couchpotato/core/media/movie/providers/nzb/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/__init__.py rename to couchpotato/core/media/movie/providers/nzb/__init__.py diff --git a/libs/migrate/versioning/templates/repository/default/__init__.py b/couchpotato/core/media/movie/providers/nzb/base.py similarity index 100% rename from libs/migrate/versioning/templates/repository/default/__init__.py rename to couchpotato/core/media/movie/providers/nzb/base.py diff --git a/couchpotato/core/media/movie/providers/nzb/binsearch.py b/couchpotato/core/media/movie/providers/nzb/binsearch.py new file mode 100644 index 00000000..d6f48522 --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/binsearch.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.binsearch import Base +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'BinSearch' + + +class BinSearch(MovieProvider, Base): + + def buildUrl(self, media, quality): + query = tryUrlencode({ + 'q': getIdentifier(media), + 'm': 'n', + 'max': 400, + 'adv_age': Env.setting('retention', 'nzb'), + 'adv_sort': 'date', + 'adv_col': 'on', + 'adv_nfo': 'on', + 'minsize': quality.get('size_min'), + 'maxsize': quality.get('size_max'), + }) + return query diff --git a/couchpotato/core/media/movie/providers/nzb/newznab.py b/couchpotato/core/media/movie/providers/nzb/newznab.py new file mode 100644 index 00000000..fc94acbf --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/newznab.py @@ -0,0 +1,26 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.newznab import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Newznab' + + +class Newznab(MovieProvider, Base): + + def buildUrl(self, media, host): + + query = tryUrlencode({ + 't': 'movie', + 'imdbid': getIdentifier(media).replace('tt', ''), + 'apikey': host['api_key'], + 'extended': 1 + }) + + if len(host.get('custom_tag', '')) > 0: + query = '%s&%s' % (query, host.get('custom_tag')) + + return query diff --git a/couchpotato/core/media/movie/providers/nzb/nzbclub.py b/couchpotato/core/media/movie/providers/nzb/nzbclub.py new file mode 100644 index 00000000..2a43ba2f --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/nzbclub.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.nzb.nzbclub import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'NZBClub' + + +class NZBClub(MovieProvider, Base): + + def buildUrl(self, media): + + q = tryUrlencode({ + 'q': '"%s"' % fireEvent('library.query', media, single = True), + }) + + query = tryUrlencode({ + 'ig': 1, + 'rpp': 200, + 'st': 5, + 'sp': 1, + 'ns': 1, + }) + return '%s&%s' % (q, query) diff --git a/couchpotato/core/media/movie/providers/nzb/nzbindex.py b/couchpotato/core/media/movie/providers/nzb/nzbindex.py new 
file mode 100644 index 00000000..70e939dc --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/nzbindex.py @@ -0,0 +1,30 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.nzb.nzbindex import Base +from couchpotato.core.media.movie.providers.base import MovieProvider +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'NzbIndex' + + +class NzbIndex(MovieProvider, Base): + + def buildUrl(self, media, quality): + title = fireEvent('library.query', media, include_year = False, single = True) + year = media['info']['year'] + + query = tryUrlencode({ + 'q': '"%s %s" | "%s (%s)"' % (title, year, title, year), + 'age': Env.setting('retention', 'nzb'), + 'sort': 'agedesc', + 'minsize': quality.get('size_min'), + 'maxsize': quality.get('size_max'), + 'rating': 1, + 'max': 250, + 'more': 1, + 'complete': 1, + }) + return query diff --git a/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py b/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py new file mode 100644 index 00000000..f4527f6d --- /dev/null +++ b/couchpotato/core/media/movie/providers/nzb/omgwtfnzbs.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.nzb.omgwtfnzbs import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'OMGWTFNZBs' + + +class OMGWTFNZBs(MovieProvider, Base): + pass diff --git a/libs/migrate/versioning/templates/repository/default/versions/__init__.py b/couchpotato/core/media/movie/providers/torrent/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/repository/default/versions/__init__.py rename to couchpotato/core/media/movie/providers/torrent/__init__.py diff --git a/couchpotato/core/media/movie/providers/torrent/awesomehd.py b/couchpotato/core/media/movie/providers/torrent/awesomehd.py new file mode 100644 index 00000000..b1c81f18 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/awesomehd.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.awesomehd import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'AwesomeHD' + + +class AwesomeHD(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/bithdtv.py b/couchpotato/core/media/movie/providers/torrent/bithdtv.py new file mode 100644 index 00000000..da6954c8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/bithdtv.py @@ -0,0 +1,23 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.torrent.bithdtv import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'BiTHDTV' + + +class BiTHDTV(MovieProvider, Base): + cat_ids = [ + ([2], ['bd50']), + ] + cat_backup_id = 7 # Movies + + def buildUrl(self, media, quality): + query = tryUrlencode({ + 'search': fireEvent('library.query', media, single = True), + 'cat': self.getCatId(quality)[0] + }) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/bitsoup.py b/couchpotato/core/media/movie/providers/torrent/bitsoup.py new file mode 100644 index 00000000..e9d69fe5 --- 
/dev/null +++ b/couchpotato/core/media/movie/providers/torrent/bitsoup.py @@ -0,0 +1,25 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.bitsoup import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'Bitsoup' + + +class Bitsoup(MovieProvider, Base): + cat_ids = [ + ([17], ['3d']), + ([41], ['720p', '1080p']), + ([20], ['dvdr']), + ([19], ['brrip', 'dvdrip']), + ] + cat_backup_id = 0 + + def buildUrl(self, title, media, quality): + query = tryUrlencode({ + 'search': '"%s" %s' % (title, media['info']['year']), + 'cat': self.getCatId(quality)[0], + }) + return query diff --git a/couchpotato/core/media/movie/providers/torrent/hdbits.py b/couchpotato/core/media/movie/providers/torrent/hdbits.py new file mode 100644 index 00000000..016f1a12 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/hdbits.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.hdbits import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'HDBits' + + +class HDBits(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py b/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py new file mode 100644 index 00000000..cfd773ad --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/ilovetorrents.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.ilovetorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'ILoveTorrents' + + +class ILoveTorrents(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/iptorrents.py b/couchpotato/core/media/movie/providers/torrent/iptorrents.py new file mode 100644 index 00000000..89aeee80 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/iptorrents.py @@ -0,0 +1,23 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.iptorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'IPTorrents' + + +class IPTorrents(MovieProvider, Base): + + cat_ids = [ + ([87], ['3d']), + ([48], ['720p', '1080p', 'bd50']), + ([72], ['cam', 'ts', 'tc', 'r5', 'scr']), + ([7], ['dvdrip', 'brrip']), + ([6], ['dvdr']), + ] + + def buildUrl(self, title, media, quality): + query = '"%s" %s' % (title.replace(':', ''), media['info']['year']) + + return self._buildUrl(query, quality) diff --git a/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py b/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py new file mode 100644 index 00000000..2b9b1969 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/kickasstorrents.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.kickasstorrents import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'KickAssTorrents' + + +class KickAssTorrents(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py new file mode 100644 
index 00000000..bbaea265 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/passthepopcorn.py @@ -0,0 +1,38 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.passthepopcorn import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'PassThePopcorn' + + +class PassThePopcorn(MovieProvider, Base): + + quality_search_params = { + 'bd50': {'media': 'Blu-ray', 'format': 'BD50'}, + '1080p': {'resolution': '1080p'}, + '720p': {'resolution': '720p'}, + 'brrip': {'media': 'Blu-ray'}, + 'dvdr': {'resolution': 'anysd'}, + 'dvdrip': {'media': 'DVD'}, + 'scr': {'media': 'DVD-Screener'}, + 'r5': {'media': 'R5'}, + 'tc': {'media': 'TC'}, + 'ts': {'media': 'TS'}, + 'cam': {'media': 'CAM'} + } + + post_search_filters = { + 'bd50': {'Codec': ['BD50']}, + '1080p': {'Resolution': ['1080p']}, + '720p': {'Resolution': ['720p']}, + 'brrip': {'Source': ['Blu-ray'], 'Quality': ['High Definition'], 'Container': ['!ISO']}, + 'dvdr': {'Codec': ['DVD5', 'DVD9']}, + 'dvdrip': {'Source': ['DVD'], 'Codec': ['!DVD5', '!DVD9']}, + 'scr': {'Source': ['DVD-Screener']}, + 'r5': {'Source': ['R5']}, + 'tc': {'Source': ['TC']}, + 'ts': {'Source': ['TS']}, + 'cam': {'Source': ['CAM']} + } diff --git a/couchpotato/core/media/movie/providers/torrent/sceneaccess.py b/couchpotato/core/media/movie/providers/torrent/sceneaccess.py new file mode 100644 index 00000000..579103af --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/sceneaccess.py @@ -0,0 +1,29 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.sceneaccess import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'SceneAccess' + + +class SceneAccess(MovieProvider, Base): + + cat_ids = [ + ([22], ['720p', '1080p']), + ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), + ([8], ['dvdr']), + ] + + def buildUrl(self, title, media, quality): + cat_id = self.getCatId(quality)[0] + url = self.urls['search'] % (cat_id, cat_id) + + arguments = tryUrlencode({ + 'search': '%s %s' % (title, media['info']['year']), + 'method': 2, + }) + query = "%s&%s" % (url, arguments) + + return query diff --git a/couchpotato/core/media/movie/providers/torrent/thepiratebay.py b/couchpotato/core/media/movie/providers/torrent/thepiratebay.py new file mode 100644 index 00000000..0dc8313d --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/thepiratebay.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.torrent.thepiratebay import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'ThePirateBay' + + +class ThePirateBay(MovieProvider, Base): + + cat_ids = [ + ([209], ['3d']), + ([207], ['720p', '1080p', 'bd50']), + ([201], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr']), + ([201, 207], ['brrip']), + ([202], ['dvdr']) + ] + + def buildUrl(self, media, page, cats): + return ( + tryUrlencode('"%s"' % fireEvent('library.query', media, single = True)), + page, + ','.join(str(x) for x in cats) + ) diff --git a/couchpotato/core/media/movie/providers/torrent/torrentbytes.py b/couchpotato/core/media/movie/providers/torrent/torrentbytes.py new file mode 100644 index 
00000000..48fc68a4 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentbytes.py @@ -0,0 +1,11 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentbytes import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentBytes' + + +class TorrentBytes(MovieProvider, Base): + pass diff --git a/couchpotato/core/media/movie/providers/torrent/torrentday.py b/couchpotato/core/media/movie/providers/torrent/torrentday.py new file mode 100644 index 00000000..768d3043 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentday.py @@ -0,0 +1,17 @@ +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentday import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentDay' + + +class TorrentDay(MovieProvider, Base): + + cat_ids = [ + ([11], ['720p', '1080p']), + ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), + ([3], ['dvdr']), + ([5], ['bd50']), + ] diff --git a/couchpotato/core/media/movie/providers/torrent/torrentleech.py b/couchpotato/core/media/movie/providers/torrent/torrentleech.py new file mode 100644 index 00000000..191ceba8 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentleech.py @@ -0,0 +1,27 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentleech import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentLeech' + + +class TorrentLeech(MovieProvider, Base): + + cat_ids = [ + ([13], ['720p', '1080p']), + ([8], ['cam']), + ([9], ['ts', 'tc']), + ([10], ['r5', 'scr']), + ([11], ['dvdrip']), + ([14], ['brrip']), + ([12], ['dvdr']), + ] + + def buildUrl(self, title, media, quality): + return ( + tryUrlencode(title.replace(':', '')), + self.getCatId(quality)[0] + ) diff --git a/couchpotato/core/media/movie/providers/torrent/torrentpotato.py b/couchpotato/core/media/movie/providers/torrent/torrentpotato.py new file mode 100644 index 00000000..67573537 --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentpotato.py @@ -0,0 +1,20 @@ +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.helpers.variable import getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentpotato import Base +from couchpotato.core.media.movie.providers.base import MovieProvider + +log = CPLog(__name__) + +autoload = 'TorrentPotato' + + +class TorrentPotato(MovieProvider, Base): + + def buildUrl(self, media, host): + arguments = tryUrlencode({ + 'user': host['name'], + 'passkey': host['pass_key'], + 'imdbid': getIdentifier(media), + }) + return '%s?%s' % (host['host'], arguments) diff --git a/couchpotato/core/media/movie/providers/torrent/torrentshack.py b/couchpotato/core/media/movie/providers/torrent/torrentshack.py new file mode 100644 index 00000000..01eb6d6a --- /dev/null +++ b/couchpotato/core/media/movie/providers/torrent/torrentshack.py @@ -0,0 +1,36 @@ +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.encoding import tryUrlencode +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.torrent.torrentshack import Base +from 
couchpotato.core.media.movie.providers.base import MovieProvider
+
+log = CPLog(__name__)
+
+autoload = 'TorrentShack'
+
+
+class TorrentShack(MovieProvider, Base):
+
+    # TorrentShack movie search categories
+    #   Movies/x264 - 300
+    #   Movies/DVD-R - 350
+    #   Movies/XviD - 400
+    #   Full Blu-ray - 970
+    #
+    #   REMUX - 320 (not included)
+    #   Movies-HD Pack - 982 (not included)
+    #   Movies-SD Pack - 983 (not included)
+
+    cat_ids = [
+        ([970], ['bd50']),
+        ([300], ['720p', '1080p']),
+        ([350], ['dvdr']),
+        ([400], ['brrip', 'dvdrip']),
+    ]
+    cat_backup_id = 400
+
+    def buildUrl(self, media, quality):
+        query = (tryUrlencode(fireEvent('library.query', media, single = True)),
+                 self.getSceneOnly(),
+                 self.getCatId(quality)[0])
+        return query
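The cat_ids tables used by these torrent providers map CouchPotato quality identifiers to site-specific category ids, with cat_backup_id as the fallback. getCatId() itself lives in the shared provider base and is not part of this patch; a simplified stand-in showing the lookup it performs (an assumption for illustration, not the real implementation):

# Simplified stand-in for the provider base's getCatId() lookup (assumed behaviour)
cat_ids = [
    ([970], ['bd50']),
    ([300], ['720p', '1080p']),
    ([350], ['dvdr']),
    ([400], ['brrip', 'dvdrip']),
]
cat_backup_id = 400

def get_cat_id(quality_identifier):
    # First tuple whose quality list contains the identifier wins
    for site_categories, qualities in cat_ids:
        if quality_identifier in qualities:
            return site_categories
    # Nothing matched: fall back to the catch-all category
    return [cat_backup_id]

assert get_cat_id('1080p') == [300]
assert get_cat_id('cam') == [400]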
diff --git a/couchpotato/core/media/movie/providers/torrent/torrentz.py b/couchpotato/core/media/movie/providers/torrent/torrentz.py
new file mode 100644
index 00000000..742554c4
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/torrent/torrentz.py
@@ -0,0 +1,15 @@
+from couchpotato.core.helpers.encoding import tryUrlencode
+from couchpotato.core.logger import CPLog
+from couchpotato.core.event import fireEvent
+from couchpotato.core.media._base.providers.torrent.torrentz import Base
+from couchpotato.core.media.movie.providers.base import MovieProvider
+
+log = CPLog(__name__)
+
+autoload = 'Torrentz'
+
+
+class Torrentz(MovieProvider, Base):
+
+    def buildUrl(self, media):
+        return tryUrlencode('"%s"' % fireEvent('library.query', media, single = True))
diff --git a/couchpotato/core/media/movie/providers/torrent/yify.py b/couchpotato/core/media/movie/providers/torrent/yify.py
new file mode 100644
index 00000000..d30132d6
--- /dev/null
+++ b/couchpotato/core/media/movie/providers/torrent/yify.py
@@ -0,0 +1,11 @@
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.torrent.yify import Base
+from couchpotato.core.media.movie.providers.base import MovieProvider
+
+log = CPLog(__name__)
+
+autoload = 'Yify'
+
+
+class Yify(MovieProvider, Base):
+    pass
diff --git a/libs/migrate/versioning/templates/repository/pylons/__init__.py b/couchpotato/core/media/movie/providers/trailer/__init__.py
similarity index 100%
rename from libs/migrate/versioning/templates/repository/pylons/__init__.py
rename to couchpotato/core/media/movie/providers/trailer/__init__.py
diff --git a/couchpotato/core/providers/trailer/base.py b/couchpotato/core/media/movie/providers/trailer/base.py
similarity index 66%
rename from couchpotato/core/providers/trailer/base.py
rename to couchpotato/core/media/movie/providers/trailer/base.py
index 338ca9b3..9cd1dbe6 100644
--- a/couchpotato/core/providers/trailer/base.py
+++ b/couchpotato/core/media/movie/providers/trailer/base.py
@@ -1,6 +1,6 @@
 from couchpotato.core.event import addEvent
 from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.base import Provider
+from couchpotato.core.media._base.providers.base import Provider
 
 log = CPLog(__name__)
 
@@ -11,3 +11,6 @@ class TrailerProvider(Provider):
 
     def __init__(self):
         addEvent('trailer.search', self.search)
+
+    def search(self, *args, **kwargs):
+        pass
diff --git a/couchpotato/core/providers/trailer/hdtrailers/main.py b/couchpotato/core/media/movie/providers/trailer/hdtrailers.py
similarity index 89%
rename from couchpotato/core/providers/trailer/hdtrailers/main.py
rename to couchpotato/core/media/movie/providers/trailer/hdtrailers.py
index cba7609f..79425332 100644
--- a/couchpotato/core/providers/trailer/hdtrailers/main.py
+++ b/couchpotato/core/media/movie/providers/trailer/hdtrailers.py
@@ -1,14 +1,18 @@
+from string import digits, ascii_letters
+import re
+
 from bs4 import SoupStrainer, BeautifulSoup
 from couchpotato.core.helpers.encoding import tryUrlencode
 from couchpotato.core.helpers.variable import mergeDicts, getTitle
 from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.trailer.base import TrailerProvider
-from string import digits, ascii_letters
-from urllib2 import HTTPError
-import re
+from couchpotato.core.media.movie.providers.trailer.base import TrailerProvider
+from requests import HTTPError
+
 
 log = CPLog(__name__)
 
+autoload = 'HDTrailers'
+
 
 class HDTrailers(TrailerProvider):
 
@@ -20,11 +24,11 @@ class HDTrailers(TrailerProvider):
 
     def search(self, group):
 
-        movie_name = getTitle(group['library'])
+        movie_name = getTitle(group)
 
         url = self.urls['api'] % self.movieUrlName(movie_name)
         try:
-            data = self.getCache('hdtrailers.%s' % group['library']['identifier'], url, show_error = False)
+            data = self.getCache('hdtrailers.%s' % group['identifier'], url, show_error = False)
         except HTTPError:
             log.debug('No page found for: %s', movie_name)
             data = None
@@ -48,13 +52,13 @@ class HDTrailers(TrailerProvider):
         return result_data
 
     def findViaAlternative(self, group):
-        results = {'480p':[], '720p':[], '1080p':[]}
+        results = {'480p': [], '720p': [], '1080p': []}
 
-        movie_name = getTitle(group['library'])
+        movie_name = getTitle(group)
 
         url = "%s?%s" % (self.urls['backup'], tryUrlencode({'s':movie_name}))
         try:
-            data = self.getCache('hdtrailers.alt.%s' % group['library']['identifier'], url, show_error = False)
+            data = self.getCache('hdtrailers.alt.%s' % group['identifier'], url, show_error = False)
         except HTTPError:
             log.debug('No alternative page found for: %s', movie_name)
             data = None
diff --git a/libs/migrate/versioning/templates/repository/pylons/versions/__init__.py b/couchpotato/core/media/movie/providers/userscript/__init__.py
similarity index 100%
rename from libs/migrate/versioning/templates/repository/pylons/versions/__init__.py
rename to couchpotato/core/media/movie/providers/userscript/__init__.py
diff --git a/couchpotato/core/providers/userscript/allocine/main.py b/couchpotato/core/media/movie/providers/userscript/allocine.py
similarity index 88%
rename from couchpotato/core/providers/userscript/allocine/main.py
rename to couchpotato/core/media/movie/providers/userscript/allocine.py
index f8ca630d..238a6b54 100644
--- a/couchpotato/core/providers/userscript/allocine/main.py
+++ b/couchpotato/core/media/movie/providers/userscript/allocine.py
@@ -1,9 +1,13 @@
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.userscript.base import UserscriptBase
 import traceback
 
+from couchpotato.core.logger import CPLog
+from couchpotato.core.media._base.providers.userscript.base import UserscriptBase
+
+
 log = CPLog(__name__)
 
+autoload = 'AlloCine'
+
 
 class AlloCine(UserscriptBase):
diff --git a/couchpotato/core/providers/userscript/appletrailers/main.py b/couchpotato/core/media/movie/providers/userscript/appletrailers.py
similarity index 81%
rename from couchpotato/core/providers/userscript/appletrailers/main.py
rename to couchpotato/core/media/movie/providers/userscript/appletrailers.py
index 693065d1..c59722c7 100644
--- a/couchpotato/core/providers/userscript/appletrailers/main.py
+++ b/couchpotato/core/media/movie/providers/userscript/appletrailers.py
@@ -1,6 +1,10 @@
-from couchpotato.core.providers.userscript.base import UserscriptBase
 import re
 
+from 
couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'AppleTrailers' + class AppleTrailers(UserscriptBase): diff --git a/couchpotato/core/media/movie/providers/userscript/criticker.py b/couchpotato/core/media/movie/providers/userscript/criticker.py new file mode 100644 index 00000000..cc0bee84 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/criticker.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Criticker' + + +class Criticker(UserscriptBase): + + includes = ['http://www.criticker.com/film/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/filmcentrum.py b/couchpotato/core/media/movie/providers/userscript/filmcentrum.py new file mode 100644 index 00000000..b2b15a9e --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/filmcentrum.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'FilmCentrum' + + +class FilmCentrum(UserscriptBase): + + includes = ['*://filmcentrum.nl/films/*'] diff --git a/couchpotato/core/media/movie/providers/userscript/filmstarts.py b/couchpotato/core/media/movie/providers/userscript/filmstarts.py new file mode 100644 index 00000000..59027e03 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/filmstarts.py @@ -0,0 +1,30 @@ +from bs4 import BeautifulSoup +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Filmstarts' + + +class Filmstarts(UserscriptBase): + + includes = ['*://www.filmstarts.de/kritiken/*'] + + def getMovie(self, url): + try: + data = self.getUrl(url) + except: + return + + html = BeautifulSoup(data) + table = html.find("table", attrs={"class": "table table-standard thead-standard table-striped_2 fs11"}) + + if table.find(text='Originaltitel'): + # Get original film title from the table specified above + name = table.find("div", text="Originaltitel").parent.parent.parent.td.text + else: + # If none is available get the title from the meta data + name = html.find("meta", {"property":"og:title"})['content'] + + # Year of production is not available in the meta data, so get it from the table + year = table.find("tr", text="Produktionsjahr").parent.parent.parent.td.text + + return self.search(name, year) \ No newline at end of file diff --git a/couchpotato/core/providers/userscript/filmweb/main.py b/couchpotato/core/media/movie/providers/userscript/filmweb.py similarity index 87% rename from couchpotato/core/providers/userscript/filmweb/main.py rename to couchpotato/core/media/movie/providers/userscript/filmweb.py index 66c5fcdb..a4a51a08 100644 --- a/couchpotato/core/providers/userscript/filmweb/main.py +++ b/couchpotato/core/media/movie/providers/userscript/filmweb.py @@ -1,6 +1,10 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase import re +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'Filmweb' + class Filmweb(UserscriptBase): diff --git a/couchpotato/core/providers/userscript/flickchart/main.py b/couchpotato/core/media/movie/providers/userscript/flickchart.py similarity index 87% rename from couchpotato/core/providers/userscript/flickchart/main.py rename to couchpotato/core/media/movie/providers/userscript/flickchart.py index a66bd38f..c54a8cab 100644 --- a/couchpotato/core/providers/userscript/flickchart/main.py +++ b/couchpotato/core/media/movie/providers/userscript/flickchart.py @@ 
-1,10 +1,14 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.userscript.base import UserscriptBase import traceback +from couchpotato.core.event import fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + log = CPLog(__name__) +autoload = 'Flickchart' + class Flickchart(UserscriptBase): diff --git a/couchpotato/core/providers/userscript/imdb/main.py b/couchpotato/core/media/movie/providers/userscript/imdb.py similarity index 66% rename from couchpotato/core/providers/userscript/imdb/main.py rename to couchpotato/core/media/movie/providers/userscript/imdb.py index 2a6efd6b..dccc4832 100644 --- a/couchpotato/core/providers/userscript/imdb/main.py +++ b/couchpotato/core/media/movie/providers/userscript/imdb.py @@ -1,6 +1,7 @@ -from couchpotato.core.event import fireEvent from couchpotato.core.helpers.variable import getImdb -from couchpotato.core.providers.userscript.base import UserscriptBase +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'IMDB' class IMDB(UserscriptBase): diff --git a/couchpotato/core/media/movie/providers/userscript/letterboxd.py b/couchpotato/core/media/movie/providers/userscript/letterboxd.py new file mode 100644 index 00000000..43b5d309 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/letterboxd.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Letterboxd' + + +class Letterboxd(UserscriptBase): + + includes = ['*://letterboxd.com/film/*'] diff --git a/couchpotato/core/providers/userscript/moviemeter/main.py b/couchpotato/core/media/movie/providers/userscript/moviemeter.py similarity index 52% rename from couchpotato/core/providers/userscript/moviemeter/main.py rename to couchpotato/core/media/movie/providers/userscript/moviemeter.py index 3593d432..4c9bb221 100644 --- a/couchpotato/core/providers/userscript/moviemeter/main.py +++ b/couchpotato/core/media/movie/providers/userscript/moviemeter.py @@ -1,4 +1,6 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'MovieMeter' class MovieMeter(UserscriptBase): diff --git a/couchpotato/core/media/movie/providers/userscript/moviesio.py b/couchpotato/core/media/movie/providers/userscript/moviesio.py new file mode 100644 index 00000000..0381d64a --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/moviesio.py @@ -0,0 +1,8 @@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'MoviesIO' + + +class MoviesIO(UserscriptBase): + + includes = ['*://movies.io/m/*'] diff --git a/couchpotato/core/providers/userscript/reddit/main.py b/couchpotato/core/media/movie/providers/userscript/reddit.py similarity index 71% rename from couchpotato/core/providers/userscript/reddit/main.py rename to couchpotato/core/media/movie/providers/userscript/reddit.py index 9790f6e2..8cb81079 100644 --- a/couchpotato/core/providers/userscript/reddit/main.py +++ b/couchpotato/core/media/movie/providers/userscript/reddit.py @@ -1,6 +1,8 @@ from couchpotato import fireEvent from couchpotato.core.helpers.variable import splitString -from couchpotato.core.providers.userscript.base import UserscriptBase +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + 
+autoload = 'Reddit' class Reddit(UserscriptBase): @@ -8,7 +10,8 @@ class Reddit(UserscriptBase): includes = ['*://www.reddit.com/r/Ijustwatched/comments/*'] def getMovie(self, url): - name = splitString(url, '/')[-1] + name = splitString(splitString(url, '/ijw_')[-1], '/')[0] + if name.startswith('ijw_'): name = name[4:] diff --git a/couchpotato/core/providers/userscript/rottentomatoes/main.py b/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py similarity index 89% rename from couchpotato/core/providers/userscript/rottentomatoes/main.py rename to couchpotato/core/media/movie/providers/userscript/rottentomatoes.py index 0b16a441..902192e2 100644 --- a/couchpotato/core/providers/userscript/rottentomatoes/main.py +++ b/couchpotato/core/media/movie/providers/userscript/rottentomatoes.py @@ -1,10 +1,14 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.userscript.base import UserscriptBase import re import traceback +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + log = CPLog(__name__) +autoload = 'RottenTomatoes' + class RottenTomatoes(UserscriptBase): diff --git a/couchpotato/core/providers/userscript/sharethe/main.py b/couchpotato/core/media/movie/providers/userscript/sharethe.py similarity index 52% rename from couchpotato/core/providers/userscript/sharethe/main.py rename to couchpotato/core/media/movie/providers/userscript/sharethe.py index d22b67eb..ef2f537e 100644 --- a/couchpotato/core/providers/userscript/sharethe/main.py +++ b/couchpotato/core/media/movie/providers/userscript/sharethe.py @@ -1,4 +1,6 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'ShareThe' class ShareThe(UserscriptBase): diff --git a/couchpotato/core/providers/userscript/tmdb/main.py b/couchpotato/core/media/movie/providers/userscript/tmdb.py similarity index 79% rename from couchpotato/core/providers/userscript/tmdb/main.py rename to couchpotato/core/media/movie/providers/userscript/tmdb.py index b718fc3b..f11dba27 100644 --- a/couchpotato/core/providers/userscript/tmdb/main.py +++ b/couchpotato/core/media/movie/providers/userscript/tmdb.py @@ -1,7 +1,11 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.providers.userscript.base import UserscriptBase import re +from couchpotato.core.event import fireEvent +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'TMDB' + class TMDB(UserscriptBase): diff --git a/couchpotato/core/providers/userscript/trakt/main.py b/couchpotato/core/media/movie/providers/userscript/trakt.py similarity index 63% rename from couchpotato/core/providers/userscript/trakt/main.py rename to couchpotato/core/media/movie/providers/userscript/trakt.py index 43e06dee..cd40c69c 100644 --- a/couchpotato/core/providers/userscript/trakt/main.py +++ b/couchpotato/core/media/movie/providers/userscript/trakt.py @@ -1,4 +1,6 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'Trakt' class Trakt(UserscriptBase): diff --git a/couchpotato/core/media/movie/providers/userscript/whiwa.py b/couchpotato/core/media/movie/providers/userscript/whiwa.py new file mode 100644 index 00000000..bd602d20 --- /dev/null +++ b/couchpotato/core/media/movie/providers/userscript/whiwa.py @@ -0,0 +1,8 
@@ +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + +autoload = 'WHiWA' + + +class WHiWA(UserscriptBase): + + includes = ['http://whiwa.net/stats/movie/*'] diff --git a/couchpotato/core/providers/userscript/youteather/main.py b/couchpotato/core/media/movie/providers/userscript/youteather.py similarity index 80% rename from couchpotato/core/providers/userscript/youteather/main.py rename to couchpotato/core/media/movie/providers/userscript/youteather.py index 3efd3686..e7e63b46 100644 --- a/couchpotato/core/providers/userscript/youteather/main.py +++ b/couchpotato/core/media/movie/providers/userscript/youteather.py @@ -1,6 +1,11 @@ -from couchpotato.core.providers.userscript.base import UserscriptBase import re +from couchpotato.core.media._base.providers.userscript.base import UserscriptBase + + +autoload = 'YouTheater' + + class YouTheater(UserscriptBase): id_re = re.compile("view\.php\?id=(\d+)") includes = ['http://www.youtheater.com/view.php?id=*', 'http://youtheater.com/view.php?id=*', diff --git a/couchpotato/core/media/movie/searcher.py b/couchpotato/core/media/movie/searcher.py new file mode 100644 index 00000000..1053ec3d --- /dev/null +++ b/couchpotato/core/media/movie/searcher.py @@ -0,0 +1,473 @@ +from datetime import date +import random +import re +import time +import traceback + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent, fireEvent, fireEventAsync +from couchpotato.core.helpers.encoding import simplifyString +from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb, getIdentifier, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.media._base.searcher.base import SearcherBase +from couchpotato.core.media.movie import MovieTypeBase +from couchpotato.environment import Env + + +log = CPLog(__name__) + +autoload = 'MovieSearcher' + + +class MovieSearcher(SearcherBase, MovieTypeBase): + + in_progress = False + + def __init__(self): + super(MovieSearcher, self).__init__() + + addEvent('movie.searcher.all', self.searchAll) + addEvent('movie.searcher.all_view', self.searchAllView) + addEvent('movie.searcher.single', self.single) + addEvent('movie.searcher.try_next_release', self.tryNextRelease) + addEvent('movie.searcher.could_be_released', self.couldBeReleased) + addEvent('searcher.correct_release', self.correctRelease) + addEvent('searcher.get_search_title', self.getSearchTitle) + + addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = { + 'desc': 'Marks the snatched results as ignored and try the next best release', + 'params': { + 'media_id': {'desc': 'The id of the media'}, + }, + }) + + addApiView('movie.searcher.full_search', self.searchAllView, docs = { + 'desc': 'Starts a full search for all wanted movies', + }) + + addApiView('movie.searcher.progress', self.getProgress, docs = { + 'desc': 'Get the progress of current full search', + 'return': {'type': 'object', 'example': """{ + 'progress': False || object, total & to_go, +}"""}, + }) + + if self.conf('run_on_launch'): + addEvent('app.load', self.searchAll) + + def searchAllView(self, **kwargs): + + fireEventAsync('movie.searcher.all', manual = True) + + return { + 'success': not self.in_progress + } + + def searchAll(self, manual = False): + + if self.in_progress: + log.info('Search already in progress') + fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress') + return + + 
self.in_progress = True
+        fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started')
+
+        medias = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)]
+        random.shuffle(medias)
+
+        total = len(medias)
+        self.in_progress = {
+            'total': total,
+            'to_go': total,
+        }
+
+        try:
+            search_protocols = fireEvent('searcher.protocols', single = True)
+
+            for media_id in medias:
+
+                media = fireEvent('media.get', media_id, single = True)
+
+                try:
+                    self.single(media, search_protocols, manual = manual)
+                except IndexError:
+                    log.error('Forcing library update for %s, if you see this often, please report: %s', (getIdentifier(media), traceback.format_exc()))
+                    fireEvent('movie.update_info', media_id)
+                except:
+                    log.error('Search failed for %s: %s', (getIdentifier(media), traceback.format_exc()))
+
+                self.in_progress['to_go'] -= 1
+
+                # Break if CP wants to shut down
+                if self.shuttingDown():
+                    break
+
+        except SearchSetupError:
+            pass
+
+        self.in_progress = False
+
+    def single(self, movie, search_protocols = None, manual = False, force_download = False):
+
+        # Find out search type
+        try:
+            if not search_protocols:
+                search_protocols = fireEvent('searcher.protocols', single = True)
+        except SearchSetupError:
+            return
+
+        if not movie['profile_id'] or (movie['status'] == 'done' and not manual):
+            log.debug('Movie doesn\'t have a profile or is already done, assuming it\'s in the manage tab.')
+            return
+
+        pre_releases = fireEvent('quality.pre_releases', single = True)
+        release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge = True)
+
+        found_releases = []
+        previous_releases = movie.get('releases', [])
+        too_early_to_search = []
+        outside_eta_results = 0
+        always_search = self.conf('always_search')
+        ignore_eta = manual
+        total_result_count = 0
+
+        default_title = getTitle(movie)
+        if not default_title:
+            log.error('No proper info found for movie, removing it from library to prevent it from having more issues.')
+            fireEvent('media.delete', movie['_id'], single = True)
+            return
+
+        fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'_id': movie['_id']}, message = 'Searching for "%s"' % default_title)
+
+        # Ignore eta once every 7 days
+        if not always_search:
+            prop_name = 'last_ignored_eta.%s' % movie['_id']
+            last_ignored_eta = float(Env.prop(prop_name, default = 0))
+            if last_ignored_eta > time.time() - 604800:
+                ignore_eta = True
+            Env.prop(prop_name, value = time.time())
+
+        db = get_db()
+
+        profile = db.get('id', movie['profile_id'])
+        ret = False
+
+        index = 0
+        for q_identifier in profile.get('qualities'):
+            quality_custom = {
+                'index': index,
+                'quality': q_identifier,
+                'finish': profile['finish'][index],
+                'wait_for': tryInt(profile['wait_for'][index]),
+                '3d': profile['3d'][index] if profile.get('3d') else False
+            }
+
+            index += 1
+
+            could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
+            if not always_search and could_not_be_released:
+                too_early_to_search.append(q_identifier)
+
+                # Skip release, if ETA isn't ignored
+                if not ignore_eta:
+                    continue
+
+            has_better_quality = 0
+
+            # See if better quality is available
+            for release in movie.get('releases', []):
+                if release['status'] not in ['available', 'ignored', 'failed']:
+                    is_higher = fireEvent('quality.ishigher', \
+                        {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \
+                        {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \
+                        profile, single = True)
+                    if is_higher != 'higher':
+                        has_better_quality += 1
+
+            # Don't search for a quality lower than what's already available.
+            if has_better_quality > 0:
+                log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
+                fireEvent('media.restatus', movie['_id'])
+                break
+
+            quality = fireEvent('quality.single', identifier = q_identifier, single = True)
+            log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if always_search or ignore_eta else ''))
+
+            # Extend quality with profile customs
+            quality['custom'] = quality_custom
+
+            results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or []
+            results_count = len(results)
+            total_result_count += results_count
+            if results_count == 0:
+                log.debug('Nothing found for %s in %s', (default_title, quality['label']))
+
+            # Keep track of releases found outside the ETA window
+            outside_eta_results += results_count if could_not_be_released else 0
+
+            # Check that the movie wasn't deleted while searching
+            if not fireEvent('media.get', movie.get('_id'), single = True):
+                break
+
+            # Add them to this movie's releases list
+            found_releases += fireEvent('release.create_from_search', results, movie, quality, single = True)
+
+            # Don't trigger a download, but notify the user of available releases
+            if could_not_be_released:
+                if results_count > 0:
+                    log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))
+
+            # Try to find a valid result and download it
+            if (force_download or not could_not_be_released or always_search) and fireEvent('release.try_download_result', results, movie, quality_custom, single = True):
+                ret = True
+
+            # Remove releases that aren't found anymore
+            temp_previous_releases = []
+            for release in previous_releases:
+                if release.get('status') == 'available' and release.get('identifier') not in found_releases:
+                    fireEvent('release.delete', release.get('_id'), single = True)
+                else:
+                    temp_previous_releases.append(release)
+            previous_releases = temp_previous_releases
+            del temp_previous_releases
+
+            # Break if CP wants to shut down
+            if self.shuttingDown() or ret:
+                break
+
+        if total_result_count > 0:
+            fireEvent('media.tag', movie['_id'], 'recent', single = True)
+
+        if len(too_early_to_search) > 0:
+            log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))
+
+        if outside_eta_results > 0:
+            message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title)
+            log.info(message)
+
+            if not manual:
+                fireEvent('media.available', message = message, data = {})
+
+        fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'_id': movie['_id']})
+
+        return ret
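A quality profile in single() above is stored as parallel arrays: qualities, finish, wait_for and 3d all line up by index, which is why the loop keeps its own index counter while building quality_custom. A toy illustration of that layout (the profile values are invented for the example; CouchPotato's tryInt helper is replaced by plain int() here):

# Toy profile in the same parallel-array layout single() consumes (values invented)
profile = {
    'qualities': ['720p', 'brrip', 'dvdrip'],
    'finish': [True, False, False],
    'wait_for': ['0', '3', '3'],
    '3d': [False, False, False],
}

for index, q_identifier in enumerate(profile['qualities']):
    quality_custom = {
        'index': index,
        'quality': q_identifier,
        'finish': profile['finish'][index],
        'wait_for': int(profile['wait_for'][index]),
        '3d': profile['3d'][index] if profile.get('3d') else False,
    }
    print(quality_custom)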
+    def correctRelease(self, nzb = None, media = None, quality = None, **kwargs):
+
+        if media.get('type') != 'movie': return
+
+        media_title = fireEvent('searcher.get_search_title', media, single = True)
+
+        imdb_results = kwargs.get('imdb_results', False)
+        retention = Env.setting('retention', section = 'nzb')
+
+        if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
+            log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
+            return False
+
+        # Check for required and ignored words
+        if not fireEvent('searcher.correct_words', nzb['name'], media, single = True):
+            return False
+
+        preferred_quality = quality if quality else fireEvent('quality.single', identifier = quality['identifier'], single = True)
+
+        # Contains lower quality string
+        contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year = media['info']['year'], preferred_quality = preferred_quality, single = True)
+        if contains_other != False:
+            log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
+            return False
+
+        # Check for a 3D match
+        if not fireEvent('searcher.correct_3d', nzb, preferred_quality = preferred_quality, single = True):
+            log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label']))
+            return False
+
+        # File too small
+        if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']):
+            log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
+            return False
+
+        # File too large
+        if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']):
+            log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
+            return False
+
+        # Provider specific functions
+        get_more = nzb.get('get_more_info')
+        if get_more:
+            get_more(nzb)
+
+        extra_check = nzb.get('extra_check')
+        if extra_check and not extra_check(nzb):
+            return False
+
+
+        if imdb_results:
+            return True
+
+        # Check if the nzb contains an imdb link
+        if getImdb(nzb.get('description', '')) == getIdentifier(media):
+            return True
+
+        for raw_title in media['info']['titles']:
+            for movie_title in possibleTitles(raw_title):
+                movie_words = re.split('\W+', simplifyString(movie_title))
+
+                if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True):
+                    # if no IMDB link, at least check year range 1
+                    if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single = True):
+                        return True
+
+                    # if no IMDB link, at least check year
+                    if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single = True):
+                        return True
+
+        log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year']))
+        return False
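The size gate in correctRelease() compares a release's reported size against the quality's size window in megabytes. A quick standalone illustration of those two checks (the window bounds are sample values, not CouchPotato defaults):

# Sample 720p size window in MB; bounds are illustrative, not CP defaults
preferred_quality = {'label': '720p', 'size_min': 3500, 'size_max': 10000}

def size_ok(size_mb, quality):
    # Mirrors the "too small" / "too large" checks in correctRelease()
    if size_mb and quality['size_min'] > size_mb:
        return False
    if size_mb and quality['size_max'] < size_mb:
        return False
    return True

print(size_ok(4700, preferred_quality))   # True
print(size_ok(700, preferred_quality))    # False, too small to be 720p
print(size_ok(30000, preferred_quality))  # False, too large to be 720p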
+    def couldBeReleased(self, is_pre_release, dates, year = None):
+
+        now = int(time.time())
+        now_year = date.today().year
+        now_month = date.today().month
+
+        if (year is None or year < now_year - 1 or (year <= now_year - 1 and now_month > 4)) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)):
+            return True
+        else:
+
+            # Don't allow movies with years too far in the future
+            add_year = 1 if now_month > 10 else 0  # Only allow +1 year at the end of the year
+            if year is not None and year > (now_year + add_year):
+                return False
+
+            # For movies before 1972
+            if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0:
+                return True
+
+            if is_pre_release:
+                # Prerelease 1 week before theaters
+                if dates.get('theater') - 604800 < now:
+                    return True
+            else:
+                # 12 weeks after theater release
+                if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now:
+                    return True
+
+                if dates.get('dvd') > 0:
+
+                    # 4 weeks before dvd release
+                    if dates.get('dvd') - 2419200 < now:
+                        return True
+
+                    # Dvd should be released
+                    if dates.get('dvd') < now:
+                        return True
+
+
+        return False
+
+    def tryNextReleaseView(self, media_id = None, **kwargs):
+
+        trynext = self.tryNextRelease(media_id, manual = True, force_download = True)
+
+        return {
+            'success': trynext
+        }
+
+    def tryNextRelease(self, media_id, manual = False, force_download = False):
+
+        try:
+            db = get_db()
+            rels = fireEvent('media.with_status', ['snatched', 'done'], single = True)
+
+            for rel in rels:
+                rel['status'] = 'ignored'
+                db.update(rel)
+
+            movie_dict = fireEvent('media.get', media_id, single = True)
+            log.info('Trying next release for: %s', getTitle(movie_dict))
+            self.single(movie_dict, manual = manual, force_download = force_download)
+
+            return True
+
+        except:
+            log.error('Failed searching for next release: %s', traceback.format_exc())
+            return False
+
+    def getSearchTitle(self, media):
+        if media['type'] == 'movie':
+            return getTitle(media)
+
+
+class SearchSetupError(Exception):
+    pass
+
+
+config = [{
+    'name': 'moviesearcher',
+    'order': 20,
+    'groups': [
+        {
+            'tab': 'searcher',
+            'name': 'movie_searcher',
+            'label': 'Movie search',
+            'description': 'Search options for movies',
+            'advanced': True,
+            'options': [
+                {
+                    'name': 'always_search',
+                    'default': False,
+                    'migrate_from': 'searcher',
+                    'type': 'bool',
+                    'label': 'Always search',
+                    'description': 'Search for movies even before there is an ETA. Enabling this will probably get you a lot of fakes.',
+                },
+                {
+                    'name': 'run_on_launch',
+                    'migrate_from': 'searcher',
+                    'label': 'Run on launch',
+                    'advanced': True,
+                    'default': 0,
+                    'type': 'bool',
+                    'description': 'Force run the searcher after (re)start.',
+                },
+                {
+                    'name': 'search_on_add',
+                    'label': 'Search after add',
+                    'advanced': True,
+                    'default': 1,
+                    'type': 'bool',
+                    'description': 'Disable this to only search for movies on cron.',
+                },
+                {
+                    'name': 'cron_day',
+                    'migrate_from': 'searcher',
+                    'label': 'Day',
+                    'advanced': True,
+                    'default': '*',
+                    'type': 'string',
+                    'description': '*: Every day, */2: Every 2 days, 1: Every first of the month. 
See APScheduler for details.', + }, + { + 'name': 'cron_hour', + 'migrate_from': 'searcher', + 'label': 'Hour', + 'advanced': True, + 'default': random.randint(0, 23), + 'type': 'string', + 'description': '*: Every hour, */8: Every 8 hours, 3: At 3, midnight.', + }, + { + 'name': 'cron_minute', + 'migrate_from': 'searcher', + 'label': 'Minute', + 'advanced': True, + 'default': random.randint(0, 59), + 'type': 'string', + 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." + }, + ], + }, + ], +}] diff --git a/couchpotato/core/media/movie/searcher/__init__.py b/couchpotato/core/media/movie/searcher/__init__.py deleted file mode 100644 index 4ae1ed32..00000000 --- a/couchpotato/core/media/movie/searcher/__init__.py +++ /dev/null @@ -1,74 +0,0 @@ -from .main import MovieSearcher -import random - - -def start(): - return MovieSearcher() - -config = [{ - 'name': 'moviesearcher', - 'order': 20, - 'groups': [ - { - 'tab': 'searcher', - 'name': 'movie_searcher', - 'label': 'Movie search', - 'description': 'Search options for movies', - 'advanced': True, - 'options': [ - { - 'name': 'always_search', - 'default': False, - 'migrate_from': 'searcher', - 'type': 'bool', - 'label': 'Always search', - 'description': 'Search for movies even before there is a ETA. Enabling this will probably get you a lot of fakes.', - }, - { - 'name': 'run_on_launch', - 'migrate_from': 'searcher', - 'label': 'Run on launch', - 'advanced': True, - 'default': 0, - 'type': 'bool', - 'description': 'Force run the searcher after (re)start.', - }, - { - 'name': 'search_on_add', - 'label': 'Search after add', - 'advanced': True, - 'default': 1, - 'type': 'bool', - 'description': 'Disable this to only search for movies on cron.', - }, - { - 'name': 'cron_day', - 'migrate_from': 'searcher', - 'label': 'Day', - 'advanced': True, - 'default': '*', - 'type': 'string', - 'description': '*: Every day, */2: Every 2 days, 1: Every first of the month. See APScheduler for details.', - }, - { - 'name': 'cron_hour', - 'migrate_from': 'searcher', - 'label': 'Hour', - 'advanced': True, - 'default': random.randint(0, 23), - 'type': 'string', - 'description': '*: Every hour, */8: Every 8 hours, 3: At 3, midnight.', - }, - { - 'name': 'cron_minute', - 'migrate_from': 'searcher', - 'label': 'Minute', - 'advanced': True, - 'default': random.randint(0, 59), - 'type': 'string', - 'description': "Just keep it random, so the providers don't get DDOSed by every CP user on a 'full' hour." 
- }, - ], - }, - ], -}] diff --git a/couchpotato/core/media/movie/searcher/main.py b/couchpotato/core/media/movie/searcher/main.py deleted file mode 100644 index 1c22e18c..00000000 --- a/couchpotato/core/media/movie/searcher/main.py +++ /dev/null @@ -1,363 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent, fireEvent, fireEventAsync -from couchpotato.core.helpers.encoding import simplifyString -from couchpotato.core.helpers.variable import getTitle, possibleTitles, getImdb -from couchpotato.core.logger import CPLog -from couchpotato.core.media._base.searcher.base import SearcherBase -from couchpotato.core.media.movie import MovieTypeBase -from couchpotato.core.settings.model import Media, Release -from couchpotato.environment import Env -from datetime import date -import random -import re -import time -import traceback - -log = CPLog(__name__) - - -class MovieSearcher(SearcherBase, MovieTypeBase): - - in_progress = False - - def __init__(self): - super(MovieSearcher, self).__init__() - - addEvent('movie.searcher.all', self.searchAll) - addEvent('movie.searcher.all_view', self.searchAllView) - addEvent('movie.searcher.single', self.single) - addEvent('movie.searcher.try_next_release', self.tryNextRelease) - addEvent('movie.searcher.could_be_released', self.couldBeReleased) - addEvent('searcher.correct_release', self.correctRelease) - addEvent('searcher.get_search_title', self.getSearchTitle) - - addApiView('movie.searcher.try_next', self.tryNextReleaseView, docs = { - 'desc': 'Marks the snatched results as ignored and try the next best release', - 'params': { - 'id': {'desc': 'The id of the movie'}, - }, - }) - - addApiView('movie.searcher.full_search', self.searchAllView, docs = { - 'desc': 'Starts a full search for all wanted movies', - }) - - addApiView('movie.searcher.progress', self.getProgress, docs = { - 'desc': 'Get the progress of current full search', - 'return': {'type': 'object', 'example': """{ - 'progress': False || object, total & to_go, -}"""}, - }) - - if self.conf('run_on_launch'): - addEvent('app.load', self.searchAll) - - def searchAllView(self, **kwargs): - - fireEventAsync('movie.searcher.all') - - return { - 'success': not self.in_progress - } - - def searchAll(self): - - if self.in_progress: - log.info('Search already in progress') - fireEvent('notify.frontend', type = 'movie.searcher.already_started', data = True, message = 'Full search already in progress') - return - - self.in_progress = True - fireEvent('notify.frontend', type = 'movie.searcher.started', data = True, message = 'Full search started') - - db = get_session() - - movies_raw = db.query(Media).filter( - Media.status.has(identifier = 'active') - ).all() - - random.shuffle(movies_raw) - - movies = [] - for m in movies_raw: - movies.append(m.to_dict({ - 'category': {}, - 'profile': {'types': {'quality': {}}}, - 'releases': {'status': {}, 'quality': {}}, - 'library': {'titles': {}, 'files': {}}, - 'files': {}, - })) - - self.in_progress = { - 'total': len(movies), - 'to_go': len(movies), - } - - try: - search_protocols = fireEvent('searcher.protocols', single = True) - - for movie in movies: - - try: - self.single(movie, search_protocols) - except IndexError: - log.error('Forcing library update for %s, if you see this often, please report: %s', (movie['library']['identifier'], traceback.format_exc())) - fireEvent('library.update.movie', movie['library']['identifier']) - except: - log.error('Search failed for %s: %s', 
(movie['library']['identifier'], traceback.format_exc())) - - self.in_progress['to_go'] -= 1 - - # Break if CP wants to shut down - if self.shuttingDown(): - break - - except SearchSetupError: - pass - - self.in_progress = False - - def single(self, movie, search_protocols = None, manual = False): - - # movies don't contain 'type' yet, so just set to default here - if 'type' not in movie: - movie['type'] = 'movie' - - # Find out search type - try: - if not search_protocols: - search_protocols = fireEvent('searcher.protocols', single = True) - except SearchSetupError: - return - - done_status = fireEvent('status.get', 'done', single = True) - - if not movie['profile'] or (movie['status_id'] == done_status.get('id') and not manual): - log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.') - return - - pre_releases = fireEvent('quality.pre_releases', single = True) - release_dates = fireEvent('library.update.movie.release_date', identifier = movie['library']['identifier'], merge = True) - available_status, ignored_status, failed_status = fireEvent('status.get', ['available', 'ignored', 'failed'], single = True) - - found_releases = [] - too_early_to_search = [] - - default_title = getTitle(movie['library']) - if not default_title: - log.error('No proper info found for movie, removing it from library to cause it from having more issues.') - fireEvent('media.delete', movie['id'], single = True) - return - - fireEvent('notify.frontend', type = 'movie.searcher.started', data = {'id': movie['id']}, message = 'Searching for "%s"' % default_title) - - db = get_session() - - ret = False - for quality_type in movie['profile']['types']: - if not self.conf('always_search') and not self.couldBeReleased(quality_type['quality']['identifier'] in pre_releases, release_dates, movie['library']['year']): - too_early_to_search.append(quality_type['quality']['identifier']) - continue - - has_better_quality = 0 - - # See if better quality is available - for release in movie['releases']: - if release['quality']['order'] <= quality_type['quality']['order'] and release['status_id'] not in [available_status.get('id'), ignored_status.get('id'), failed_status.get('id')]: - has_better_quality += 1 - - # Don't search for quality lower then already available. 
- if has_better_quality is 0: - - log.info('Search for %s in %s', (default_title, quality_type['quality']['label'])) - quality = fireEvent('quality.single', identifier = quality_type['quality']['identifier'], single = True) - - results = fireEvent('searcher.search', search_protocols, movie, quality, single = True) or [] - if len(results) == 0: - log.debug('Nothing found for %s in %s', (default_title, quality_type['quality']['label'])) - - # Check if movie isn't deleted while searching - if not db.query(Media).filter_by(id = movie.get('id')).first(): - break - - # Add them to this movie releases list - found_releases += fireEvent('release.create_from_search', results, movie, quality_type, single = True) - - # Try find a valid result and download it - if fireEvent('release.try_download_result', results, movie, quality_type, manual, single = True): - ret = True - - # Remove releases that aren't found anymore - for release in movie.get('releases', []): - if release.get('status_id') == available_status.get('id') and release.get('identifier') not in found_releases: - fireEvent('release.delete', release.get('id'), single = True) - - else: - log.info('Better quality (%s) already available or snatched for %s', (quality_type['quality']['label'], default_title)) - fireEvent('media.restatus', movie['id']) - break - - # Break if CP wants to shut down - if self.shuttingDown() or ret: - break - - if len(too_early_to_search) > 0: - log.info2('Too early to search for %s, %s', (too_early_to_search, default_title)) - - fireEvent('notify.frontend', type = 'movie.searcher.ended', data = {'id': movie['id']}) - - return ret - - def correctRelease(self, nzb = None, media = None, quality = None, **kwargs): - - if media.get('type') != 'movie': return - - media_title = fireEvent('searcher.get_search_title', media, single = True) - - imdb_results = kwargs.get('imdb_results', False) - retention = Env.setting('retention', section = 'nzb') - - if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0): - log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name'])) - return False - - # Check for required and ignored words - if not fireEvent('searcher.correct_words', nzb['name'], media, single = True): - return False - - preferred_quality = fireEvent('quality.single', identifier = quality['identifier'], single = True) - - # Contains lower quality string - if fireEvent('searcher.contains_other_quality', nzb, movie_year = media['library']['year'], preferred_quality = preferred_quality, single = True): - log.info2('Wrong: %s, looking for %s', (nzb['name'], quality['label'])) - return False - - - # File to small - if nzb['size'] and preferred_quality['size_min'] > nzb['size']: - log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min'])) - return False - - # File to large - if nzb['size'] and preferred_quality.get('size_max') < nzb['size']: - log.info2('Wrong: "%s" is too large to be %s. 
%sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max'])) - return False - - - # Provider specific functions - get_more = nzb.get('get_more_info') - if get_more: - get_more(nzb) - - extra_check = nzb.get('extra_check') - if extra_check and not extra_check(nzb): - return False - - - if imdb_results: - return True - - # Check if nzb contains imdb link - if getImdb(nzb.get('description', '')) == media['library']['identifier']: - return True - - for raw_title in media['library']['titles']: - for movie_title in possibleTitles(raw_title['title']): - movie_words = re.split('\W+', simplifyString(movie_title)) - - if fireEvent('searcher.correct_name', nzb['name'], movie_title, single = True): - # if no IMDB link, at least check year range 1 - if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 1, single = True): - return True - - # if no IMDB link, at least check year - if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['library']['year'], 0, single = True): - return True - - log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['library']['year'])) - return False - - def couldBeReleased(self, is_pre_release, dates, year = None): - - now = int(time.time()) - now_year = date.today().year - now_month = date.today().month - - if (year is None or year < now_year - 1) and (not dates or (dates.get('theater', 0) == 0 and dates.get('dvd', 0) == 0)): - return True - else: - - # Don't allow movies with years to far in the future - add_year = 1 if now_month > 10 else 0 # Only allow +1 year if end of the year - if year is not None and year > (now_year + add_year): - return False - - # For movies before 1972 - if not dates or dates.get('theater', 0) < 0 or dates.get('dvd', 0) < 0: - return True - - if is_pre_release: - # Prerelease 1 week before theaters - if dates.get('theater') - 604800 < now: - return True - else: - # 12 weeks after theater release - if dates.get('theater') > 0 and dates.get('theater') + 7257600 < now: - return True - - if dates.get('dvd') > 0: - - # 4 weeks before dvd release - if dates.get('dvd') - 2419200 < now: - return True - - # Dvd should be released - if dates.get('dvd') < now: - return True - - - return False - - def tryNextReleaseView(self, id = None, **kwargs): - - trynext = self.tryNextRelease(id, manual = True) - - return { - 'success': trynext - } - - def tryNextRelease(self, media_id, manual = False): - - snatched_status, done_status, ignored_status = fireEvent('status.get', ['snatched', 'done', 'ignored'], single = True) - - try: - db = get_session() - rels = db.query(Release) \ - .filter_by(movie_id = media_id) \ - .filter(Release.status_id.in_([snatched_status.get('id'), done_status.get('id')])) \ - .all() - - for rel in rels: - rel.status_id = ignored_status.get('id') - db.commit() - - movie_dict = fireEvent('media.get', media_id = media_id, single = True) - log.info('Trying next release for: %s', getTitle(movie_dict['library'])) - fireEvent('movie.searcher.single', movie_dict, manual = manual) - - return True - - except: - log.error('Failed searching for next release: %s', traceback.format_exc()) - db.rollback() - return False - finally: - db.close() - - def getSearchTitle(self, media): - if media['type'] == 'movie': - return getTitle(media['library']) - -class SearchSetupError(Exception): - pass diff --git a/couchpotato/core/media/movie/suggestion/__init__.py 
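Editor's note (not part of the patch): from here on the series replaces SQLAlchemy session queries (`get_session()`, `Media.status_id`, `joinedload_all`) with CodernityDB lookups and events, as the suggestion hunks below show. The recurring pattern, sketched on the assumption that `media.with_status` yields plain document dicts:

```python
# Hedged sketch of the query pattern this refactor converges on.
from couchpotato.core.event import fireEvent
from couchpotato.core.helpers.variable import getIdentifier

def active_identifiers():
    # The event returns media documents instead of ORM rows...
    media = fireEvent('media.with_status', ['active', 'done'], single = True)
    # ...and getIdentifier() replaces the old x.library.identifier attribute.
    return [getIdentifier(m) for m in media]
```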
b/couchpotato/core/media/movie/suggestion/__init__.py index 50083fe7..e69de29b 100644 --- a/couchpotato/core/media/movie/suggestion/__init__.py +++ b/couchpotato/core/media/movie/suggestion/__init__.py @@ -1,7 +0,0 @@ -from .main import Suggestion - - -def start(): - return Suggestion() - -config = [] diff --git a/couchpotato/core/media/movie/suggestion/main.py b/couchpotato/core/media/movie/suggestion/main.py index 22e23fe2..c2cc9071 100644 --- a/couchpotato/core/media/movie/suggestion/main.py +++ b/couchpotato/core/media/movie/suggestion/main.py @@ -1,12 +1,12 @@ -from couchpotato import get_session +from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.variable import splitString, removeDuplicate +from couchpotato.core.helpers.variable import splitString, removeDuplicate, getIdentifier from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media, Library from couchpotato.environment import Env -from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql.expression import or_ + + +autoload = 'Suggestion' class Suggestion(Plugin): @@ -28,11 +28,8 @@ class Suggestion(Plugin): else: if not movies or len(movies) == 0: - db = get_session() - active_movies = db.query(Media) \ - .options(joinedload_all('library')) \ - .filter(or_(*[Media.status.has(identifier = s) for s in ['active', 'done']])).all() - movies = [x.library.identifier for x in active_movies] + active_movies = fireEvent('media.with_status', ['active', 'done'], single = True) + movies = [getIdentifier(x) for x in active_movies] if not ignored or len(ignored) == 0: ignored = splitString(Env.prop('suggest_ignore', default = '')) @@ -87,15 +84,8 @@ class Suggestion(Plugin): # Get new results and add them if len(new_suggestions) - 1 < limit: - - active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True) - - db = get_session() - active_movies = db.query(Media) \ - .join(Library) \ - .with_entities(Library.identifier) \ - .filter(Media.status_id.in_([active_status.get('id'), done_status.get('id')])).all() - movies = [x[0] for x in active_movies] + active_movies = fireEvent('media.with_status', ['active', 'done'], single = True) + movies = [getIdentifier(x) for x in active_movies] movies.extend(seen) ignored.extend([x.get('imdb') for x in cached_suggestion]) diff --git a/couchpotato/core/media/movie/suggestion/static/suggest.css b/couchpotato/core/media/movie/suggestion/static/suggest.css index d4ba734b..8e747849 100644 --- a/couchpotato/core/media/movie/suggestion/static/suggest.css +++ b/couchpotato/core/media/movie/suggestion/static/suggest.css @@ -1,4 +1,7 @@ .suggestions { + clear: both; + padding-top: 10px; + margin-bottom: 30px; } .suggestions > h2 { @@ -91,7 +94,6 @@ padding: 0 3px 10px 0; } .suggestions .media_result .data:before { - bottom: 0; content: ''; display: block; height: 10px; @@ -107,7 +109,7 @@ z-index: 3; pointer-events: none; } - + .suggestions .media_result .data .info .plot.full { top: 0; overflow: auto; @@ -123,14 +125,14 @@ .suggestions .media_result .options select[name=title] { width: 100%; } .suggestions .media_result .options select[name=profile] { width: 100%; } .suggestions .media_result .options select[name=category] { width: 100%; } - - .suggestions .media_result .button { + + .suggestions .media_result .button { position: absolute; margin: 2px 0 0 0; right: 15px; bottom: 15px; } - + .suggestions .media_result .thumbnail { width: 100px; diff 
--git a/couchpotato/core/media/movie/suggestion/static/suggest.js b/couchpotato/core/media/movie/suggestion/static/suggest.js index c4e5630a..494f0459 100644 --- a/couchpotato/core/media/movie/suggestion/static/suggest.js +++ b/couchpotato/core/media/movie/suggestion/static/suggest.js @@ -42,11 +42,10 @@ var SuggestList = new Class({ } } - }).grab( - new Element('h2', { - 'text': 'You might like these' - }) - ); + }); + + var cookie_menu_select = Cookie.read('suggestions_charts_menu_selected'); + if( cookie_menu_select === 'suggestions' || cookie_menu_select === null ) self.el.show(); else self.el.hide(); self.api_request = Api.request('suggestion.view', { 'onComplete': self.fill.bind(self) @@ -116,7 +115,7 @@ var SuggestList = new Class({ } } }) : null - ) + ); $(m).inject(self.el); @@ -150,4 +149,4 @@ var SuggestList = new Class({ return this.el; } -}) +}); diff --git a/couchpotato/core/migration/migrate.cfg b/couchpotato/core/migration/migrate.cfg deleted file mode 100644 index f17e967a..00000000 --- a/couchpotato/core/migration/migrate.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[db_settings] -repository_id = CouchPotato -version_table = migrate_version -required_dbs = ['sqlite'] diff --git a/couchpotato/core/migration/versions/001_Releases_last_edit.py b/couchpotato/core/migration/versions/001_Releases_last_edit.py deleted file mode 100644 index d4b12080..00000000 --- a/couchpotato/core/migration/versions/001_Releases_last_edit.py +++ /dev/null @@ -1,25 +0,0 @@ -from migrate.changeset.schema import create_column -from sqlalchemy.schema import MetaData, Column, Table, Index -from sqlalchemy.types import Integer - -meta = MetaData() - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - # Change release, add last_edit and index - last_edit_column = Column('last_edit', Integer) - release = Table('release', meta, last_edit_column) - - create_column(last_edit_column, release) - Index('ix_release_last_edit', release.c.last_edit).create() - - # Change movie last_edit - last_edit_column = Column('last_edit', Integer) - movie = Table('movie', meta, last_edit_column) - Index('ix_movie_last_edit', movie.c.last_edit).create() - - -def downgrade(migrate_engine): - pass diff --git a/couchpotato/core/migration/versions/002_Movie_category.py b/couchpotato/core/migration/versions/002_Movie_category.py deleted file mode 100644 index 023e47c6..00000000 --- a/couchpotato/core/migration/versions/002_Movie_category.py +++ /dev/null @@ -1,18 +0,0 @@ -from migrate.changeset.schema import create_column -from sqlalchemy.schema import MetaData, Column, Table, Index -from sqlalchemy.types import Integer - -meta = MetaData() - - -def upgrade(migrate_engine): - meta.bind = migrate_engine - - category_column = Column('category_id', Integer) - movie = Table('movie', meta, category_column) - create_column(category_column, movie) - Index('ix_movie_category_id', movie.c.category_id).create() - - -def downgrade(migrate_engine): - pass diff --git a/couchpotato/core/migration/versions/__init__.py b/couchpotato/core/migration/versions/__init__.py deleted file mode 100755 index 7e6e44bf..00000000 --- a/couchpotato/core/migration/versions/__init__.py +++ /dev/null @@ -1,30 +0,0 @@ -""" - Examples - - Adding a column: - - from migrate import * - from migrate.changeset.schema import create_column - from sqlalchemy import * - - meta = MetaData() - - def upgrade(migrate_engine): - meta.bind = migrate_engine - - #print changeset.schema - path_column = Column('path', String) - resource = Table('resource', meta, path_column) - - 
create_column(path_column, resource) - - - - Adding Relation table: http://www.mail-archive.com/sqlelixir@googlegroups.com/msg02061.html - - person = Table('person', metadata, Column('id', Integer)) - person_column = Column('person_id', Integer, ForeignKey('person.id'), nullable=False) - movie = Table('movie', metadata, person_column) - person_constraint = ForeignKeyConstraint(['person_id'], ['person.id'], ondelete="restrict", table=movie) - -""" diff --git a/couchpotato/core/notifications/base.py b/couchpotato/core/notifications/base.py index 63d2075e..725704e0 100644 --- a/couchpotato/core/notifications/base.py +++ b/couchpotato/core/notifications/base.py @@ -1,7 +1,7 @@ from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import Provider +from couchpotato.core.media._base.providers.base import Provider from couchpotato.environment import Env log = CPLog(__name__) @@ -15,6 +15,7 @@ class Notification(Provider): test_message = 'ZOMG Lazors Pewpewpew!' listen_to = [ + 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', 'core.message.important', diff --git a/couchpotato/core/notifications/boxcar/__init__.py b/couchpotato/core/notifications/boxcar/__init__.py deleted file mode 100644 index faab7a5c..00000000 --- a/couchpotato/core/notifications/boxcar/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import Boxcar - - -def start(): - return Boxcar() - -config = [{ - 'name': 'boxcar', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'boxcar', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'email', - 'description': 'Your Boxcar registration emailaddress.' 
- }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/boxcar/main.py b/couchpotato/core/notifications/boxcar/main.py deleted file mode 100644 index 49aab316..00000000 --- a/couchpotato/core/notifications/boxcar/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import time - -log = CPLog(__name__) - - -class Boxcar(Notification): - - url = 'https://boxcar.io/devices/providers/7MNNXY3UIzVBwvzkKwkC/notifications' - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - try: - message = message.strip() - - data = { - 'email': self.conf('email'), - 'notification[from_screen_name]': self.default_title, - 'notification[message]': toUnicode(message), - 'notification[from_remote_service_id]': int(time.time()), - } - - self.urlopen(self.url, data = data) - except: - log.error('Check your email and added services on boxcar.io') - return False - - log.info('Boxcar notification successful.') - return True - - def isEnabled(self): - return super(Boxcar, self).isEnabled() and self.conf('email') diff --git a/couchpotato/core/notifications/boxcar2/main.py b/couchpotato/core/notifications/boxcar2.py similarity index 55% rename from couchpotato/core/notifications/boxcar2/main.py rename to couchpotato/core/notifications/boxcar2.py index 6633ca70..04ce4f38 100644 --- a/couchpotato/core/notifications/boxcar2/main.py +++ b/couchpotato/core/notifications/boxcar2.py @@ -4,6 +4,8 @@ from couchpotato.core.notifications.base import Notification log = CPLog(__name__) +autoload = 'Boxcar2' + class Boxcar2(Notification): @@ -23,7 +25,7 @@ class Boxcar2(Notification): data = { 'user_credentials': self.conf('token'), - 'notification[title]': toUnicode(message), + 'notification[title]': toUnicode('%s - %s' % (self.default_title, message)), 'notification[long_message]': toUnicode(long_message), } @@ -37,3 +39,33 @@ class Boxcar2(Notification): def isEnabled(self): return super(Boxcar2, self).isEnabled() and self.conf('token') + + +config = [{ + 'name': 'boxcar2', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'boxcar2', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'token', + 'description': ('Your Boxcar access token.', 'Can be found in the app under settings') + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/boxcar2/__init__.py b/couchpotato/core/notifications/boxcar2/__init__.py deleted file mode 100644 index da7f99c0..00000000 --- a/couchpotato/core/notifications/boxcar2/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import Boxcar2 - - -def start(): - return Boxcar2() - -config = [{ - 'name': 'boxcar2', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'boxcar2', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'token', - 'description': ('Your Boxcar access token.', 'Can be found in the app under settings') - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message 
when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/core/__init__.py b/couchpotato/core/notifications/core/__init__.py index b68a915a..9c4fb373 100644 --- a/couchpotato/core/notifications/core/__init__.py +++ b/couchpotato/core/notifications/core/__init__.py @@ -1,7 +1,5 @@ from .main import CoreNotifier -def start(): +def autoload(): return CoreNotifier() - -config = [] diff --git a/couchpotato/core/notifications/core/index.py b/couchpotato/core/notifications/core/index.py new file mode 100644 index 00000000..c7985b5b --- /dev/null +++ b/couchpotato/core/notifications/core/index.py @@ -0,0 +1,37 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class NotificationIndex(TreeBasedIndex): + _version = 1 + + custom_header = """from CodernityDB.tree_index import TreeBasedIndex +import time""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'I' + super(NotificationIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'notification': + return data.get('time'), None + + +class NotificationUnreadIndex(TreeBasedIndex): + _version = 1 + + custom_header = """from CodernityDB.tree_index import TreeBasedIndex +import time""" + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'I' + super(NotificationUnreadIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'notification' and not data.get('read'): + return data.get('time'), None diff --git a/couchpotato/core/notifications/core/main.py b/couchpotato/core/notifications/core/main.py index 93f94d6a..5190218e 100644 --- a/couchpotato/core/notifications/core/main.py +++ b/couchpotato/core/notifications/core/main.py @@ -1,27 +1,34 @@ -from couchpotato import get_session +from operator import itemgetter +import threading +import time +import traceback +import uuid + +from couchpotato import get_db from couchpotato.api import addApiView, addNonBlockApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -from couchpotato.core.settings.model import Notification as Notif +from .index import NotificationIndex, NotificationUnreadIndex from couchpotato.environment import Env -from operator import itemgetter -from sqlalchemy.sql.expression import or_ -import threading -import time -import traceback -import uuid + log = CPLog(__name__) class CoreNotifier(Notification): + _database = { + 'notification': NotificationIndex, + 'notification_unread': NotificationUnreadIndex + } + m_lock = None listen_to = [ + 'media.available', 'renamer.after', 'movie.snatched', 'updater.available', 'updater.updated', 'core.message', 'core.message.important', @@ -66,40 +73,29 @@ class CoreNotifier(Notification): self.m_lock = threading.Lock() def clean(self): - try: - db = get_session() - db.query(Notif).filter(Notif.added <= (int(time.time()) - 2419200)).delete() - db.commit() + db = get_db() + for n in db.all('notification', with_doc = True): + if n['doc'].get('time', 0) <= (int(time.time()) - 2419200): + db.delete(n['doc']) except: log.error('Failed cleaning notification: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() def markAsRead(self, ids = None, **kwargs): ids = splitString(ids) if 
ids else None try: - db = get_session() - - if ids: - q = db.query(Notif).filter(or_(*[Notif.id == tryInt(s) for s in ids])) - else: - q = db.query(Notif).filter_by(read = False) - - q.update({Notif.read: True}) - db.commit() - + db = get_db() + for x in db.all('notification_unread', with_doc = True): + if not ids or x['_id'] in ids: + x['doc']['read'] = True + db.update(x['doc']) return { 'success': True } except: log.error('Failed mark as read: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -107,24 +103,19 @@ class CoreNotifier(Notification): def listView(self, limit_offset = None, **kwargs): - db = get_session() - - q = db.query(Notif) + db = get_db() if limit_offset: splt = splitString(limit_offset) limit = splt[0] offset = 0 if len(splt) is 1 else splt[1] - q = q.limit(limit).offset(offset) + results = db.get_many('notification', limit = limit, offset = offset, with_doc = True) else: - q = q.limit(200) + results = db.get_many('notification', limit = 200, with_doc = True) - results = q.all() notifications = [] for n in results: - ndict = n.to_dict() - ndict['type'] = 'notification' - notifications.append(ndict) + notifications.append(n['doc']) return { 'success': True, @@ -141,7 +132,7 @@ class CoreNotifier(Notification): for message in messages: if message.get('time') > last_check: - message['sticky'] = True # Always sticky core messages + message['sticky'] = True # Always sticky core messages message_type = 'core.message.important' if message.get('important') else 'core.message' fireEvent(message_type, message = message.get('message'), data = message) @@ -155,29 +146,23 @@ class CoreNotifier(Notification): if not data: data = {} try: - db = get_session() + db = get_db() data['notification_type'] = listener if listener else 'unknown' - n = Notif( - message = toUnicode(message), - data = data - ) - db.add(n) - db.commit() + n = { + '_t': 'notification', + 'time': int(time.time()), + 'message': toUnicode(message), + 'data': data + } + db.insert(n) - ndict = n.to_dict() - ndict['type'] = 'notification' - ndict['time'] = time.time() - - self.frontend(type = listener, data = data) + self.frontend(type = listener, data = n) return True except: log.error('Failed notify: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() def frontend(self, type = 'notification', data = None, message = None): if not data: data = {} @@ -274,18 +259,15 @@ class CoreNotifier(Notification): messages = [] - # Get unread + # Get last message if init: - db = get_session() + db = get_db() - notifications = db.query(Notif) \ - .filter(or_(Notif.read == False, Notif.added > (time.time() - 259200))) \ - .all() + notifications = db.all('notification', with_doc = True) for n in notifications: - ndict = n.to_dict() - ndict['type'] = 'notification' - messages.append(ndict) + if n['doc'].get('time') > (time.time() - 604800): + messages.append(n['doc']) return { 'success': True, diff --git a/couchpotato/core/notifications/core/static/notification.js b/couchpotato/core/notifications/core/static/notification.js index 18d09e76..93bfa15d 100644 --- a/couchpotato/core/notifications/core/static/notification.js +++ b/couchpotato/core/notifications/core/static/notification.js @@ -14,17 +14,17 @@ var NotificationBase = new Class({ App.on('message', self.showMessage.bind(self)); // Add test buttons to settings page - App.addEvent('load', self.addTestButtons.bind(self)); + App.addEvent('loadSettings', self.addTestButtons.bind(self)); // Notification bar - 
self.notifications = [] + self.notifications = []; App.addEvent('load', function(){ App.block.notification = new Block.Menu(self, { 'button_class': 'icon2.eye-open', 'class': 'notification_menu', 'onOpen': self.markAsRead.bind(self) - }) + }); $(App.block.notification).inject(App.getBlock('search'), 'after'); self.badge = new Element('div.badge').inject(App.block.notification, 'top').hide(); @@ -40,7 +40,7 @@ var NotificationBase = new Class({ var self = this; var added = new Date(); - added.setTime(result.added*1000) + added.setTime(result.added*1000); result.el = App.getBlock('notification').addLink( new Element('span.'+(result.read ? 'read' : '' )).adopt( @@ -51,7 +51,7 @@ var NotificationBase = new Class({ self.notifications.include(result); if((result.data.important !== undefined || result.data.sticky !== undefined) && !result.read){ - var sticky = true + var sticky = true; App.trigger('message', [result.message, sticky, result]) } else if(!result.read){ @@ -62,7 +62,7 @@ var NotificationBase = new Class({ setBadge: function(value){ var self = this; - self.badge.set('text', value) + self.badge.set('text', value); self.badge[value ? 'show' : 'hide']() }, @@ -73,11 +73,11 @@ var NotificationBase = new Class({ if(!force_ids) { var rn = self.notifications.filter(function(n){ return !n.read && n.data.important === undefined - }) + }); - var ids = [] + var ids = []; rn.each(function(n){ - ids.include(n.id) + ids.include(n._id) }) } @@ -103,8 +103,10 @@ var NotificationBase = new Class({ self.request = Api.request('notification.listener', { 'data': {'init':true}, - 'onSuccess': self.processData.bind(self) - }).send() + 'onSuccess': function(json){ + self.processData(json, true) + } + }).send(); setInterval(function(){ @@ -120,11 +122,16 @@ var NotificationBase = new Class({ startPoll: function(){ var self = this; - if(self.stopped || (self.request && self.request.isRunning())) + if(self.stopped) return; + if(self.request && self.request.isRunning()) + self.request.cancel(); + self.request = Api.request('nonblock/notification.listener', { - 'onSuccess': self.processData.bind(self), + 'onSuccess': function(json){ + self.processData(json, false) + }, 'data': { 'last_id': self.last_id }, @@ -137,20 +144,20 @@ var NotificationBase = new Class({ stopPoll: function(){ if(this.request) - this.request.cancel() + this.request.cancel(); this.stopped = true; }, - processData: function(json){ + processData: function(json, init){ var self = this; // Process data - if(json){ + if(json && json.result){ Array.each(json.result, function(result){ - App.trigger(result.type, [result]); - if(result.message && result.read === undefined) + App.trigger(result._t || result.type, [result]); + if(result.message && result.read === undefined && !init) self.showMessage(result.message); - }) + }); if(json.result.length > 0) self.last_id = json.result.getLast().message_id @@ -176,18 +183,18 @@ var NotificationBase = new Class({ }, 10); var hide_message = function(){ - new_message.addClass('hide') + new_message.addClass('hide'); setTimeout(function(){ new_message.destroy(); }, 1000); - } + }; if(sticky) new_message.grab( new Element('a.close.icon2', { 'events': { 'click': function(){ - self.markAsRead([data.id]); + self.markAsRead([data._id]); hide_message(); } } @@ -202,7 +209,7 @@ var NotificationBase = new Class({ addTestButtons: function(){ var self = this; - var setting_page = App.getPage('Settings') + var setting_page = App.getPage('Settings'); setting_page.addEvent('create', function(){ 
Object.each(setting_page.tabs.notifications.groups, self.addTestButton.bind(self)) }) diff --git a/couchpotato/core/notifications/email/__init__.py b/couchpotato/core/notifications/email/__init__.py deleted file mode 100644 index aaf087b9..00000000 --- a/couchpotato/core/notifications/email/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -from .main import Email - - -def start(): - return Email() - -config = [{ - 'name': 'email', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'email', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'from', - 'label': 'Send e-mail from', - }, - { - 'name': 'to', - 'label': 'Send e-mail to', - }, - { - 'name': 'smtp_server', - 'label': 'SMTP server', - }, - { 'name': 'smtp_port', - 'label': 'SMTP server port', - 'default': '25', - 'type': 'int', - }, - { - 'name': 'ssl', - 'label': 'Enable SSL', - 'default': 0, - 'type': 'bool', - }, - { - 'name': 'starttls', - 'label': 'Enable StartTLS', - 'default': 0, - 'type': 'bool', - }, - { - 'name': 'smtp_user', - 'label': 'SMTP user', - }, - { - 'name': 'smtp_pass', - 'label': 'SMTP password', - 'type': 'password', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/email/main.py b/couchpotato/core/notifications/email_.py similarity index 56% rename from couchpotato/core/notifications/email/main.py rename to couchpotato/core/notifications/email_.py index b8544016..a63eb3de 100644 --- a/couchpotato/core/notifications/email/main.py +++ b/couchpotato/core/notifications/email_.py @@ -1,15 +1,19 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from couchpotato.environment import Env from email.mime.text import MIMEText from email.utils import formatdate, make_msgid import smtplib import traceback +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +from couchpotato.environment import Env + + log = CPLog(__name__) +autoload = 'Email' + class Email(Notification): @@ -66,3 +70,68 @@ class Email(Notification): log.error('E-mail failed: %s', traceback.format_exc()) return False + + +config = [{ + 'name': 'email', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'email', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'from', + 'label': 'Send e-mail from', + }, + { + 'name': 'to', + 'label': 'Send e-mail to', + }, + { + 'name': 'smtp_server', + 'label': 'SMTP server', + }, + { + 'name': 'smtp_port', + 'label': 'SMTP server port', + 'default': '25', + 'type': 'int', + }, + { + 'name': 'ssl', + 'label': 'Enable SSL', + 'default': 0, + 'type': 'bool', + }, + { + 'name': 'starttls', + 'label': 'Enable StartTLS', + 'default': 0, + 'type': 'bool', + }, + { + 'name': 'smtp_user', + 'label': 'SMTP user', + }, + { + 'name': 'smtp_pass', + 'label': 'SMTP password', + 'type': 'password', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git 
a/couchpotato/core/notifications/growl/main.py b/couchpotato/core/notifications/growl.py similarity index 61% rename from couchpotato/core/notifications/growl/main.py rename to couchpotato/core/notifications/growl.py index a3927ed2..e60e7ef7 100644 --- a/couchpotato/core/notifications/growl/main.py +++ b/couchpotato/core/notifications/growl.py @@ -1,12 +1,16 @@ +import traceback + from couchpotato.core.event import fireEvent, addEvent from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env from gntp import notifier -import traceback + log = CPLog(__name__) +autoload = 'Growl' + class Growl(Notification): @@ -15,6 +19,8 @@ class Growl(Notification): def __init__(self): super(Growl, self).__init__() + self.growl = None + if self.isEnabled(): addEvent('app.load', self.register) @@ -64,3 +70,44 @@ class Growl(Notification): return False + +config = [{ + 'name': 'growl', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'growl', + 'description': 'Version 1.4+', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'on_snatch', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + { + 'name': 'hostname', + 'description': 'Notify growl over network. Needs restart.', + 'advanced': True, + }, + { + 'name': 'port', + 'type': 'int', + 'advanced': True, + }, + { + 'name': 'password', + 'type': 'password', + 'advanced': True, + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/growl/__init__.py b/couchpotato/core/notifications/growl/__init__.py deleted file mode 100644 index dd01cb91..00000000 --- a/couchpotato/core/notifications/growl/__init__.py +++ /dev/null @@ -1,46 +0,0 @@ -from .main import Growl - - -def start(): - return Growl() - -config = [{ - 'name': 'growl', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'growl', - 'description': 'Version 1.4+', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'on_snatch', - 'default': False, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - { - 'name': 'hostname', - 'description': 'Notify growl over network. 
Needs restart.', - 'advanced': True, - }, - { - 'name': 'port', - 'type': 'int', - 'advanced': True, - }, - { - 'name': 'password', - 'type': 'password', - 'advanced': True, - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/nmj/main.py b/couchpotato/core/notifications/nmj.py similarity index 83% rename from couchpotato/core/notifications/nmj/main.py rename to couchpotato/core/notifications/nmj.py index 967b70e7..665837f1 100644 --- a/couchpotato/core/notifications/nmj/main.py +++ b/couchpotato/core/notifications/nmj.py @@ -1,10 +1,12 @@ +import re +import telnetlib + from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -import re -import telnetlib + try: import xml.etree.cElementTree as etree @@ -13,14 +15,18 @@ except ImportError: log = CPLog(__name__) +autoload = 'NMJ' + class NMJ(Notification): + # noinspection PyMissingConstructor def __init__(self): - addEvent('renamer.after', self.addToLibrary) addApiView(self.testNotifyName(), self.test) addApiView('notify.nmj.auto_config', self.autoConfig) + addEvent('renamer.after', self.addToLibrary) + def autoConfig(self, host = 'localhost', **kwargs): mount = '' @@ -118,3 +124,31 @@ class NMJ(Notification): } +config = [{ + 'name': 'nmj', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'nmj', + 'label': 'NMJ', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'host', + 'default': 'localhost', + }, + { + 'name': 'database', + }, + { + 'name': 'mount', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/nmj/__init__.py b/couchpotato/core/notifications/nmj/__init__.py deleted file mode 100644 index 461a450e..00000000 --- a/couchpotato/core/notifications/nmj/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import NMJ - - -def start(): - return NMJ() - -config = [{ - 'name': 'nmj', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'nmj', - 'label': 'NMJ', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'host', - 'default': 'localhost', - }, - { - 'name': 'database', - }, - { - 'name': 'mount', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/notifymyandroid/__init__.py b/couchpotato/core/notifications/notifymyandroid.py similarity index 53% rename from couchpotato/core/notifications/notifymyandroid/__init__.py rename to couchpotato/core/notifications/notifymyandroid.py index 7d4f4aeb..ed7a24c8 100644 --- a/couchpotato/core/notifications/notifymyandroid/__init__.py +++ b/couchpotato/core/notifications/notifymyandroid.py @@ -1,8 +1,41 @@ -from .main import NotifyMyAndroid +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +import pynma +import six + +log = CPLog(__name__) + +autoload = 'NotifyMyAndroid' -def start(): - return NotifyMyAndroid() +class NotifyMyAndroid(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + nma = pynma.PyNMA() + keys = splitString(self.conf('api_key')) + nma.addkey(keys) + nma.developerkey(self.conf('dev_key')) + + response = nma.push( + application = self.default_title, + event = message.split(' ')[0], + description = message, + priority = 
self.conf('priority'), + batch_mode = len(keys) > 1 + ) + + successful = 0 + for key in keys: + if not response[str(key)]['code'] == six.u('200'): + log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message'])) + else: + successful += 1 + + return successful == len(keys) + config = [{ 'name': 'notifymyandroid', diff --git a/couchpotato/core/notifications/notifymyandroid/main.py b/couchpotato/core/notifications/notifymyandroid/main.py deleted file mode 100644 index 16465101..00000000 --- a/couchpotato/core/notifications/notifymyandroid/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import pynma -import six - -log = CPLog(__name__) - - -class NotifyMyAndroid(Notification): - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - nma = pynma.PyNMA() - keys = splitString(self.conf('api_key')) - nma.addkey(keys) - nma.developerkey(self.conf('dev_key')) - - response = nma.push( - application = self.default_title, - event = message.split(' ')[0], - description = message, - priority = self.conf('priority'), - batch_mode = len(keys) > 1 - ) - - successful = 0 - for key in keys: - if not response[str(key)]['code'] == six.u('200'): - log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message'])) - else: - successful += 1 - - return successful == len(keys) diff --git a/couchpotato/core/notifications/notifymywp/__init__.py b/couchpotato/core/notifications/notifymywp.py similarity index 57% rename from couchpotato/core/notifications/notifymywp/__init__.py rename to couchpotato/core/notifications/notifymywp.py index 4fcf1a9a..262fd8d1 100644 --- a/couchpotato/core/notifications/notifymywp/__init__.py +++ b/couchpotato/core/notifications/notifymywp.py @@ -1,8 +1,31 @@ -from .main import NotifyMyWP +from couchpotato.core.helpers.variable import splitString +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +from pynmwp import PyNMWP +import six + +log = CPLog(__name__) + +autoload = 'NotifyMyWP' -def start(): - return NotifyMyWP() +class NotifyMyWP(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + keys = splitString(self.conf('api_key')) + p = PyNMWP(keys, self.conf('dev_key')) + + response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1) + + for key in keys: + if not response[key]['Code'] == six.u('200'): + log.error('Could not send notification to NotifyMyWindowsPhone (%s). 
%s', (key, response[key]['message'])) + return False + + return response + config = [{ 'name': 'notifymywp', diff --git a/couchpotato/core/notifications/notifymywp/main.py b/couchpotato/core/notifications/notifymywp/main.py deleted file mode 100644 index 74010441..00000000 --- a/couchpotato/core/notifications/notifymywp/main.py +++ /dev/null @@ -1,25 +0,0 @@ -from couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from pynmwp import PyNMWP -import six - -log = CPLog(__name__) - - -class NotifyMyWP(Notification): - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - keys = splitString(self.conf('api_key')) - p = PyNMWP(keys, self.conf('dev_key')) - - response = p.push(application = self.default_title, event = message, description = message, priority = self.conf('priority'), batch_mode = len(keys) > 1) - - for key in keys: - if not response[key]['Code'] == six.u('200'): - log.error('Could not send notification to NotifyMyWindowsPhone (%s). %s', (key, response[key]['message'])) - return False - - return response diff --git a/couchpotato/core/notifications/plex/__init__.py b/couchpotato/core/notifications/plex/__init__.py index 0de92ca3..4a64ec5b 100755 --- a/couchpotato/core/notifications/plex/__init__.py +++ b/couchpotato/core/notifications/plex/__init__.py @@ -1,7 +1,7 @@ from .main import Plex -def start(): +def autoload(): return Plex() config = [{ diff --git a/couchpotato/core/notifications/plex/client.py b/couchpotato/core/notifications/plex/client.py index 8864230d..84cf7af6 100644 --- a/couchpotato/core/notifications/plex/client.py +++ b/couchpotato/core/notifications/plex/client.py @@ -1,9 +1,11 @@ import json + from couchpotato import CPLog from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import tryUrlencode import requests + log = CPLog(__name__) diff --git a/couchpotato/core/notifications/plex/server.py b/couchpotato/core/notifications/plex/server.py index b66db8fe..cd11f49b 100644 --- a/couchpotato/core/notifications/plex/server.py +++ b/couchpotato/core/notifications/plex/server.py @@ -1,9 +1,10 @@ from datetime import timedelta, datetime -from couchpotato.core.helpers.variable import cleanHost -from couchpotato import CPLog from urlparse import urlparse import traceback +from couchpotato.core.helpers.variable import cleanHost +from couchpotato import CPLog + try: import xml.etree.cElementTree as etree diff --git a/couchpotato/core/notifications/prowl/__init__.py b/couchpotato/core/notifications/prowl.py similarity index 50% rename from couchpotato/core/notifications/prowl/__init__.py rename to couchpotato/core/notifications/prowl.py index 3721a0ad..fdece326 100644 --- a/couchpotato/core/notifications/prowl/__init__.py +++ b/couchpotato/core/notifications/prowl.py @@ -1,8 +1,43 @@ -from .main import Prowl +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification -def start(): - return Prowl() +log = CPLog(__name__) + +autoload = 'Prowl' + + +class Prowl(Notification): + + urls = { + 'api': 'https://api.prowlapp.com/publicapi/add' + } + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'apikey': self.conf('api_key'), + 'application': self.default_title, + 'description': toUnicode(message), + 'priority': self.conf('priority'), 
+ } + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) + log.info('Prowl notifications sent.') + return True + except: + log.error('Prowl failed: %s', traceback.format_exc()) + + return False + config = [{ 'name': 'prowl', diff --git a/couchpotato/core/notifications/prowl/main.py b/couchpotato/core/notifications/prowl/main.py deleted file mode 100644 index b3385863..00000000 --- a/couchpotato/core/notifications/prowl/main.py +++ /dev/null @@ -1,35 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import traceback - -log = CPLog(__name__) - - -class Prowl(Notification): - - urls = { - 'api': 'https://api.prowlapp.com/publicapi/add' - } - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - data = { - 'apikey': self.conf('api_key'), - 'application': self.default_title, - 'description': toUnicode(message), - 'priority': self.conf('priority'), - } - headers = { - 'Content-type': 'application/x-www-form-urlencoded' - } - - try: - self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) - log.info('Prowl notifications sent.') - return True - except: - log.error('Prowl failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/pushalot/__init__.py b/couchpotato/core/notifications/pushalot.py similarity index 54% rename from couchpotato/core/notifications/pushalot/__init__.py rename to couchpotato/core/notifications/pushalot.py index ad0c853f..fa781bc5 100644 --- a/couchpotato/core/notifications/pushalot/__init__.py +++ b/couchpotato/core/notifications/pushalot.py @@ -1,8 +1,46 @@ -from .main import Pushalot +import traceback + +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification -def start(): - return Pushalot() +log = CPLog(__name__) + +autoload = 'Pushalot' + + +class Pushalot(Notification): + + urls = { + 'api': 'https://pushalot.com/api/sendmessage' + } + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + data = { + 'AuthorizationToken': self.conf('auth_token'), + 'Title': self.default_title, + 'Body': toUnicode(message), + 'IsImportant': self.conf('important'), + 'IsSilent': self.conf('silent'), + 'Image': toUnicode(self.getNotificationImage('medium') + '?1'), + 'Source': toUnicode(self.default_title) + } + + headers = { + 'Content-type': 'application/x-www-form-urlencoded' + } + + try: + self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) + return True + except: + log.error('PushAlot failed: %s', traceback.format_exc()) + + return False + config = [{ 'name': 'pushalot', diff --git a/couchpotato/core/notifications/pushalot/main.py b/couchpotato/core/notifications/pushalot/main.py deleted file mode 100644 index 306ee1d1..00000000 --- a/couchpotato/core/notifications/pushalot/main.py +++ /dev/null @@ -1,38 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -import traceback - -log = CPLog(__name__) - - -class Pushalot(Notification): - - urls = { - 'api': 'https://pushalot.com/api/sendmessage' - } - - def notify(self, message = '', data = None, listener = None): - 
if not data: data = {} - - data = { - 'AuthorizationToken': self.conf('auth_token'), - 'Title': self.default_title, - 'Body': toUnicode(message), - 'IsImportant': self.conf('important'), - 'IsSilent': self.conf('silent'), - 'Image': toUnicode(self.getNotificationImage('medium') + '?1'), - 'Source': toUnicode(self.default_title) - } - - headers = { - 'Content-type': 'application/x-www-form-urlencoded' - } - - try: - self.urlopen(self.urls['api'], headers = headers, data = data, show_error = False) - return True - except: - log.error('PushAlot failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/pushbullet/main.py b/couchpotato/core/notifications/pushbullet.py similarity index 61% rename from couchpotato/core/notifications/pushbullet/main.py rename to couchpotato/core/notifications/pushbullet.py index 487fb3aa..361294e4 100644 --- a/couchpotato/core/notifications/pushbullet/main.py +++ b/couchpotato/core/notifications/pushbullet.py @@ -1,16 +1,20 @@ +import base64 +import json + from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -import base64 -import json + log = CPLog(__name__) +autoload = 'Pushbullet' + class Pushbullet(Notification): - url = 'https://api.pushbullet.com/api/%s' + url = 'https://api.pushbullet.com/v2/%s' def notify(self, message = '', data = None, listener = None): if not data: data = {} @@ -21,11 +25,7 @@ class Pushbullet(Notification): # Get all the device IDs linked to this user if not len(devices): - response = self.request('devices') - if not response: - return False - - devices += [device.get('id') for device in response['devices']] + devices = [None] successful = 0 for device in devices: @@ -67,3 +67,39 @@ class Pushbullet(Notification): log.debug(ex) return None + + +config = [{ + 'name': 'pushbullet', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'pushbullet', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'label': 'User API Key' + }, + { + 'name': 'devices', + 'default': '', + 'advanced': True, + 'description': 'IDs of devices to send notifications to, empty = all devices' + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/pushbullet/__init__.py b/couchpotato/core/notifications/pushbullet/__init__.py deleted file mode 100644 index c52e7781..00000000 --- a/couchpotato/core/notifications/pushbullet/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from .main import Pushbullet - - -def start(): - return Pushbullet() - -config = [{ - 'name': 'pushbullet', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'pushbullet', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'api_key', - 'label': 'User API Key' - }, - { - 'name': 'devices', - 'default': '', - 'advanced': True, - 'description': 'IDs of devices to send notifications to, empty = all devices' - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/pushover.py b/couchpotato/core/notifications/pushover.py 
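Editor's note on the Pushbullet hunk above (not part of the patch): the base URL moves from `/api/%s` to `/v2/%s`, and the per-user device lookup is replaced by `devices = [None]`. In the v2 API, omitting a device identifier from a push broadcasts it to all of the user's devices, which is presumably why the extra `devices` request could be dropped. A rough sketch of such a v2 call, under those assumptions (Python 2, matching the codebase; field names per the public v2 docs, not taken from this diff):

```python
# Hedged sketch of a Pushbullet v2 "note" push.
import base64
import json
import urllib2

def push_note(api_key, title, body, device_iden = None):
    payload = {'type': 'note', 'title': title, 'body': body}
    if device_iden:
        payload['device_iden'] = device_iden  # omit to send to every device
    req = urllib2.Request(
        'https://api.pushbullet.com/v2/pushes',
        data = json.dumps(payload),
        headers = {
            'Content-Type': 'application/json',
            # Basic auth with the access token as the username
            'Authorization': 'Basic ' + base64.b64encode(api_key + ':'),
        }
    )
    return json.loads(urllib2.urlopen(req).read())
```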
new file mode 100644 index 00000000..d9ef226c --- /dev/null +++ b/couchpotato/core/notifications/pushover.py @@ -0,0 +1,99 @@ +from httplib import HTTPSConnection + +from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode +from couchpotato.core.helpers.variable import getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + +log = CPLog(__name__) + +autoload = 'Pushover' + + +class Pushover(Notification): + + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + http_handler = HTTPSConnection("api.pushover.net:443") + + api_data = { + 'user': self.conf('user_key'), + 'token': self.conf('api_token'), + 'message': toUnicode(message), + 'priority': self.conf('priority'), + 'sound': self.conf('sound'), + } + + if data and data.get('identifier'): + api_data.update({ + 'url': toUnicode('http://www.imdb.com/title/%s/' % data['identifier']), + 'url_title': toUnicode('%s on IMDb' % getTitle(data)), + }) + + http_handler.request('POST', '/1/messages.json', + headers = {'Content-type': 'application/x-www-form-urlencoded'}, + body = tryUrlencode(api_data) + ) + + response = http_handler.getresponse() + request_status = response.status + + if request_status == 200: + log.info('Pushover notifications sent.') + return True + elif request_status == 401: + log.error('Pushover auth failed: %s', response.reason) + return False + else: + log.error('Pushover notification failed: %s', request_status) + return False + + +config = [{ + 'name': 'pushover', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'pushover', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'user_key', + 'description': 'Register on pushover.net to get one.' + }, + { + 'name': 'api_token', + 'description': 'Register on pushover.net to get one.', + 'advanced': True, + 'default': 'YkxHMYDZp285L265L3IwH3LmzkTaCy', + }, + { + 'name': 'priority', + 'default': 0, + 'type': 'dropdown', + 'values': [('Normal', 0), ('High', 1)], + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + { + 'name': 'sound', + 'advanced': True, + 'description': 'Define custom sound for Pushover alert.' + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/pushover/__init__.py b/couchpotato/core/notifications/pushover/__init__.py deleted file mode 100644 index da764860..00000000 --- a/couchpotato/core/notifications/pushover/__init__.py +++ /dev/null @@ -1,40 +0,0 @@ -from .main import Pushover - - -def start(): - return Pushover() - -config = [{ - 'name': 'pushover', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'pushover', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'user_key', - 'description': 'Register on pushover.net to get one.' 
- }, - { - 'name': 'priority', - 'default': 0, - 'type': 'dropdown', - 'values': [('Normal', 0), ('High', 1)], - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/pushover/main.py b/couchpotato/core/notifications/pushover/main.py deleted file mode 100644 index ba954a54..00000000 --- a/couchpotato/core/notifications/pushover/main.py +++ /dev/null @@ -1,49 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.variable import getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from httplib import HTTPSConnection - -log = CPLog(__name__) - - -class Pushover(Notification): - - app_token = 'YkxHMYDZp285L265L3IwH3LmzkTaCy' - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - http_handler = HTTPSConnection("api.pushover.net:443") - - api_data = { - 'user': self.conf('user_key'), - 'token': self.app_token, - 'message': toUnicode(message), - 'priority': self.conf('priority'), - } - - if data and data.get('library'): - api_data.update({ - 'url': toUnicode('http://www.imdb.com/title/%s/' % data['library']['identifier']), - 'url_title': toUnicode('%s on IMDb' % getTitle(data['library'])), - }) - - http_handler.request('POST', - "/1/messages.json", - headers = {'Content-type': 'application/x-www-form-urlencoded'}, - body = tryUrlencode(api_data) - ) - - response = http_handler.getresponse() - request_status = response.status - - if request_status == 200: - log.info('Pushover notifications sent.') - return True - elif request_status == 401: - log.error('Pushover auth failed: %s', response.reason) - return False - else: - log.error('Pushover notification failed.') - return False diff --git a/couchpotato/core/notifications/synoindex/main.py b/couchpotato/core/notifications/synoindex.py similarity index 64% rename from couchpotato/core/notifications/synoindex/main.py rename to couchpotato/core/notifications/synoindex.py index ec7a64ef..b14e1a03 100644 --- a/couchpotato/core/notifications/synoindex/main.py +++ b/couchpotato/core/notifications/synoindex.py @@ -1,18 +1,24 @@ -from couchpotato.core.event import addEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification import os import subprocess +from couchpotato.api import addApiView +from couchpotato.core.event import addEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification + + log = CPLog(__name__) +autoload = 'Synoindex' + class Synoindex(Notification): index_path = '/usr/syno/bin/synoindex' def __init__(self): - super(Synoindex, self).__init__() + addApiView(self.testNotifyName(), self.test) + addEvent('renamer.after', self.addToLibrary) def addToLibrary(self, message = None, group = None): @@ -35,3 +41,23 @@ class Synoindex(Notification): return { 'success': os.path.isfile(self.index_path) } + + +config = [{ + 'name': 'synoindex', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'synoindex', + 'description': 'Automatically adds index to Synology Media Server.', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + } + ], + } + ], +}] diff --git a/couchpotato/core/notifications/synoindex/__init__.py b/couchpotato/core/notifications/synoindex/__init__.py deleted
file mode 100644 index 89d07b06..00000000 --- a/couchpotato/core/notifications/synoindex/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -from .main import Synoindex - - -def start(): - return Synoindex() - -config = [{ - 'name': 'synoindex', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'synoindex', - 'description': 'Automaticly adds index to Synology Media Server.', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - } - ], - } - ], -}] diff --git a/couchpotato/core/notifications/toasty/main.py b/couchpotato/core/notifications/toasty.py similarity index 54% rename from couchpotato/core/notifications/toasty/main.py rename to couchpotato/core/notifications/toasty.py index ea1f2192..919a9d1d 100644 --- a/couchpotato/core/notifications/toasty/main.py +++ b/couchpotato/core/notifications/toasty.py @@ -1,10 +1,14 @@ +import traceback + from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification -import traceback + log = CPLog(__name__) +autoload = 'Toasty' + class Toasty(Notification): @@ -29,3 +33,33 @@ class Toasty(Notification): log.error('Toasty failed: %s', traceback.format_exc()) return False + + +config = [{ + 'name': 'toasty', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'toasty', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'api_key', + 'label': 'Device ID', + }, + { + 'name': 'on_snatch', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/toasty/__init__.py b/couchpotato/core/notifications/toasty/__init__.py deleted file mode 100644 index 31e055a0..00000000 --- a/couchpotato/core/notifications/toasty/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -from .main import Toasty - - -def start(): - return Toasty() - -config = [{ - 'name': 'toasty', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'toasty', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'api_key', - 'label': 'Device ID', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/trakt/main.py b/couchpotato/core/notifications/trakt.py similarity index 63% rename from couchpotato/core/notifications/trakt/main.py rename to couchpotato/core/notifications/trakt.py index 399f76d8..8f35deab 100644 --- a/couchpotato/core/notifications/trakt/main.py +++ b/couchpotato/core/notifications/trakt.py @@ -1,8 +1,11 @@ +from couchpotato.core.helpers.variable import getTitle from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification log = CPLog(__name__) +autoload = 'Trakt' + class Trakt(Notification): @@ -35,9 +38,9 @@ class Trakt(Notification): 'username': self.conf('automation_username'), 'password': self.conf('automation_password'), 'movies': [{ - 'imdb_id': data['library']['identifier'], - 'title': data['library']['titles'][0]['title'], - 'year': data['library']['year'] + 'imdb_id': data['identifier'], + 'title': getTitle(data), + 'year': data['info']['year'] }] if data else [] } @@ -61,3 +64,30 @@ class Trakt(Notification): log.error('Failed to call trakt, 
check your login.') return False + + +config = [{ + 'name': 'trakt', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'trakt', + 'label': 'Trakt', + 'description': 'Add movies to your collection once downloaded. Fill in your username and password in the Automation Trakt settings.', + 'options': [ + { + 'name': 'notification_enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'remove_watchlist_enabled', + 'label': 'Remove from watchlist', + 'default': False, + 'type': 'bool', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/trakt/__init__.py b/couchpotato/core/notifications/trakt/__init__.py deleted file mode 100644 index 20e2e3f9..00000000 --- a/couchpotato/core/notifications/trakt/__init__.py +++ /dev/null @@ -1,31 +0,0 @@ -from .main import Trakt - - -def start(): - return Trakt() - -config = [{ - 'name': 'trakt', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'trakt', - 'label': 'Trakt', - 'description': 'add movies to your collection once downloaded. Fill in your username and password in the Automation Trakt settings', - 'options': [ - { - 'name': 'notification_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'remove_watchlist_enabled', - 'label': 'Remove from watchlist', - 'default': False, - 'type': 'bool', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/twitter/__init__.py b/couchpotato/core/notifications/twitter/__init__.py index 1b9c7699..b6b42bb5 100644 --- a/couchpotato/core/notifications/twitter/__init__.py +++ b/couchpotato/core/notifications/twitter/__init__.py @@ -1,7 +1,7 @@ from .main import Twitter -def start(): +def autoload(): return Twitter() config = [{ diff --git a/couchpotato/core/notifications/twitter/main.py b/couchpotato/core/notifications/twitter/main.py index 559c830f..0d02191e 100644 --- a/couchpotato/core/notifications/twitter/main.py +++ b/couchpotato/core/notifications/twitter/main.py @@ -1,3 +1,5 @@ +from urlparse import parse_qsl + from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import tryUrlencode from couchpotato.core.helpers.variable import cleanHost @@ -5,9 +7,9 @@ from couchpotato.core.logger import CPLog from couchpotato.core.notifications.base import Notification from couchpotato.environment import Env from pytwitter import Api -from urlparse import parse_qsl import oauth2 + log = CPLog(__name__) diff --git a/couchpotato/core/notifications/twitter/static/twitter.js b/couchpotato/core/notifications/twitter/static/twitter.js index 2c4e6e31..75c96a8e 100644 --- a/couchpotato/core/notifications/twitter/static/twitter.js +++ b/couchpotato/core/notifications/twitter/static/twitter.js @@ -2,7 +2,7 @@ var TwitterNotification = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addRegisterButton.bind(self)); + App.addEvent('loadSettings', self.addRegisterButton.bind(self)); }, addRegisterButton: function(){ @@ -59,7 +59,7 @@ var TwitterNotification = new Class({ ).inject(fieldset.getElement('.test_button'), 'before'); }) - }, + } }); diff --git a/couchpotato/core/notifications/xbmc/main.py b/couchpotato/core/notifications/xbmc.py old mode 100755 new mode 100644 similarity index 68% rename from couchpotato/core/notifications/xbmc/main.py rename to couchpotato/core/notifications/xbmc.py index bfda85e1..8dbf936b --- a/couchpotato/core/notifications/xbmc/main.py +++ b/couchpotato/core/notifications/xbmc.py @@ -1,16 +1,20 @@ -from
couchpotato.core.helpers.variable import splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification import base64 import json import socket import traceback import urllib + +from couchpotato.core.helpers.variable import splitString, getTitle +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification import requests from requests.packages.urllib3.exceptions import MaxRetryError + log = CPLog(__name__) +autoload = 'XBMC' + class XBMC(Notification): @@ -32,7 +36,7 @@ class XBMC(Notification): if self.use_json_notifications.get(host): calls = [ - ('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}), + ('GUI.ShowNotification', None, {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}), ] if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0): @@ -40,7 +44,7 @@ class XBMC(Notification): if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])): param = {'directory': data['destination_dir']} - calls.append(('VideoLibrary.Scan', param)) + calls.append(('VideoLibrary.Scan', None, param)) max_successful += len(calls) response = self.request(host, calls) @@ -48,7 +52,7 @@ class XBMC(Notification): response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message}) if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0): - response += self.request(host, [('VideoLibrary.Scan', {})]) + response += self.request(host, [('VideoLibrary.Scan', None, {})]) max_successful += 1 max_successful += 1 @@ -71,7 +75,7 @@ class XBMC(Notification): # XBMC JSON-RPC version request response = self.request(host, [ - ('JSONRPC.Version', {}) + ('JSONRPC.Version', None, {}) ]) for result in response: if result.get('result') and type(result['result']['version']).__name__ == 'int': @@ -85,14 +89,14 @@ class XBMC(Notification): self.use_json_notifications[host] = False # send the text message - resp = self.notifyXBMCnoJSON(host, {'title':self.default_title, 'message':message}) - for result in resp: - if result.get('result') and result['result'] == 'OK': + resp = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message}) + for r in resp: + if r.get('result') and r['result'] == 'OK': log.debug('Message delivered successfully!') success = True break - elif result.get('error'): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) + elif r.get('error'): + log.error('XBMC error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code'])) break elif result.get('result') and type(result['result']['version']).__name__ == 'dict': @@ -108,14 +112,14 @@ class XBMC(Notification): self.use_json_notifications[host] = True # send the text message - resp = self.request(host, [('GUI.ShowNotification', {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})]) - for result in resp: - if result.get('result') and result['result'] == 'OK': + resp = self.request(host, [('GUI.ShowNotification', None, {'title':self.default_title, 'message':message, 'image': self.getNotificationImage('small')})]) + for r in resp: + if r.get('result') and r['result'] == 'OK': log.debug('Message delivered successfully!') success = True break - elif 
result.get('error'): - log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code'])) + elif r.get('error'): + log.error('XBMC error; %s: %s (%s)', (r['id'], r['error']['message'], r['error']['code'])) break # error getting version info (we do have contact with XBMC though) @@ -131,7 +135,7 @@ class XBMC(Notification): server = 'http://%s/xbmcCmds/' % host # Notification(title, message [, timeout , image]) - cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(data['title']), urllib.quote(data['message']), urllib.quote(self.getNotificationImage('medium'))) + cmd = "xbmcHttp?command=ExecBuiltIn(Notification(%s,%s,'',%s))" % (urllib.quote(getTitle(data)), urllib.quote(data['message']), urllib.quote(self.getNotificationImage('medium'))) server += cmd # I have no idea what to set to, just tried text/plain and seems to be working :) @@ -180,12 +184,13 @@ class XBMC(Notification): data = [] for req in do_requests: - method, kwargs = req + method, id, kwargs = req + data.append({ 'method': method, 'params': kwargs, 'jsonrpc': '2.0', - 'id': method, + 'id': id if id else method, }) data = json.dumps(data) @@ -210,3 +215,66 @@ class XBMC(Notification): log.error('Failed sending request to XBMC: %s', traceback.format_exc()) return [] + +config = [{ + 'name': 'xbmc', + 'groups': [ + { + 'tab': 'notifications', + 'list': 'notification_providers', + 'name': 'xbmc', + 'label': 'XBMC', + 'description': 'v11 (Eden), v12 (Frodo), v13 (Gotham)', + 'options': [ + { + 'name': 'enabled', + 'default': 0, + 'type': 'enabler', + }, + { + 'name': 'host', + 'default': 'localhost:8080', + }, + { + 'name': 'username', + 'default': 'xbmc', + }, + { + 'name': 'password', + 'default': '', + 'type': 'password', + }, + { + 'name': 'only_first', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': 'Only update the first host when a movie is snatched, useful for synced XBMC', + }, + { + 'name': 'remote_dir_scan', + 'label': 'Remote Folder Scan', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': ('Only scan new movie folder at remote XBMC servers.', 'Useful if the XBMC path is different from the path CPS uses.'), + }, + { + 'name': 'force_full_scan', + 'label': 'Always do a full scan', + 'default': 0, + 'type': 'bool', + 'advanced': True, + 'description': ('Do a full scan instead of only the new movie.', 'Useful if the XBMC path is different from the path CPS uses.'), + }, + { + 'name': 'on_snatch', + 'default': False, + 'type': 'bool', + 'advanced': True, + 'description': 'Also send message when movie is snatched.', + }, + ], + } + ], +}] diff --git a/couchpotato/core/notifications/xbmc/__init__.py b/couchpotato/core/notifications/xbmc/__init__.py deleted file mode 100644 index 34fed632..00000000 --- a/couchpotato/core/notifications/xbmc/__init__.py +++ /dev/null @@ -1,68 +0,0 @@ -from .main import XBMC - - -def start(): - return XBMC() - -config = [{ - 'name': 'xbmc', - 'groups': [ - { - 'tab': 'notifications', - 'list': 'notification_providers', - 'name': 'xbmc', - 'label': 'XBMC', - 'description': 'v11 (Eden) and v12 (Frodo)', - 'options': [ - { - 'name': 'enabled', - 'default': 0, - 'type': 'enabler', - }, - { - 'name': 'host', - 'default': 'localhost:8080', - }, - { - 'name': 'username', - 'default': 'xbmc', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'only_first', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Only update the first host when movie
snatched, useful for synced XBMC', - }, - { - 'name': 'remote_dir_scan', - 'label': 'Remote Folder Scan', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Only scan new movie folder at remote XBMC servers. Works if movie location is the same.', - }, - { - 'name': 'force_full_scan', - 'label': 'Always do a full scan', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Do a full scan instead of only the new movie. Useful if the XBMC path is different from the path CPS uses.', - }, - { - 'name': 'on_snatch', - 'default': 0, - 'type': 'bool', - 'advanced': True, - 'description': 'Also send message when movie is snatched.', - }, - ], - } - ], -}] diff --git a/couchpotato/core/notifications/xmpp/main.py b/couchpotato/core/notifications/xmpp/main.py deleted file mode 100644 index 0011e41c..00000000 --- a/couchpotato/core/notifications/xmpp/main.py +++ /dev/null @@ -1,43 +0,0 @@ -from couchpotato.core.logger import CPLog -from couchpotato.core.notifications.base import Notification -from time import sleep -import traceback -import xmpp - -log = CPLog(__name__) - - -class Xmpp(Notification): - - def notify(self, message = '', data = None, listener = None): - if not data: data = {} - - try: - jid = xmpp.protocol.JID(self.conf('username')) - client = xmpp.Client(jid.getDomain(), debug = []) - - # Connect - if not client.connect(server = (self.conf('hostname'), self.conf('port'))): - log.error('XMPP failed: Connection to server failed.') - return False - - # Authenticate - if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()): - log.error('XMPP failed: Failed to authenticate.') - return False - - # Send message - client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat')) - - # Disconnect - # some older servers will not send the message if you disconnect immediately after sending - sleep(1) - client.disconnect() - - log.info('XMPP notifications sent.') - return True - - except: - log.error('XMPP failed: %s', traceback.format_exc()) - - return False diff --git a/couchpotato/core/notifications/xmpp/__init__.py b/couchpotato/core/notifications/xmpp_.py similarity index 52% rename from couchpotato/core/notifications/xmpp/__init__.py rename to couchpotato/core/notifications/xmpp_.py index 0e3e14d9..f9916cd0 100644 --- a/couchpotato/core/notifications/xmpp/__init__.py +++ b/couchpotato/core/notifications/xmpp_.py @@ -1,8 +1,51 @@ -from .main import Xmpp +from time import sleep +import traceback + +from couchpotato.core.logger import CPLog +from couchpotato.core.notifications.base import Notification +import xmpp -def start(): - return Xmpp() +log = CPLog(__name__) + +autoload = 'Xmpp' + + +class Xmpp(Notification): + + def notify(self, message = '', data = None, listener = None): + if not data: data = {} + + try: + jid = xmpp.protocol.JID(self.conf('username')) + client = xmpp.Client(jid.getDomain(), debug = []) + + # Connect + if not client.connect(server = (self.conf('hostname'), self.conf('port'))): + log.error('XMPP failed: Connection to server failed.') + return False + + # Authenticate + if not client.auth(jid.getNode(), self.conf('password'), resource = jid.getResource()): + log.error('XMPP failed: Failed to authenticate.') + return False + + # Send message + client.send(xmpp.protocol.Message(to = self.conf('to'), body = message, typ = 'chat')) + + # Disconnect + # some older servers will not send the message if you disconnect immediately after sending + sleep(1) + client.disconnect() + + 
log.info('XMPP notifications sent.') + return True + + except: + log.error('XMPP failed: %s', traceback.format_exc()) + + return False + config = [{ 'name': 'xmpp', diff --git a/couchpotato/core/plugins/automation/__init__.py b/couchpotato/core/plugins/automation.py similarity index 54% rename from couchpotato/core/plugins/automation/__init__.py rename to couchpotato/core/plugins/automation.py index 482a0090..39d7c9e7 100644 --- a/couchpotato/core/plugins/automation/__init__.py +++ b/couchpotato/core/plugins/automation.py @@ -1,8 +1,55 @@ -from .main import Automation +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + +log = CPLog(__name__) + +autoload = 'Automation' -def start(): - return Automation() +class Automation(Plugin): + + def __init__(self): + + addEvent('app.load', self.setCrons) + + if not Env.get('dev'): + addEvent('app.load', self.addMovies) + + addEvent('setting.save.automation.hour.after', self.setCrons) + + def setCrons(self): + fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12)) + + def addMovies(self): + + movies = fireEvent('automation.get_movies', merge = True) + movie_ids = [] + + for imdb_id in movies: + + if self.shuttingDown(): + break + + prop_name = 'automation.added.%s' % imdb_id + added = Env.prop(prop_name, default = False) + if not added: + added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_after = True, single = True) + if added_movie: + movie_ids.append(added_movie['_id']) + Env.prop(prop_name, True) + + for movie_id in movie_ids: + + if self.shuttingDown(): + break + + movie_dict = fireEvent('media.get', movie_id, single = True) + fireEvent('movie.searcher.single', movie_dict) + + return True + config = [{ 'name': 'automation', diff --git a/couchpotato/core/plugins/automation/main.py b/couchpotato/core/plugins/automation/main.py deleted file mode 100644 index 2edcd3be..00000000 --- a/couchpotato/core/plugins/automation/main.py +++ /dev/null @@ -1,49 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env - -log = CPLog(__name__) - - -class Automation(Plugin): - - def __init__(self): - - addEvent('app.load', self.setCrons) - - if not Env.get('dev'): - addEvent('app.load', self.addMovies) - - addEvent('setting.save.automation.hour.after', self.setCrons) - - def setCrons(self): - fireEvent('schedule.interval', 'automation.add_movies', self.addMovies, hours = self.conf('hour', default = 12)) - - def addMovies(self): - - movies = fireEvent('automation.get_movies', merge = True) - movie_ids = [] - - for imdb_id in movies: - - if self.shuttingDown(): - break - - prop_name = 'automation.added.%s' % imdb_id - added = Env.prop(prop_name, default = False) - if not added: - added_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False, search_after = False, update_library = True, single = True) - if added_movie: - movie_ids.append(added_movie['id']) - Env.prop(prop_name, True) - - for movie_id in movie_ids: - - if self.shuttingDown(): - break - - movie_dict = fireEvent('media.get', movie_id, single = True) - fireEvent('movie.searcher.single', movie_dict) - - return True diff --git a/couchpotato/core/plugins/base.py 
b/couchpotato/core/plugins/base.py index d7487a10..bc66123f 100644 --- a/couchpotato/core/plugins/base.py +++ b/couchpotato/core/plugins/base.py @@ -1,14 +1,4 @@ -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import ss, toSafeString, \ - toUnicode, sp -from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.environment import Env -import requests -from requests.packages.urllib3 import Timeout -from requests.packages.urllib3.exceptions import MaxRetryError -from tornado import template -from tornado.web import StaticFileHandler +from urllib import quote from urlparse import urlparse import glob import inspect @@ -16,7 +6,19 @@ import os.path import re import time import traceback -import urllib2 + +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import ss, toSafeString, \ + toUnicode, sp +from couchpotato.core.helpers.variable import getExt, md5, isLocalIP, scanForPassword, tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.environment import Env +import requests +from requests.packages.urllib3 import Timeout +from requests.packages.urllib3.exceptions import MaxRetryError +from tornado import template +from tornado.web import StaticFileHandler + log = CPLog(__name__) @@ -24,6 +26,7 @@ log = CPLog(__name__) class Plugin(object): _class_name = None + _database = None plugin_path = None enabled_option = 'enabled' @@ -37,10 +40,9 @@ class Plugin(object): http_time_between_calls = 0 http_failed_request = {} http_failed_disabled = {} - http_opener = requests.Session() - def __new__(typ, *args, **kwargs): - new_plugin = super(Plugin, typ).__new__(typ) + def __new__(cls, *args, **kwargs): + new_plugin = super(Plugin, cls).__new__(cls) new_plugin.registerPlugin() return new_plugin @@ -53,6 +55,17 @@ class Plugin(object): if self.auto_register_static: self.registerStatic(inspect.getfile(self.__class__)) + # Setup database + if self._database: + addEvent('database.setup', self.databaseSetup) + + def databaseSetup(self): + + for index_name in self._database: + klass = self._database[index_name] + + fireEvent('database.setup_index', index_name, klass) + def conf(self, attr, value = None, default = None, section = None): class_name = self.getName().lower().split(':')[0].lower() return Env.setting(attr, section = section if section else class_name, value = value, default = default) @@ -98,7 +111,7 @@ class Plugin(object): fireEvent('register_%s' % ('script' if ext in 'js' else 'style'), path + os.path.basename(f), f) def createFile(self, path, content, binary = False): - path = ss(path) + path = sp(path) self.makeDir(os.path.dirname(path)) @@ -116,7 +129,7 @@ class Plugin(object): os.remove(path) def makeDir(self, path): - path = ss(path) + path = sp(path) try: if not os.path.isdir(path): os.makedirs(path, Env.getPermission('folder')) @@ -126,9 +139,35 @@ class Plugin(object): return False + def deleteEmptyFolder(self, folder, show_error = True, only_clean = None): + folder = sp(folder) + + for item in os.listdir(folder): + full_folder = os.path.join(folder, item) + + if not only_clean or (item in only_clean and os.path.isdir(full_folder)): + + for root, dirs, files in os.walk(full_folder): + + for dir_name in dirs: + full_path = os.path.join(root, dir_name) + + if len(os.listdir(full_path)) == 0: + try: + os.rmdir(full_path) + except: + if show_error: + log.error('Couldn\'t remove 
empty directory %s: %s', (full_path, traceback.format_exc())) + + try: + os.rmdir(folder) + except: + if show_error: + log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) + # http request def urlopen(self, url, timeout = 30, data = None, headers = None, files = None, show_error = True): - url = urllib2.quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]") + url = quote(ss(url), safe = "%/:=&?~#+!$,;'@()*[]") if not headers: headers = {} if not data: data = {} @@ -144,7 +183,7 @@ class Plugin(object): headers['Connection'] = headers.get('Connection', 'keep-alive') headers['Cache-Control'] = headers.get('Cache-Control', 'max-age=0') - r = self.http_opener + r = Env.get('http_opener') # Don't try for failed requests if self.http_failed_disabled.get(host, 0) > 0: @@ -166,11 +205,12 @@ class Plugin(object): 'data': data if len(data) > 0 else None, 'timeout': timeout, 'files': files, + 'verify': False, #verify_ssl, Disable for now as too many wrongly implemented certificates.. } method = 'post' if len(data) > 0 or files else 'get' log.info('Opening url: %s %s, data: %s', (method, url, [x for x in data.keys()] if isinstance(data, dict) else 'with data')) - response = r.request(method, url, verify = False, **kwargs) + response = r.request(method, url, **kwargs) if response.status_code == requests.codes.ok: data = response.content @@ -223,7 +263,7 @@ class Plugin(object): def afterCall(self, handler): self.isRunning('%s.%s' % (self.getName(), handler.__name__), False) - def doShutdown(self): + def doShutdown(self, *args, **kwargs): self.shuttingDown(True) return True @@ -291,19 +331,22 @@ class Plugin(object): if name_password: release_name, password = name_password tag += '{{%s}}' % password + elif data.get('password'): + tag += '{{%s}}' % data.get('password') - max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames + max_length = 127 - len(tag) # Some filesystems don't support 128+ long filenames return '%s%s' % (toSafeString(toUnicode(release_name)[:max_length]), tag) def createFileName(self, data, filedata, media): - name = sp(os.path.join(self.createNzbName(data, media))) + name = self.createNzbName(data, media) if data.get('protocol') == 'nzb' and 'DOCTYPE nzb' not in filedata and '' not in filedata: return '%s.%s' % (name, 'rar') return '%s.%s' % (name, data.get('protocol')) def cpTag(self, media): if Env.setting('enabled', 'renamer'): - return '.cp(' + media['library'].get('identifier') + ')' if media['library'].get('identifier') else '' + identifier = getIdentifier(media) + return '.cp(' + identifier + ')' if identifier else '' return '' @@ -311,6 +354,7 @@ class Plugin(object): now = time.time() file_too_new = False + file_time = [] for cur_file in files: # File got removed while checking diff --git a/couchpotato/core/plugins/browser/main.py b/couchpotato/core/plugins/browser.py similarity index 95% rename from couchpotato/core/plugins/browser/main.py rename to couchpotato/core/plugins/browser.py index 956a7680..013a4823 100644 --- a/couchpotato/core/plugins/browser/main.py +++ b/couchpotato/core/plugins/browser.py @@ -1,11 +1,14 @@ -from couchpotato.api import addApiView -from couchpotato.core.helpers.variable import getUserDir -from couchpotato.core.plugins.base import Plugin import ctypes import os import string + +from couchpotato.api import addApiView +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import getUserDir +from couchpotato.core.plugins.base import Plugin import six + if os.name ==
'nt': import imp try: @@ -15,7 +18,11 @@ if os.name == 'nt': raise ImportError("Missing the win32file module, which is a part of the prerequisite \ pywin32 package. You can get it from http://sourceforge.net/projects/pywin32/files/pywin32/") else: - import win32file #@UnresolvedImport + # noinspection PyUnresolvedReferences + import win32file + +autoload = 'FileBrowser' + class FileBrowser(Plugin): @@ -44,6 +51,7 @@ class FileBrowser(Plugin): path = '/' dirs = [] + path = sp(path) for f in os.listdir(path): p = os.path.join(path, f) if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)): diff --git a/couchpotato/core/plugins/browser/__init__.py b/couchpotato/core/plugins/browser/__init__.py deleted file mode 100644 index fae50657..00000000 --- a/couchpotato/core/plugins/browser/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import FileBrowser - - -def start(): - return FileBrowser() - -config = [] diff --git a/couchpotato/core/plugins/category/__init__.py b/couchpotato/core/plugins/category/__init__.py index dcdae90b..d147092f 100644 --- a/couchpotato/core/plugins/category/__init__.py +++ b/couchpotato/core/plugins/category/__init__.py @@ -1,7 +1,5 @@ from .main import CategoryPlugin -def start(): +def autoload(): return CategoryPlugin() - -config = [] diff --git a/couchpotato/core/plugins/category/index.py b/couchpotato/core/plugins/category/index.py new file mode 100644 index 00000000..6445de3c --- /dev/null +++ b/couchpotato/core/plugins/category/index.py @@ -0,0 +1,31 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class CategoryIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'i' + super(CategoryIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'category': + return data.get('order', -99), None + + +class CategoryMediaIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(CategoryMediaIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return str(key) + + def make_key_value(self, data): + if data.get('_t') == 'media' and data.get('category_id'): + return str(data.get('category_id')), None diff --git a/couchpotato/core/plugins/category/main.py b/couchpotato/core/plugins/category/main.py index c7abaee4..a0852cc1 100644 --- a/couchpotato/core/plugins/category/main.py +++ b/couchpotato/core/plugins/category/main.py @@ -1,20 +1,25 @@ import traceback -from couchpotato import get_session + +from couchpotato import get_db from couchpotato.api import addApiView from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media, Category +from .index import CategoryIndex, CategoryMediaIndex + log = CPLog(__name__) class CategoryPlugin(Plugin): - def __init__(self): - addEvent('category.all', self.all) + _database = { + 'category': CategoryIndex, + 'category_media': CategoryMediaIndex, + } + def __init__(self): addApiView('category.save', self.save) addApiView('category.save_order', self.saveOrder) addApiView('category.delete', self.delete) @@ -26,54 +31,53 @@ class CategoryPlugin(Plugin): }"""} }) + addEvent('category.all', self.all) + def allView(self, **kwargs): return { 'success': True, - 'list': self.all() + 'categories': self.all() } 
def all(self): - db = get_session() - categories = db.query(Category).all() + db = get_db() + categories = db.all('category', with_doc = True) - temp = [] - for category in categories: - temp.append(category.to_dict()) - - return temp + return [x['doc'] for x in categories] def save(self, **kwargs): try: - db = get_session() + db = get_db() - c = db.query(Category).filter_by(id = kwargs.get('id')).first() - if not c: - c = Category() - db.add(c) + category = { + '_t': 'category', + 'order': kwargs.get('order', 999), + 'label': toUnicode(kwargs.get('label', '')), + 'ignored': toUnicode(kwargs.get('ignored', '')), + 'preferred': toUnicode(kwargs.get('preferred', '')), + 'required': toUnicode(kwargs.get('required', '')), + 'destination': toUnicode(kwargs.get('destination', '')), + } - c.order = kwargs.get('order', c.order if c.order else 0) - c.label = toUnicode(kwargs.get('label', '')) - c.ignored = toUnicode(kwargs.get('ignored', '')) - c.preferred = toUnicode(kwargs.get('preferred', '')) - c.required = toUnicode(kwargs.get('required', '')) - c.destination = toUnicode(kwargs.get('destination', '')) + try: + c = db.get('id', kwargs.get('id')) + category['order'] = c.get('order', category['order']) + c.update(category) - db.commit() - - category_dict = c.to_dict() + db.update(c) + except: + c = db.insert(category) + c.update(category) return { 'success': True, - 'category': category_dict + 'category': c } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False, @@ -83,25 +87,21 @@ class CategoryPlugin(Plugin): def saveOrder(self, **kwargs): try: - db = get_session() + db = get_db() order = 0 for category_id in kwargs.get('ids', []): - c = db.query(Category).filter_by(id = category_id).first() - c.order = order + c = db.get('id', category_id) + c['order'] = order + db.update(c) order += 1 - db.commit() - return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -110,21 +110,20 @@ class CategoryPlugin(Plugin): def delete(self, id = None, **kwargs): try: - db = get_session() + db = get_db() success = False message = '' try: - c = db.query(Category).filter_by(id = id).first() + c = db.get('id', id) db.delete(c) - db.commit() # Force defaults on all empty category movies self.removeFromMovie(id) success = True - except Exception as e: - message = log.error('Failed deleting category: %s', e) + except: + message = log.error('Failed deleting category: %s', traceback.format_exc()) return { 'success': success, @@ -132,9 +131,6 @@ class CategoryPlugin(Plugin): } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -143,15 +139,12 @@ class CategoryPlugin(Plugin): def removeFromMovie(self, category_id): try: - db = get_session() - movies = db.query(Media).filter(Media.category_id == category_id).all() + db = get_db() + movies = [x['doc'] for x in db.get_many('category_media', category_id, with_doc = True)] if len(movies) > 0: for movie in movies: - movie.category_id = None - db.commit() + movie['category_id'] = None + db.update(movie) except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() diff --git a/couchpotato/core/plugins/category/static/category.css b/couchpotato/core/plugins/category/static/category.css index 0987c197..3218a79c 100644 --- a/couchpotato/core/plugins/category/static/category.css +++ 
b/couchpotato/core/plugins/category/static/category.css @@ -22,7 +22,7 @@ .category > .delete:hover { opacity: 1; } - + .category .ctrlHolder:hover { background: none; } @@ -69,7 +69,7 @@ } #category_ordering li .handle { - background: url('../../static/profile_plugin/handle.png') center; + background: url('../../images/handle.png') center; width: 20px; float: right; } @@ -79,4 +79,4 @@ float: right; width: 250px; margin: 0; - } \ No newline at end of file + } diff --git a/couchpotato/core/plugins/category/static/category.js b/couchpotato/core/plugins/category/static/category.js index 168b70de..6d160be1 100644 --- a/couchpotato/core/plugins/category/static/category.js +++ b/couchpotato/core/plugins/category/static/category.js @@ -3,13 +3,13 @@ var CategoryListBase = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addSettings.bind(self)); + App.addEvent('loadSettings', self.addSettings.bind(self)); }, setup: function(categories){ var self = this; - self.categories = [] + self.categories = []; Array.each(categories, self.createCategory.bind(self)); }, @@ -17,7 +17,7 @@ var CategoryListBase = new Class({ addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ var tab = self.settings.createSubTab('category', { 'label': 'Categories', @@ -31,7 +31,7 @@ var CategoryListBase = new Class({ self.createList(); self.createOrdering(); - }) + }); // Add categories in renamer self.settings.addEvent('create', function(){ @@ -86,7 +86,7 @@ var CategoryListBase = new Class({ getCategory: function(id){ return this.categories.filter(function(category){ - return category.data.id == id + return category.data._id == id }).pick() }, @@ -97,9 +97,9 @@ var CategoryListBase = new Class({ createCategory: function(data){ var self = this; - var data = data || {'id': randomString()} - var category = new Category(data) - self.categories.include(category) + var data = data || {'id': randomString()}; + var category = new Category(data); + self.categories.include(category); return category; }, @@ -108,7 +108,7 @@ var CategoryListBase = new Class({ var self = this; var category_list; - var group = self.settings.createGroup({ + self.settings.createGroup({ 'label': 'Category ordering' }).adopt( new Element('.ctrlHolder#category_ordering').adopt( @@ -118,10 +118,10 @@ var CategoryListBase = new Class({ 'html': 'Change the order the categories are in the dropdown list.
First one will be default.' }) ) - ).inject(self.content) + ).inject(self.content); Array.each(self.categories, function(category){ - new Element('li', {'data-id': category.data.id}).adopt( + new Element('li', {'data-id': category.data._id}).adopt( new Element('span.category_label', { 'text': category.data.label }), @@ -145,7 +145,7 @@ var CategoryListBase = new Class({ var ids = []; - self.category_sortable.list.getElements('li').each(function(el, nr){ + self.category_sortable.list.getElements('li').each(function(el){ ids.include(el.get('data-id')); }); @@ -157,7 +157,7 @@ var CategoryListBase = new Class({ } -}) +}); window.CategoryList = new CategoryListBase(); @@ -235,8 +235,6 @@ var Category = new Class({ if(self.save_timer) clearTimeout(self.save_timer); self.save_timer = (function(){ - var data = self.getData(); - Api.request('category.save', { 'data': self.getData(), 'useSpinner': true, @@ -257,21 +255,19 @@ var Category = new Class({ getData: function(){ var self = this; - var data = { - 'id' : self.data.id, + return { + 'id' : self.data._id, 'label' : self.el.getElement('.category_label input').get('value'), 'required' : self.el.getElement('.category_required input').get('value'), 'preferred' : self.el.getElement('.category_preferred input').get('value'), 'ignored' : self.el.getElement('.category_ignored input').get('value'), 'destination': self.data.destination } - - return data }, del: function(){ var self = this; - + if(self.data.label == undefined){ self.el.destroy(); return; @@ -286,7 +282,7 @@ var Category = new Class({ (e).preventDefault(); Api.request('category.delete', { 'data': { - 'id': self.data.id + 'id': self.data._id }, 'useSpinner': true, 'spinnerOptions': { @@ -329,4 +325,4 @@ var Category = new Class({ return this.el } -}); \ No newline at end of file +}); diff --git a/couchpotato/core/plugins/custom/main.py b/couchpotato/core/plugins/custom.py similarity index 96% rename from couchpotato/core/plugins/custom/main.py rename to couchpotato/core/plugins/custom.py index a15c915c..20b4c3f7 100644 --- a/couchpotato/core/plugins/custom/main.py +++ b/couchpotato/core/plugins/custom.py @@ -1,11 +1,15 @@ +import os + from couchpotato.core.event import addEvent from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -import os + log = CPLog(__name__) +autoload = 'Custom' + class Custom(Plugin): diff --git a/couchpotato/core/plugins/custom/__init__.py b/couchpotato/core/plugins/custom/__init__.py deleted file mode 100644 index 20a39351..00000000 --- a/couchpotato/core/plugins/custom/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Custom - - -def start(): - return Custom() - -config = [] diff --git a/couchpotato/core/plugins/dashboard.py b/couchpotato/core/plugins/dashboard.py new file mode 100644 index 00000000..776f24ec --- /dev/null +++ b/couchpotato/core/plugins/dashboard.py @@ -0,0 +1,106 @@ +from datetime import date +import random as rndm +import time + +from couchpotato import get_db +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent +from couchpotato.core.helpers.variable import splitString, tryInt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin + + +log = CPLog(__name__) + +autoload = 'Dashboard' + + +class Dashboard(Plugin): + + def __init__(self): + addApiView('dashboard.soon', self.getSoonView) + + def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): + + db = 
get_db() + now = time.time() + + # Get profiles first, determine pre or post theater + profiles = fireEvent('profile.all', single = True) + pre_releases = fireEvent('quality.pre_releases', single = True) + + # See what the profile contain and cache it + profile_pre = {} + for profile in profiles: + contains = {} + for q_identifier in profile.get('qualities', []): + contains['theater' if q_identifier in pre_releases else 'dvd'] = True + + profile_pre[profile.get('_id')] = contains + + # Add limit + limit = 12 + if limit_offset: + splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset + limit = tryInt(splt[0]) + + # Get all active medias + active_ids = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc = False, single = True)] + + medias = [] + now_year = date.today().year + + if len(active_ids) > 0: + + # Order by title or randomize + if not random: + orders_ids = db.all('media_title') + active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids] + else: + rndm.shuffle(active_ids) + + for media_id in active_ids: + media = db.get('id', media_id) + + pp = profile_pre.get(media['profile_id']) + if not pp: continue + + eta = media['info'].get('release_date', {}) or {} + coming_soon = False + + # Theater quality + if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single = True): + coming_soon = True + elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single = True): + coming_soon = True + + if coming_soon: + + # Don't list older movies + if ((not late and (media['info']['year'] >= now_year - 1) and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or + (late and (media['info']['year'] < now_year - 1 or (eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200)))): + + add = True + + # Check if it doesn't have any releases + if late: + media['releases'] = fireEvent('release.for_media', media['_id'], single = True) + + for release in media.get('releases'): + if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']: + add = False + break + + if add: + medias.append(media) + + if len(medias) >= limit: + break + + return { + 'success': True, + 'empty': len(medias) == 0, + 'movies': medias, + } + + getLateView = getSoonView diff --git a/couchpotato/core/plugins/dashboard/__init__.py b/couchpotato/core/plugins/dashboard/__init__.py deleted file mode 100644 index c43a44eb..00000000 --- a/couchpotato/core/plugins/dashboard/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Dashboard - - -def start(): - return Dashboard() - -config = [] diff --git a/couchpotato/core/plugins/dashboard/main.py b/couchpotato/core/plugins/dashboard/main.py deleted file mode 100644 index 3367ffb7..00000000 --- a/couchpotato/core/plugins/dashboard/main.py +++ /dev/null @@ -1,130 +0,0 @@ -from datetime import date -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.variable import splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Media, Library, LibraryTitle, \ - Release -from sqlalchemy.orm import joinedload_all -from sqlalchemy.sql.expression import asc, or_ -import random as rndm -import time - -log = CPLog(__name__) - - -class 
Dashboard(Plugin): - - def __init__(self): - addApiView('dashboard.soon', self.getSoonView) - - def getSoonView(self, limit_offset = None, random = False, late = False, **kwargs): - - db = get_session() - now = time.time() - - # Get profiles first, determine pre or post theater - profiles = fireEvent('profile.all', single = True) - qualities = fireEvent('quality.all', single = True) - pre_releases = fireEvent('quality.pre_releases', single = True) - - id_pre = {} - for quality in qualities: - id_pre[quality.get('id')] = quality.get('identifier') in pre_releases - - # See what the profile contain and cache it - profile_pre = {} - for profile in profiles: - contains = {} - for profile_type in profile.get('types', []): - contains['theater' if id_pre.get(profile_type.get('quality_id')) else 'dvd'] = True - - profile_pre[profile.get('id')] = contains - - # Add limit - limit = 12 - if limit_offset: - splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset - limit = tryInt(splt[0]) - - # Get all active movies - active_status, ignored_status = fireEvent('status.get', ['active', 'ignored'], single = True) - q = db.query(Media) \ - .join(Library) \ - .outerjoin(Media.releases) \ - .filter(Media.status_id == active_status.get('id')) \ - .with_entities(Media.id, Media.profile_id, Library.info, Library.year) \ - .group_by(Media.id) \ - .filter(or_(Release.id == None, Release.status_id == ignored_status.get('id'))) - - if not random: - q = q.join(LibraryTitle) \ - .filter(LibraryTitle.default == True) \ - .order_by(asc(LibraryTitle.simple_title)) - - active = q.all() - movies = [] - now_year = date.today().year - - if len(active) > 0: - - # Do the shuffle - if random: - rndm.shuffle(active) - - movie_ids = [] - for movie in active: - movie_id, profile_id, info, year = movie - - pp = profile_pre.get(profile_id) - if not pp: continue - - eta = info.get('release_date', {}) or {} - coming_soon = False - - # Theater quality - if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, year, single = True): - coming_soon = True - elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, year, single = True): - coming_soon = True - - if coming_soon: - - # Don't list older movies - if ((not late and (year >= now_year-1) and (not eta.get('dvd') and not eta.get('theater') or eta.get('dvd') and eta.get('dvd') > (now - 2419200))) or - (late and ((year < now_year-1) or ((eta.get('dvd', 0) > 0 or eta.get('theater')) and eta.get('dvd') < (now - 2419200))))): - movie_ids.append(movie_id) - - if len(movie_ids) >= limit: - break - - if len(movie_ids) > 0: - - # Get all movie information - movies_raw = db.query(Media) \ - .options(joinedload_all('library.titles')) \ - .options(joinedload_all('library.files')) \ - .options(joinedload_all('files')) \ - .filter(Media.id.in_(movie_ids)) \ - .all() - - # Create dict by movie id - movie_dict = {} - for movie in movies_raw: - movie_dict[movie.id] = movie - - for movie_id in movie_ids: - movies.append(movie_dict[movie_id].to_dict({ - 'library': {'titles': {}, 'files': {}}, - 'files': {}, - })) - - return { - 'success': True, - 'empty': len(movies) == 0, - 'movies': movies, - } - - getLateView = getSoonView diff --git a/couchpotato/core/plugins/file.py b/couchpotato/core/plugins/file.py new file mode 100644 index 00000000..51adf8c9 --- /dev/null +++ b/couchpotato/core/plugins/file.py @@ -0,0 +1,78 @@ +import os.path +import traceback + +from couchpotato import get_db +from couchpotato.api import 
addApiView +from couchpotato.core.event import addEvent, fireEvent +from couchpotato.core.helpers.encoding import toUnicode +from couchpotato.core.helpers.variable import md5, getExt +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env +from tornado.web import StaticFileHandler + + +log = CPLog(__name__) + +autoload = 'FileManager' + + +class FileManager(Plugin): + + def __init__(self): + addEvent('file.download', self.download) + + addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = { + 'desc': 'Return a file from the cp_data/cache directory', + 'params': { + 'filename': {'desc': 'path/filename of the wanted file'} + }, + 'return': {'type': 'file'} + }) + + fireEvent('schedule.interval', 'file.cleanup', self.cleanup, hours = 24) + + def cleanup(self): + + # Remove cached images that are no longer linked to any media + log.debug('Cleaning up unused files') + + try: + db = get_db() + cache_dir = Env.get('cache_dir') + medias = db.all('media', with_doc = True) + + files = [] + for media in medias: + file_dict = media['doc'].get('files', {}) + for x in file_dict.keys(): + files.extend(file_dict[x]) + + for f in os.listdir(cache_dir): + if os.path.splitext(f)[1] in ['.png', '.jpg', '.jpeg']: + file_path = os.path.join(cache_dir, f) + if toUnicode(file_path) not in files: + os.remove(file_path) + except: + log.error('Failed removing unused file: %s', traceback.format_exc()) + + def showCacheFile(self, route, **kwargs): + Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})]) + + def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): + if not urlopen_kwargs: urlopen_kwargs = {} + + if not dest: # to Cache + dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) + + if not overwrite and os.path.isfile(dest): + return dest + + try: + filedata = self.urlopen(url, **urlopen_kwargs) + except: + log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) + return False + + self.createFile(dest, filedata, binary = True) + return dest diff --git a/couchpotato/core/plugins/file/__init__.py b/couchpotato/core/plugins/file/__init__.py deleted file mode 100644 index 3dced3d0..00000000 --- a/couchpotato/core/plugins/file/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import FileManager - - -def start(): - return FileManager() - -config = [] diff --git a/couchpotato/core/plugins/file/main.py b/couchpotato/core/plugins/file/main.py deleted file mode 100644 index c52a9801..00000000 --- a/couchpotato/core/plugins/file/main.py +++ /dev/null @@ -1,175 +0,0 @@ -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import md5, getExt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.plugins.scanner.main import Scanner -from couchpotato.core.settings.model import FileType, File -from couchpotato.environment import Env -from tornado.web import StaticFileHandler -import os.path -import time -import traceback - -log = CPLog(__name__) - - -class FileManager(Plugin): - - def __init__(self): - addEvent('file.add', self.add) - addEvent('file.download', self.download) - addEvent('file.types', self.getTypes) - - addApiView('file.cache/(.*)', self.showCacheFile, static =
True, docs = { - 'desc': 'Return a file from the cp_data/cache directory', - 'params': { - 'filename': {'desc': 'path/filename of the wanted file'} - }, - 'return': {'type': 'file'} - }) - - addApiView('file.types', self.getTypesView, docs = { - 'desc': 'Return a list of all the file types and their ids.', - 'return': {'type': 'object', 'example': """{ - 'types': [ - { - "identifier": "poster_original", - "type": "image", - "id": 1, - "name": "Poster_original" - }, - { - "identifier": "poster", - "type": "image", - "id": 2, - "name": "Poster" - }, - etc - ] -}"""} - }) - - addEvent('app.load', self.cleanup) - addEvent('app.load', self.init) - - def init(self): - - for type_tuple in Scanner.file_types.values(): - self.getType(type_tuple) - - def cleanup(self): - - # Wait a bit after starting before cleanup - time.sleep(3) - log.debug('Cleaning up unused files') - - try: - db = get_session() - for root, dirs, walk_files in os.walk(Env.get('cache_dir')): - for filename in walk_files: - if os.path.splitext(filename)[1] in ['.png', '.jpg', '.jpeg']: - file_path = os.path.join(root, filename) - f = db.query(File).filter(File.path == toUnicode(file_path)).first() - if not f: - os.remove(file_path) - except: - log.error('Failed removing unused file: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() - - def showCacheFile(self, route, **kwargs): - Env.get('app').add_handlers(".*$", [('%s%s' % (Env.get('api_base'), route), StaticFileHandler, {'path': Env.get('cache_dir')})]) - - def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None): - if not urlopen_kwargs: urlopen_kwargs = {} - - if not dest: # to Cache - dest = os.path.join(Env.get('cache_dir'), '%s.%s' % (md5(url), getExt(url))) - - if not overwrite and os.path.isfile(dest): - return dest - - try: - filedata = self.urlopen(url, **urlopen_kwargs) - except: - log.error('Failed downloading file %s: %s', (url, traceback.format_exc())) - return False - - self.createFile(dest, filedata, binary = True) - return dest - - def add(self, path = '', part = 1, type_tuple = (), available = 1, properties = None): - if not properties: properties = {} - - try: - db = get_session() - type_id = self.getType(type_tuple).get('id') - - f = db.query(File).filter(File.path == toUnicode(path)).first() - if not f: - f = File() - db.add(f) - - f.path = toUnicode(path) - f.part = part - f.available = available - f.type_id = type_id - - db.commit() - - file_dict = f.to_dict() - - return file_dict - except: - log.error('Failed adding file: %s, %s', (path, traceback.format_exc())) - db.rollback() - finally: - db.close() - - def getType(self, type_tuple): - - try: - db = get_session() - type_type, type_identifier = type_tuple - - ft = db.query(FileType).filter_by(identifier = type_identifier).first() - if not ft: - ft = FileType( - type = toUnicode(type_type), - identifier = type_identifier, - name = toUnicode(type_identifier[0].capitalize() + type_identifier[1:]) - ) - db.add(ft) - db.commit() - - type_dict = ft.to_dict() - - return type_dict - except: - log.error('Failed getting type: %s, %s', (type_tuple, traceback.format_exc())) - db.rollback() - finally: - db.close() - - - def getTypes(self): - - db = get_session() - - results = db.query(FileType).all() - - types = [] - for type_object in results: - types.append(type_object.to_dict()) - - return types - - def getTypesView(self, **kwargs): - - return { - 'types': self.getTypes() - } diff --git a/couchpotato/core/plugins/file/static/file.js 
b/couchpotato/core/plugins/file/static/file.js deleted file mode 100644 index 6659c882..00000000 --- a/couchpotato/core/plugins/file/static/file.js +++ /dev/null @@ -1,86 +0,0 @@ -var File = new Class({ - - initialize: function(type, file){ - var self = this; - - if(!file){ - self.empty = true; - self.el = new Element('div.empty_file.'+type); - return - } - - self.data = file; - self.type = File.Type.get(file.type_id); - - self['create'+(self.type.type).capitalize()]() - - }, - - createImage: function(){ - var self = this; - - var file_name = self.data.path.replace(/^.*[\\\/]/, ''); - - self.el = new Element('div', { - 'class': 'type_image ' + self.type.identifier, - 'styles': { - 'background-image': 'url('+Api.createUrl('file.cache') + file_name+')' - } - }).adopt( - new Element('img', { - 'src': Api.createUrl('file.cache') + file_name - }) - ) - }, - - toElement: function(){ - return this.el; - } - -}); - -var FileSelect = new Class({ - - multiple: function(type, files, single){ - - var results = files.filter(function(file){ - return file.type_id == File.Type.get(type).id; - }); - - if(single) - return new File(type, results.pop()); - - return results; - - }, - - single: function(type, files){ - return this.multiple(type, files, true); - } - -}); -window.File.Select = new FileSelect(); - -var FileTypeBase = new Class({ - - setup: function(types){ - var self = this; - - self.typesById = {}; - self.typesByKey = {}; - Object.each(types, function(type){ - self.typesByKey[type.identifier] = type; - self.typesById[type.id] = type; - }); - - }, - - get: function(identifier){ - if(typeOf(identifier) == 'number') - return this.typesById[identifier] - else - return this.typesByKey[identifier] - } - -}); -window.File.Type = new FileTypeBase(); diff --git a/couchpotato/core/plugins/log/__init__.py b/couchpotato/core/plugins/log/__init__.py index f5d9d105..3760b567 100644 --- a/couchpotato/core/plugins/log/__init__.py +++ b/couchpotato/core/plugins/log/__init__.py @@ -1,7 +1,5 @@ from .main import Logging -def start(): +def autoload(): return Logging() - -config = [] diff --git a/couchpotato/core/plugins/log/main.py b/couchpotato/core/plugins/log/main.py index 2f471586..003529b1 100644 --- a/couchpotato/core/plugins/log/main.py +++ b/couchpotato/core/plugins/log/main.py @@ -1,11 +1,14 @@ +import os +import re +import traceback + from couchpotato.api import addApiView from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import tryInt +from couchpotato.core.helpers.variable import tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env -import os -import traceback + log = CPLog(__name__) @@ -20,7 +23,11 @@ class Logging(Plugin): }, 'return': {'type': 'object', 'example': """{ 'success': True, - 'log': string, //Log file + 'log': [{ + 'time': '03-12 09:12:59', + 'type': 'INFO', + 'message': 'Log message' + }, ..], //Log file 'total': int, //Total log files available }"""} }) @@ -32,7 +39,11 @@ class Logging(Plugin): }, 'return': {'type': 'object', 'example': """{ 'success': True, - 'log': string, //Log file + 'log': [{ + 'time': '03-12 09:12:59', + 'type': 'INFO', + 'message': 'Log message' + }, ..] 
}"""} }) addApiView('logging.clear', self.clear, docs = { @@ -65,20 +76,22 @@ class Logging(Plugin): if x is nr: current_path = path - log = '' + log_content = '' if current_path: f = open(current_path, 'r') - log = f.read() + log_content = f.read() + logs = self.toList(log_content) return { 'success': True, - 'log': toUnicode(log), + 'log': logs, 'total': total, } - def partial(self, type = 'all', lines = 30, **kwargs): + def partial(self, type = 'all', lines = 30, offset = 0, **kwargs): total_lines = tryInt(lines) + offset = tryInt(offset) log_lines = [] @@ -91,28 +104,57 @@ class Logging(Plugin): break f = open(path, 'r') - reversed_lines = toUnicode(f.read()).split('[0m\n') - reversed_lines.reverse() + log_content = toUnicode(f.read()) + raw_lines = self.toList(log_content) + raw_lines.reverse() brk = False - for line in reversed_lines: + for line in raw_lines: - if type == 'all' or '%s ' % type.upper() in line: + if type == 'all' or line.get('type') == type.upper(): log_lines.append(line) - if len(log_lines) >= total_lines: + if len(log_lines) >= (total_lines + offset): brk = True break if brk: break + log_lines = log_lines[offset:] log_lines.reverse() + return { 'success': True, - 'log': '[0m\n'.join(log_lines), + 'log': log_lines, } + def toList(self, log_content = ''): + + logs_raw = toUnicode(log_content).split('[0m\n') + + logs = [] + for log_line in logs_raw: + split = splitString(log_line, '\x1b') + if split: + try: + date, time, log_type = splitString(split[0], ' ') + timestamp = '%s %s' % (date, time) + except: + timestamp = 'UNKNOWN' + log_type = 'UNKNOWN' + + message = ''.join(split[1]) if len(split) > 1 else split[0] + message = re.sub('\[\d+m\[', '[', message) + + logs.append({ + 'time': timestamp, + 'type': log_type, + 'message': message + }) + + return logs + def clear(self, **kwargs): for x in range(0, 50): diff --git a/couchpotato/core/plugins/log/static/log.css b/couchpotato/core/plugins/log/static/log.css index 524b588d..c7aace61 100644 --- a/couchpotato/core/plugins/log/static/log.css +++ b/couchpotato/core/plugins/log/static/log.css @@ -9,16 +9,21 @@ bottom: 0; left: 0; background: #4E5969; + z-index: 100; } .page.log .nav li { display: inline-block; padding: 5px 10px; margin: 0; + } + + .page.log .nav li.select, + .page.log .nav li.clear { cursor: pointer; } - .page.log .nav li:hover:not(.active) { + .page.log .nav li:hover:not(.active):not(.filter) { background: rgba(255, 255, 255, 0.1); } @@ -27,17 +32,30 @@ cursor: default; background: rgba(255,255,255,.1); } - + @media all and (max-width: 480px) { .page.log .nav { font-size: 14px; } - + .page.log .nav li { padding: 5px; } } + .page.log .nav li.hint { + text-align: center; + width: 400px; + left: 50%; + margin-left: -200px; + font-style: italic; + font-size: 11px; + position: absolute; + right: 20px; + opacity: .5; + bottom: 5px; + } + .page.log .loading { text-align: center; font-size: 20px; @@ -47,31 +65,135 @@ .page.log .container { padding: 30px 0 60px; overflow: hidden; -} - -.page.log .container span { - float: left; - width: 86%; line-height: 150%; - padding: 3px 0; - border-top: 1px solid rgba(255, 255, 255, 0.2); font-size: 11px; - font-family: Lucida Console, Monaco, Nimbus Mono L; + color: #FFF; } - .page.log .container .error { - color: #FFA4A4; - white-space: pre-wrap; + .page.log .container select { + vertical-align: top; } - .page.log .container .debug { color: lightgrey; } .page.log .container .time { clear: both; - width: 14%; color: lightgrey; - padding: 3px 0; font-size: 10px; + border-top: 
1px solid rgba(255, 255, 255, 0.1); + position: relative; + overflow: hidden; + padding: 0 3px; + font-family: Lucida Console, Monaco, Nimbus Mono L, monospace, serif; + } + .page.log .container .time.highlight { + background: rgba(255, 255, 255, 0.1); + } + .page.log .container .time span { + padding: 5px 0 3px; + display: inline-block; + vertical-align: middle; + } + + .page.log[data-filter=INFO] .error, + .page.log[data-filter=INFO] .debug, + .page.log[data-filter=ERROR] .debug, + .page.log[data-filter=ERROR] .info, + .page.log[data-filter=DEBUG] .info, + .page.log[data-filter=DEBUG] .error { + display: none; + } + + .page.log .container .type { + margin-left: 10px; } - .page.log .container .time:last-child { display: none; } + .page.log .container .message { + float: right; + width: 86%; + white-space: pre-wrap; + } + .page.log .container .error { color: #FFA4A4; } + .page.log .container .debug span { opacity: .6; } + +.do_report { + position: absolute; + padding: 10px; +} + +.page.log .report { + position: fixed; + width: 100%; + height: 100%; + background: rgba(0,0,0,.7); + left: 0; + top: 0; + z-index: 99999; + font-size: 14px; +} + + .page.log .report .button { + display: inline-block; + margin: 10px 0; + padding: 10px; + } + + .page.log .report .bug { + width: 800px; + height: 80%; + position: absolute; + left: 50%; + top: 50%; + margin: 0 0 0 -400px; + transform: translate(0, -50%); + } + + .page.log .report .bug textarea { + display: block; + width: 100%; + background: #FFF; + padding: 20px; + overflow: auto; + color: #666; + height: 70%; + font-size: 12px; + } + +.page.log .container .time ::-webkit-selection { + background-color: #000; + color: #FFF; +} + +.page.log .container .time ::-moz-selection { + background-color: #000; + color: #FFF; +} + +.page.log .container .time ::-ms-selection { + background-color: #000; + color: #FFF; +} + +.page.log .container .time.highlight ::selection { + background-color: transparent; + color: inherit; +} + +.page.log .container .time.highlight ::-webkit-selection { + background-color: transparent; + color: inherit; +} + +.page.log .container .time.highlight ::-moz-selection { + background-color: transparent; + color: inherit; +} + +.page.log .container .time.highlight ::-ms-selection { + background-color: transparent; + color: inherit; +} + +.page.log .container .time.highlight ::selection { + background-color: transparent; + color: inherit; +} diff --git a/couchpotato/core/plugins/log/static/log.js b/couchpotato/core/plugins/log/static/log.js index 159bfeaa..11acb5ca 100644 --- a/couchpotato/core/plugins/log/static/log.js +++ b/couchpotato/core/plugins/log/static/log.js @@ -2,81 +2,295 @@ Page.Log = new Class({ Extends: PageBase, + order: 60, name: 'log', title: 'Show recent logs.', has_tab: false, - indexAction: function(){ + log_items: [], + report_text: '\ +### Steps to reproduce:\n\ +1. ..\n\ +2. ..\n\ +\n\ +### Information:\n\ +Movie(s) I have this with: ...\n\ +Quality of the movie being searched: ...\n\ +Providers I use: ...\n\ +Version of CouchPotato: {version}\n\ +Running on: ...\n\ +\n\ +### Logs:\n\ +```\n{issue}```', + + indexAction: function () { var self = this; self.getLogs(0); }, - getLogs: function(nr){ + getLogs: function (nr) { var self = this; - if(self.log) self.log.destroy(); + if (self.log) self.log.destroy(); self.log = new Element('div.container.loading', { - 'text': 'loading...' 
+ 'text': 'loading...', + 'events': { + 'mouseup:relay(.time)': function(e){ + self.showSelectionButton.delay(100, self, e); + } + } }).inject(self.el); Api.request('logging.get', { 'data': { 'nr': nr }, - 'onComplete': function(json){ - self.log.set('html', self.addColors(json.log)); + 'onComplete': function (json) { + self.log.set('text', ''); + self.log_items = self.createLogElements(json.log); + self.log.adopt(self.log_items); self.log.removeClass('loading'); - new Fx.Scroll(window, {'duration': 0}).toBottom(); + var nav = new Element('ul.nav', { + 'events': { + 'click:relay(li.select)': function (e, el) { + self.getLogs(parseInt(el.get('text')) - 1); + } + } + }); - var nav = new Element('ul.nav').inject(self.log, 'top'); - for (var i = 0; i <= json.total; i++) { - new Element('li', { - 'text': i+1, - 'class': nr == i ? 'active': '', + // Type selection + new Element('li.filter').grab( + new Element('select', { 'events': { - 'click': function(e){ - self.getLogs(e.target.get('text')-1); + 'change': function () { + var type_filter = this.getSelected()[0].get('value'); + self.el.set('data-filter', type_filter); + self.scrollToBottom(); } } - }).inject(nav); - }; + }).adopt( + new Element('option', {'value': 'ALL', 'text': 'Show all logs'}), + new Element('option', {'value': 'INFO', 'text': 'Show only INFO'}), + new Element('option', {'value': 'DEBUG', 'text': 'Show only DEBUG'}), + new Element('option', {'value': 'ERROR', 'text': 'Show only ERROR'}) + ) + ).inject(nav); - new Element('li', { + // Selections + for (var i = 0; i <= json.total; i++) { + new Element('li', { + 'text': i + 1, + 'class': 'select ' + (nr == i ? 'active' : '') + }).inject(nav); + } + + // Clear button + new Element('li.clear', { 'text': 'clear', 'events': { - 'click': function(){ + 'click': function () { Api.request('logging.clear', { - 'onComplete': function(){ + 'onComplete': function () { self.getLogs(0); } }); } } - }).inject(nav) + }).inject(nav); + + // Hint + new Element('li.hint', { + 'text': 'Select multiple lines & report an issue' + }).inject(nav); + + // Add to page + nav.inject(self.log, 'top'); + + self.scrollToBottom(); } }); }, - addColors: function(text){ - var self = this; + createLogElements: function (logs) { - text = text - .replace(/&/g, '&amp;') - .replace(/</g, '&lt;') - .replace(/>/g, '&gt;') - .replace(/"/g, '&quot;') - .replace(/\u001b\[31m/gi, '</span><span class="error">') - .replace(/\u001b\[36m/gi, '</span><span class="debug">') - .replace(/\u001b\[33m/gi, '</span><span class="time">') - .replace(/\u001b\[0m\n/gi, '</span><br /><span class="time">') - .replace(/\u001b\[0m/gi, '</span>') + var elements = []; - return '<span class="time">' + text + '</span>
'; + logs.each(function (log) { + elements.include(new Element('div', { + 'class': 'time ' + log.type.toLowerCase() + }).adopt( + new Element('span', { + 'text': log.time + }), + new Element('span.type', { + 'text': log.type + }), + new Element('span.message', { + 'text': log.message + }) + )) + }); + + return elements; + }, + + scrollToBottom: function () { + new Fx.Scroll(window, {'duration': 0}).toBottom(); + }, + + showSelectionButton: function(e){ + var self = this, + selection = self.getSelected(), + start_node = selection.anchorNode, + parent_start = start_node.parentNode.getParent('.time'), + end_node = selection.focusNode.parentNode.getParent('.time'), + text = ''; + + var remove_button = function(){ + self.log.getElements('.highlight').removeClass('highlight'); + if(self.do_report) + self.do_report.destroy(); + document.body.removeEvent('click', remove_button); + }; + remove_button(); + + if(parent_start) + start_node = parent_start; + + var index = { + 'start': self.log_items.indexOf(start_node), + 'end': self.log_items.indexOf(end_node) + }; + + if(index.start > index.end){ + index = { + 'start': index.end, + 'end': index.start + }; + } + + var nodes = self.log_items.slice(index.start, index.end + 1); + + nodes.each(function(node, nr){ + node.addClass('highlight'); + node.getElements('span').each(function(span){ + text += self.spaceFill(span.get('text') + ' ', 6); + }); + text += '\n'; + }); + + self.do_report = new Element('a.do_report.button', { + 'text': 'Report issue', + 'styles': { + 'top': e.page.y, + 'left': e.page.x + }, + 'events': { + 'click': function(e){ + (e).stop(); + + self.showReport(text); + } + } + }).inject(document.body); + + setTimeout(function(){ + document.body.addEvent('click', remove_button); + }, 0); + + }, + + showReport: function(text){ + var self = this, + version = Updater.getInfo(), + body = self.report_text + .replace('{issue}', text) + .replace('{version}', version ? version.version.repr : '...'), + textarea; + + var overlay = new Element('div.report', { + 'method': 'post', + 'events': { + 'click': function(e){ + overlay.destroy(); + } + } + }).grab( + new Element('div.bug', { + 'events': { + 'click': function(e){ + (e).stopPropagation(); + } + } + }).adopt( + new Element('h1', { + 'text': 'Report a bug' + }), + new Element('span').adopt( + new Element('span', { + 'text': 'Read ' + }), + new Element('a.button', { + 'target': '_blank', + 'text': 'the contributing guide', + 'href': 'https://github.com/RuudBurger/CouchPotatoServer/blob/develop/contributing.md' + }), + new Element('span', { + 'text': ' before posting, then copy the text below' + }) + ), + textarea = new Element('textarea', { + 'text': body, + 'events': { + 'click': function(){ + this.select(); + } + } + }), + new Element('a.button', { + 'target': '_blank', + 'text': 'Create a new issue on GitHub with the text above', + 'href': 'https://github.com/RuudBurger/CouchPotatoServer/issues/new', + 'events': { + 'click': function(e){ + (e).stop(); + + var body = textarea.get('value'), + bdy = '?body=' + (body.length < 2000 ? 
encodeURIComponent(body) : 'Paste the text here'), + win = window.open(e.target.get('href') + bdy, '_blank'); + win.focus(); + } + } + }) + ) + ); + + overlay.inject(self.log); + }, + + getSelected: function(){ + if (window.getSelection) + return window.getSelection(); + else if (document.getSelection) + return document.getSelection(); + else { + var selection = document.selection && document.selection.createRange(); + if (selection.text) + return selection.text; + } + return false; + + }, + + spaceFill: function( number, width ){ + if ( number.toString().length >= width ) + return number; + return ( new Array( width ).join( ' ' ) + number.toString() ).substr( -width ); } -}) +}); diff --git a/couchpotato/core/plugins/manage/main.py b/couchpotato/core/plugins/manage.py similarity index 58% rename from couchpotato/core/plugins/manage/main.py rename to couchpotato/core/plugins/manage.py index 2f297491..bec204b0 100644 --- a/couchpotato/core/plugins/manage/main.py +++ b/couchpotato/core/plugins/manage.py @@ -1,19 +1,22 @@ -from couchpotato.api import addApiView -from couchpotato.core.event import fireEvent, addEvent, fireEventAsync -from couchpotato.core.helpers.encoding import sp -from couchpotato.core.helpers.variable import splitString, getTitle, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env import ctypes import os import sys import time import traceback +from couchpotato.api import addApiView +from couchpotato.core.event import fireEvent, addEvent, fireEventAsync +from couchpotato.core.helpers.encoding import sp +from couchpotato.core.helpers.variable import splitString, getTitle, tryInt, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from couchpotato.environment import Env + log = CPLog(__name__) +autoload = 'Manage' + class Manage(Plugin): @@ -29,7 +32,7 @@ class Manage(Plugin): # Add files after renaming def after_rename(message = None, group = None): if not group: group = {} - return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files']) + return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download']) addEvent('renamer.after', after_rename, priority = 110) addApiView('manage.update', self.updateLibraryView, docs = { @@ -49,6 +52,20 @@ class Manage(Plugin): if not Env.get('dev') and self.conf('startup_scan'): addEvent('app.load', self.updateLibraryQuick) + addEvent('app.load', self.setCrons) + + # Enable / disable interval + addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons) + + def setCrons(self): + + fireEvent('schedule.remove', 'manage.update_library') + refresh = tryInt(self.conf('library_refresh_interval')) + if refresh > 0: + fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = refresh, single = True) + + return True + def getProgress(self, **kwargs): return { 'progress': self.in_progress @@ -67,7 +84,8 @@ class Manage(Plugin): return self.updateLibrary(full = False) def updateLibrary(self, full = True): - last_update = float(Env.prop('manage.last_update', default = 0)) + last_update_key = 'manage.last_update%s' % ('_full' if full else '') + last_update = float(Env.prop(last_update_key, default = 0)) if self.in_progress: log.info('Already updating library: %s', self.in_progress) @@ -105,7 +123,6 @@ class Manage(Plugin): log.info('Updating manage library: 
%s', folder) fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder) - onFound = self.createAddToLibrary(folder, added_identifiers) fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, on_found = onFound, single = True) @@ -117,46 +134,56 @@ class Manage(Plugin): if self.conf('cleanup') and full and not self.shuttingDown(): # Get movies with done status - total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', single = True) + total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True) + deleted_releases = [] for done_movie in done_movies: - if done_movie['library']['identifier'] not in added_identifiers: - fireEvent('media.delete', media_id = done_movie['id'], delete_from = 'all') + if getIdentifier(done_movie) not in added_identifiers: + fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all') else: - releases = fireEvent('release.for_movie', id = done_movie.get('id'), single = True) + releases = done_movie.get('releases', []) for release in releases: - if len(release.get('files', [])) > 0: - for release_file in release.get('files', []): - # Remove release not available anymore - if not os.path.isfile(sp(release_file['path'])): - fireEvent('release.clean', release['id']) + if release.get('files'): + brk = False + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + # Remove release not available anymore + if not os.path.isfile(sp(release_file)): + fireEvent('release.clean', release['_id']) + brk = True + break + if brk: break # Check if there are duplicate releases (different quality) use the last one, delete the rest if len(releases) > 1: used_files = {} for release in releases: + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + already_used = used_files.get(release_file) - for release_file in release.get('files', []): - already_used = used_files.get(release_file['path']) - - if already_used: - if already_used < release['id']: - fireEvent('release.delete', release['id'], single = True) # delete this one + if already_used: + release_id = release['_id'] if already_used.get('last_edit', 0) < release.get('last_edit', 0) else already_used['_id'] + if release_id not in deleted_releases: + fireEvent('release.delete', release_id, single = True) + deleted_releases.append(release_id) + break else: - fireEvent('release.delete', already_used, single = True) # delete previous one - break - else: - used_files[release_file['path']] = release.get('id') + used_files[release_file] = release del used_files - Env.prop('manage.last_update', time.time()) + # Break if CP wants to shut down + if self.shuttingDown(): + break + + Env.prop(last_update_key, time.time()) except: log.error('Failed updating library: %s', (traceback.format_exc())) - while True and not self.shuttingDown(): + while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown(): delete_me = {} @@ -167,14 +194,12 @@ class Manage(Plugin): for delete in delete_me: del self.in_progress[delete] - if len(self.in_progress) == 0: - break - time.sleep(1) fireEvent('notify.frontend', type = 'manage.updating', data = False) self.in_progress = False + # noinspection PyDefaultArgument def createAddToLibrary(self, folder, added_identifiers = []): def addToLibrary(group, total_found, to_go): @@ -184,15 +209,14 @@ 
class Manage(Plugin): 'to_go': total_found, }) - if group['library'] and group['library'].get('identifier'): - identifier = group['library'].get('identifier') - added_identifiers.append(identifier) + self.updateProgress(folder, to_go) + + if group['media'] and group['identifier']: + added_identifiers.append(group['identifier']) # Add it to release and update the info - fireEvent('release.add', group = group) - fireEvent('library.update.movie', identifier = identifier, on_complete = self.createAfterUpdate(folder, identifier)) - else: - self.updateProgress(folder) + fireEvent('release.add', group = group, update_info = False) + fireEvent('movie.update_info', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier'])) return addToLibrary @@ -203,20 +227,20 @@ class Manage(Plugin): if not self.in_progress or self.shuttingDown(): return - self.updateProgress(folder) total = self.in_progress[folder]['total'] movie_dict = fireEvent('media.get', identifier, single = True) - fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict['library'])) + fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict)) return afterUpdate - def updateProgress(self, folder): + def updateProgress(self, folder, to_go): pr = self.in_progress[folder] - pr['to_go'] -= 1 + if to_go < pr['to_go']: + pr['to_go'] = to_go - avg = (time.time() - pr['started'])/(pr['total'] - pr['to_go']) + avg = (time.time() - pr['started']) / (pr['total'] - pr['to_go']) pr['eta'] = tryInt(avg * pr['to_go']) @@ -229,7 +253,7 @@ class Manage(Plugin): return [] - def scanFilesToLibrary(self, folder = None, files = None): + def scanFilesToLibrary(self, folder = None, files = None, release_download = None): folder = os.path.normpath(folder) @@ -237,8 +261,11 @@ class Manage(Plugin): if groups: for group in groups.values(): - if group['library'] and group['library'].get('identifier'): - fireEvent('release.add', group = group) + if group.get('media'): + if release_download and release_download.get('release_id'): + fireEvent('release.add', group = group, update_id = release_download.get('release_id')) + else: + fireEvent('release.add', group = group) def getDiskSpace(self): @@ -267,3 +294,49 @@ class Manage(Plugin): return free_space + +config = [{ + 'name': 'manage', + 'groups': [ + { + 'tab': 'manage', + 'label': 'Movie Library Manager', + 'description': 'Add your existing movie folders.', + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'library', + 'type': 'directories', + 'description': 'Folder where the movies should be moved to.', + }, + { + 'label': 'Cleanup After', + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Remove movie from db if it can\'t be found after re-scan.', + 'default': True, + }, + { + 'label': 'Scan at startup', + 'name': 'startup_scan', + 'type': 'bool', + 'default': True, + 'advanced': True, + 'description': 'Do a quick scan on startup. On slow systems better disable this.', + }, + { + 'label': 'Full library refresh', + 'name': 'library_refresh_interval', + 'type': 'int', + 'default': 0, + 'advanced': True, + 'description': 'Do a full scan every X hours. 
(0 is disabled)', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/manage/__init__.py b/couchpotato/core/plugins/manage/__init__.py deleted file mode 100644 index c992dee6..00000000 --- a/couchpotato/core/plugins/manage/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -from .main import Manage - - -def start(): - return Manage() - -config = [{ - 'name': 'manage', - 'groups': [ - { - 'tab': 'manage', - 'label': 'Movie Library Manager', - 'description': 'Add your existing movie folders.', - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'library', - 'type': 'directories', - 'description': 'Folder where the movies should be moved to.', - }, - { - 'label': 'Cleanup After', - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Remove movie from db if it can\'t be found after re-scan.', - 'default': True, - }, - { - 'label': 'Scan at startup', - 'name': 'startup_scan', - 'type': 'bool', - 'default': True, - 'advanced': True, - 'description': 'Do a quick scan on startup. On slow systems better disable this.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/profile/__init__.py b/couchpotato/core/plugins/profile/__init__.py index c07bc7c5..15a74eee 100644 --- a/couchpotato/core/plugins/profile/__init__.py +++ b/couchpotato/core/plugins/profile/__init__.py @@ -1,7 +1,5 @@ from .main import ProfilePlugin -def start(): +def autoload(): return ProfilePlugin() - -config = [] diff --git a/couchpotato/core/plugins/profile/index.py b/couchpotato/core/plugins/profile/index.py new file mode 100644 index 00000000..c2bf9445 --- /dev/null +++ b/couchpotato/core/plugins/profile/index.py @@ -0,0 +1,16 @@ +from CodernityDB.tree_index import TreeBasedIndex + + +class ProfileIndex(TreeBasedIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = 'i' + super(ProfileIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return key + + def make_key_value(self, data): + if data.get('_t') == 'profile': + return data.get('order', 99), None diff --git a/couchpotato/core/plugins/profile/main.py b/couchpotato/core/plugins/profile/main.py index 914d46f3..10987198 100644 --- a/couchpotato/core/plugins/profile/main.py +++ b/couchpotato/core/plugins/profile/main.py @@ -1,19 +1,22 @@ import traceback -from couchpotato import get_session + +from couchpotato import get_db, tryInt from couchpotato.api import addApiView from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Profile, ProfileType, Media -from sqlalchemy.orm import joinedload_all +from .index import ProfileIndex + log = CPLog(__name__) class ProfilePlugin(Plugin): - to_dict = {'types': {}} + _database = { + 'profile': ProfileIndex + } def __init__(self): addEvent('profile.all', self.all) @@ -31,27 +34,33 @@ class ProfilePlugin(Plugin): }) addEvent('app.initialize', self.fill, priority = 90) - addEvent('app.load', self.forceDefaults) + addEvent('app.load', self.forceDefaults, priority = 110) def forceDefaults(self): + db = get_db() + + # Fill qualities and profiles if they are empty somehow.. 
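+        # db.count(db.all, 'profile') walks the custom 'profile' index registered
+        # through _database above; only documents whose make_key_value() matched,
+        # i.e. dicts carrying '_t': 'profile', ever enter that index. A minimal
+        # sketch of the round trip this file relies on (illustration only,
+        # assuming an opened handle from get_db()):
+        #
+        #     doc = db.insert({'_t': 'profile', 'label': 'Best', 'order': 0})
+        #     fetched = db.get('id', doc['_id'])     # primary-key lookup
+        #     total = db.count(db.all, 'profile')    # walks ProfileIndex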
+ if db.count(db.all, 'profile') == 0: + + if db.count(db.all, 'quality') == 0: + fireEvent('quality.fill', single = True) + + self.fill() + # Get all active movies without profile - active_status = fireEvent('status.get', 'active', single = True) - try: - db = get_session() - movies = db.query(Media).filter(Media.status_id == active_status.get('id'), Media.profile == None).all() + medias = fireEvent('media.with_status', 'active', single = True) - if len(movies) > 0: - default_profile = self.default() - for movie in movies: - movie.profile_id = default_profile.get('id') - db.commit() + profile_ids = [x.get('_id') for x in self.all()] + default_id = profile_ids[0] + + for media in medias: + if media.get('profile_id') not in profile_ids: + media['profile_id'] = default_id + db.update(media) except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() def allView(self, **kwargs): @@ -62,97 +71,81 @@ class ProfilePlugin(Plugin): def all(self): - db = get_session() - profiles = db.query(Profile) \ - .options(joinedload_all('types')) \ - .all() + db = get_db() + profiles = db.all('profile', with_doc = True) - temp = [] - for profile in profiles: - temp.append(profile.to_dict(self.to_dict)) - - return temp + return [x['doc'] for x in profiles] def save(self, **kwargs): try: - db = get_session() + db = get_db() - p = db.query(Profile).filter_by(id = kwargs.get('id')).first() - if not p: - p = Profile() - db.add(p) - - p.label = toUnicode(kwargs.get('label')) - p.order = kwargs.get('order', p.order if p.order else 0) - p.core = kwargs.get('core', False) - - #delete old types - [db.delete(t) for t in p.types] + profile = { + '_t': 'profile', + 'label': toUnicode(kwargs.get('label')), + 'order': tryInt(kwargs.get('order', 999)), + 'core': kwargs.get('core', False), + 'qualities': [], + 'wait_for': [], + 'finish': [], + '3d': [] + } + # Update types order = 0 for type in kwargs.get('types', []): - t = ProfileType( - order = order, - finish = type.get('finish') if order > 0 else 1, - wait_for = kwargs.get('wait_for'), - quality_id = type.get('quality_id') - ) - p.types.append(t) - + profile['qualities'].append(type.get('quality')) + profile['wait_for'].append(tryInt(kwargs.get('wait_for', 0))) + profile['finish'].append((tryInt(type.get('finish')) == 1) if order > 0 else True) + profile['3d'].append(tryInt(type.get('3d'))) order += 1 - db.commit() + id = kwargs.get('id') + try: + p = db.get('id', id) + profile['order'] = tryInt(kwargs.get('order', p.get('order', 999))) + except: + p = db.insert(profile) - profile_dict = p.to_dict(self.to_dict) + p.update(profile) + db.update(p) return { 'success': True, - 'profile': profile_dict + 'profile': p } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False } def default(self): - - db = get_session() - default = db.query(Profile) \ - .options(joinedload_all('types')) \ - .first() - default_dict = default.to_dict(self.to_dict) - - return default_dict + db = get_db() + return list(db.all('profile', limit = 1, with_doc = True))[0]['doc'] def saveOrder(self, **kwargs): try: - db = get_session() + db = get_db() order = 0 - for profile in kwargs.get('ids', []): - p = db.query(Profile).filter_by(id = profile).first() - p.hide = kwargs.get('hidden')[order] - p.order = order + + for profile_id in kwargs.get('ids', []): + p = db.get('id', profile_id) + p['hide'] = tryInt(kwargs.get('hidden')[order]) == 1 + p['order'] = order + db.update(p) order += 1 - db.commit() - 
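+            # CodernityDB persists every db.insert()/db.update() the moment it
+            # is called, so the old session bookkeeping (commit on success,
+            # rollback on error, close in finally) drops out of this method and
+            # the others in this file.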
return { 'success': True } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -161,15 +154,14 @@ class ProfilePlugin(Plugin): def delete(self, id = None, **kwargs): try: - db = get_session() + db = get_db() success = False message = '' - try: - p = db.query(Profile).filter_by(id = id).first() + try: + p = db.get('id', id) db.delete(p) - db.commit() # Force defaults on all empty profile movies self.forceDefaults() @@ -184,9 +176,6 @@ class ProfilePlugin(Plugin): } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -195,7 +184,7 @@ class ProfilePlugin(Plugin): def fill(self): try: - db = get_session() + db = get_db() profiles = [{ 'label': 'Best', @@ -206,41 +195,42 @@ class ProfilePlugin(Plugin): }, { 'label': 'SD', 'qualities': ['dvdrip', 'dvdr'] + }, { + 'label': 'Prefer 3D HD', + 'qualities': ['1080p', '720p', '720p', '1080p'], + '3d': [True, True] + }, { + 'label': '3D HD', + 'qualities': ['1080p', '720p'], + '3d': [True, True] }] # Create default quality profile - order = -2 + order = 0 for profile in profiles: log.info('Creating default profile: %s', profile.get('label')) - p = Profile( - label = toUnicode(profile.get('label')), - order = order - ) - db.add(p) - quality_order = 0 - for quality in profile.get('qualities'): - quality = fireEvent('quality.single', identifier = quality, single = True) - profile_type = ProfileType( - quality_id = quality.get('id'), - profile = p, - finish = True, - wait_for = 0, - order = quality_order - ) - p.types.append(profile_type) + pro = { + '_t': 'profile', + 'label': toUnicode(profile.get('label')), + 'order': order, + 'qualities': profile.get('qualities'), + 'finish': [], + 'wait_for': [], + '3d': [] + } - quality_order += 1 + threed = profile.get('3d', []) + for q in profile.get('qualities'): + pro['finish'].append(True) + pro['wait_for'].append(0) + pro['3d'].append(threed.pop() if threed else False) + db.insert(pro) order += 1 - db.commit() - return True except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return False diff --git a/couchpotato/core/plugins/profile/static/profile.css b/couchpotato/core/plugins/profile/static/profile.css index d94576b9..f8a1b422 100644 --- a/couchpotato/core/plugins/profile/static/profile.css +++ b/couchpotato/core/plugins/profile/static/profile.css @@ -22,18 +22,17 @@ .profile > .delete:hover { opacity: 1; } - + .profile .ctrlHolder:hover { background: none; } .profile .qualities { min-height: 80px; - padding-top: 0; } .profile .formHint { - width: 250px !important; + width: 210px !important; vertical-align: top !important; margin: 0 !important; padding-left: 3px !important; @@ -78,21 +77,55 @@ } .profile .quality_type select { - width: 186px; + width: 120px; margin-left: -1px; } - .profile .types li.is_empty .check, .profile .types li.is_empty .delete, .profile .types li.is_empty .handle { + .profile .types li.is_empty .check, + .profile .types li.is_empty .delete, + .profile .types li.is_empty .handle, + .profile .types li.is_empty .check_label { visibility: hidden; } + .profile .types .type label { + display: inline-block; + width: auto; + float: none; + text-transform: uppercase; + font-size: 11px; + font-weight: normal; + margin-right: 20px; + text-shadow: none; + vertical-align: bottom; + padding: 0; + height: 17px; + } + .profile .types .type label .check { + margin-right: 5px; + } + .profile .types .type label 
.check_label { + display: inline-block; + vertical-align: top; + height: 16px; + line-height: 13px; + } + + .profile .types .type .threed { + display: none; + } + + .profile .types .type.allow_3d .threed { + display: inline-block; + } + .profile .types .type .handle { - background: url('../../static/profile_plugin/handle.png') center; + background: url('../../images/handle.png') center; display: inline-block; height: 20px; width: 20px; - cursor: -webkit-grab; cursor: -moz-grab; + cursor: -webkit-grab; cursor: grab; margin: 0; } @@ -106,6 +139,9 @@ font-size: 13px; color: #fd5353; } + .profile .types .type:not(.allow_3d) .delete { + margin-left: 55px; + } .profile .types .type:hover:not(.is_empty) .delete { visibility: visible; @@ -123,9 +159,6 @@ } #profile_ordering li { - cursor: -webkit-grab; - cursor: -moz-grab; - cursor: grab; border-bottom: 1px solid rgba(255,255,255,0.2); padding: 0 5px; } @@ -144,9 +177,12 @@ } #profile_ordering li .handle { - background: url('../../static/profile_plugin/handle.png') center; + background: url('../../images/handle.png') center; width: 20px; float: right; + cursor: -webkit-grab; + cursor: -moz-grab; + cursor: grab; } #profile_ordering .formHint { @@ -154,4 +190,4 @@ float: right; width: 250px; margin: 0; - } \ No newline at end of file + } diff --git a/couchpotato/core/plugins/profile/static/profile.js b/couchpotato/core/plugins/profile/static/profile.js index a93ca1bc..c62b137c 100644 --- a/couchpotato/core/plugins/profile/static/profile.js +++ b/couchpotato/core/plugins/profile/static/profile.js @@ -41,7 +41,7 @@ var Profile = new Class({ new Element('span', {'text':'Wait'}), new Element('input.inlay.xsmall', { 'type':'text', - 'value': data.types && data.types.length > 0 ? data.types[0].wait_for : 0 + 'value': data.wait_for && data.wait_for.length > 0 ? data.wait_for[0] : 0 }), new Element('span', {'text':'day(s) for a better quality.'}) ), @@ -54,10 +54,22 @@ var Profile = new Class({ ) ); - self.makeSortable() + self.makeSortable(); + + // Combine qualities and properties into types + if(data.qualities){ + data.types = []; + data.qualities.each(function(quality, nr){ + data.types.include({ + 'quality': quality, + 'finish': data.finish[nr] || false, + '3d': data['3d'] ? 
data['3d'][nr] || false : false + }) + }); + } if(data.types) - Object.each(data.types, self.addType.bind(self)) + data.types.each(self.addType.bind(self)); else self.delete_button.hide(); @@ -87,7 +99,7 @@ var Profile = new Class({ 'onComplete': function(json){ if(json.success){ self.data = json.profile; - self.type_container.getElement('li:first-child input[type=checkbox]') + self.type_container.getElement('li:first-child input.finish[type=checkbox]') .set('checked', true) .getParent().addClass('checked'); } @@ -102,19 +114,20 @@ var Profile = new Class({ var self = this; var data = { - 'id' : self.data.id, + 'id' : self.data._id, 'label' : self.el.getElement('.quality_label input').get('value'), 'wait_for' : self.el.getElement('.wait_for input').get('value'), 'types': [] - } + }; Array.each(self.type_container.getElements('.type'), function(type){ - if(!type.hasClass('deleted') && type.getElement('select').get('value') > 0) + if(!type.hasClass('deleted') && type.getElement('select').get('value') != -1) data.types.include({ - 'quality_id': type.getElement('select').get('value'), - 'finish': +type.getElement('input[type=checkbox]').checked + 'quality': type.getElement('select').get('value'), + 'finish': +type.getElement('input.finish[type=checkbox]').checked, + '3d': +type.getElement('input.3d[type=checkbox]').checked }); - }) + }); return data }, @@ -145,7 +158,7 @@ var Profile = new Class({ var self = this; return self.types.filter(function(type){ - return type.get('quality_id') + return type.get('quality') }); }, @@ -162,7 +175,7 @@ var Profile = new Class({ (e).preventDefault(); Api.request('profile.delete', { 'data': { - 'id': self.data.id + 'id': self.data._id }, 'useSpinner': true, 'spinnerOptions': { @@ -227,6 +240,7 @@ Profile.Type = new Class({ self.addEvent('change', function(){ self.el[self.qualities.get('value') == '-1' ? 'addClass' : 'removeClass']('is_empty'); + self.el[Quality.getQuality(self.qualities.get('value')).allow_3d ? 'addClass': 'removeClass']('allow_3d'); self.deleted = self.qualities.get('value') == '-1'; }); @@ -237,24 +251,40 @@ Profile.Type = new Class({ var data = self.data; self.el = new Element('li.type').adopt( - new Element('span.quality_type').adopt( + new Element('span.quality_type').grab( self.fillQualities() ), - new Element('span.finish').adopt( - self.finish = new Element('input.inlay.finish[type=checkbox]', { - 'checked': data.finish !== undefined ? data.finish : 1, - 'events': { - 'change': function(e){ - if(self.el == self.el.getParent().getElement(':first-child')){ - self.finish_class.check(); - alert('Top quality always finishes the search') - return; - } + self.finish_container = new Element('label.finish').adopt( + new Element('span.finish').grab( + self.finish = new Element('input.inlay.finish[type=checkbox]', { + 'checked': data.finish !== undefined ? data.finish : 1, + 'events': { + 'change': function(){ + if(self.el == self.el.getParent().getElement(':first-child')){ + self.finish_class.check(); + alert('Top quality always finishes the search'); + return; + } - self.fireEvent('change'); + self.fireEvent('change'); + } } - } - }) + }) + ), + new Element('span.check_label[text=finish]') + ), + self['3d_container'] = new Element('label.threed').adopt( + new Element('span.3d').grab( + self['3d'] = new Element('input.inlay.3d[type=checkbox]', { + 'checked': data['3d'] !== undefined ? 
data['3d'] : 0, + 'events': { + 'change': function(){ + self.fireEvent('change'); + } + } + }) + ), + new Element('span.check_label[text=3D]') ), new Element('span.delete.icon2', { 'events': { @@ -264,9 +294,13 @@ Profile.Type = new Class({ new Element('span.handle') ); - self.el[self.data.quality_id > 0 ? 'removeClass' : 'addClass']('is_empty'); + self.el[self.data.quality ? 'removeClass' : 'addClass']('is_empty'); + + if(self.data.quality && Quality.getQuality(self.data.quality).allow_3d) + self.el.addClass('allow_3d'); self.finish_class = new Form.Check(self.finish); + self['3d_class'] = new Form.Check(self['3d']); }, @@ -277,7 +311,7 @@ Profile.Type = new Class({ 'events': { 'change': self.fireEvent.bind(self, 'change') } - }).adopt( + }).grab( new Element('option', { 'text': '+ Add another quality', 'value': -1 @@ -287,11 +321,12 @@ Profile.Type = new Class({ Object.each(Quality.qualities, function(q){ new Element('option', { 'text': q.label, - 'value': q.id + 'value': q.identifier, + 'data-allow_3d': q.allow_3d }).inject(self.qualities) }); - self.qualities.set('value', self.data.quality_id); + self.qualities.set('value', self.data.quality); return self.qualities; @@ -301,8 +336,9 @@ Profile.Type = new Class({ var self = this; return { - 'quality_id': self.qualities.get('value'), - 'finish': +self.finish.checked + 'quality': self.qualities.get('value'), + 'finish': +self.finish.checked, + '3d': +self['3d'].checked } }, @@ -324,4 +360,4 @@ Profile.Type = new Class({ return this.el; } -}) \ No newline at end of file +}); diff --git a/couchpotato/core/plugins/quality/__init__.py b/couchpotato/core/plugins/quality/__init__.py index 2630f1a3..7710251c 100644 --- a/couchpotato/core/plugins/quality/__init__.py +++ b/couchpotato/core/plugins/quality/__init__.py @@ -1,7 +1,5 @@ from .main import QualityPlugin -def start(): +def autoload(): return QualityPlugin() - -config = [] diff --git a/couchpotato/core/plugins/quality/index.py b/couchpotato/core/plugins/quality/index.py new file mode 100644 index 00000000..78043972 --- /dev/null +++ b/couchpotato/core/plugins/quality/index.py @@ -0,0 +1,18 @@ +from hashlib import md5 + +from CodernityDB.hash_index import HashIndex + + +class QualityIndex(HashIndex): + _version = 1 + + def __init__(self, *args, **kwargs): + kwargs['key_format'] = '32s' + super(QualityIndex, self).__init__(*args, **kwargs) + + def make_key(self, key): + return md5(key).hexdigest() + + def make_key_value(self, data): + if data.get('_t') == 'quality' and data.get('identifier'): + return md5(data.get('identifier')).hexdigest(), None diff --git a/couchpotato/core/plugins/quality/main.py b/couchpotato/core/plugins/quality/main.py index 80773a84..dd820cf0 100644 --- a/couchpotato/core/plugins/quality/main.py +++ b/couchpotato/core/plugins/quality/main.py @@ -1,28 +1,33 @@ import traceback -from couchpotato import get_session +import re + +from CodernityDB.database import RecordNotFound +from couchpotato import get_db from couchpotato.api import addApiView -from couchpotato.core.event import addEvent +from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.encoding import toUnicode, ss -from couchpotato.core.helpers.variable import mergeDicts, getExt +from couchpotato.core.helpers.variable import mergeDicts, getExt, tryInt, splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Quality, Profile, ProfileType -from sqlalchemy.sql.expression import or_ -import re 
-import time +from couchpotato.core.plugins.quality.index import QualityIndex + log = CPLog(__name__) class QualityPlugin(Plugin): + _database = { + 'quality': QualityIndex + } + qualities = [ - {'identifier': 'bd50', 'hd': True, 'size': (15000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25'], 'allow': ['1080p'], 'ext':[], 'tags': ['bdmv', 'certificate', ('complete', 'bluray')]}, - {'identifier': '1080p', 'hd': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts'], 'tags': ['m2ts', 'x264', 'h264']}, - {'identifier': '720p', 'hd': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']}, - {'identifier': 'brrip', 'hd': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip'], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]}, - {'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd'], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r')]}, - {'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, + {'identifier': 'bd50', 'hd': True, 'allow_3d': True, 'size': (20000, 60000), 'label': 'BR-Disk', 'alternative': ['bd25', ('br', 'disk')], 'allow': ['1080p'], 'ext':['iso', 'img'], 'tags': ['bdmv', 'certificate', ('complete', 'bluray'), 'avc', 'mvc']}, + {'identifier': '1080p', 'hd': True, 'allow_3d': True, 'size': (4000, 20000), 'label': '1080p', 'width': 1920, 'height': 1080, 'alternative': [], 'allow': [], 'ext':['mkv', 'm2ts', 'ts'], 'tags': ['m2ts', 'x264', 'h264']}, + {'identifier': '720p', 'hd': True, 'allow_3d': True, 'size': (3000, 10000), 'label': '720p', 'width': 1280, 'height': 720, 'alternative': [], 'allow': [], 'ext':['mkv', 'ts'], 'tags': ['x264', 'h264']}, + {'identifier': 'brrip', 'hd': True, 'allow_3d': True, 'size': (700, 7000), 'label': 'BR-Rip', 'alternative': ['bdrip', ('br', 'rip')], 'allow': ['720p', '1080p'], 'ext':[], 'tags': ['hdtv', 'hdrip', 'webdl', ('web', 'dl')]}, + {'identifier': 'dvdr', 'size': (3000, 10000), 'label': 'DVD-R', 'alternative': ['br2dvd', ('dvd', 'r')], 'allow': [], 'ext':['iso', 'img', 'vob'], 'tags': ['pal', 'ntsc', 'video_ts', 'audio_ts', ('dvd', 'r'), 'dvd9']}, + {'identifier': 'dvdrip', 'size': (600, 2400), 'label': 'DVD-Rip', 'width': 720, 'alternative': [('dvd', 'rip')], 'allow': [], 'ext':[], 'tags': [('dvd', 'rip'), ('dvd', 'xvid'), ('dvd', 'divx')]}, {'identifier': 'scr', 'size': (600, 1600), 'label': 'Screener', 'alternative': ['screener', 'dvdscr', 'ppvrip', 'dvdscreener', 'hdscr'], 'allow': ['dvdr', 'dvdrip', '720p', '1080p'], 'ext':[], 'tags': ['webrip', ('web', 'rip')]}, {'identifier': 'r5', 'size': (600, 1000), 'label': 'R5', 'alternative': ['r6'], 'allow': ['dvdr'], 'ext':[]}, {'identifier': 'tc', 'size': (600, 1000), 'label': 'TeleCine', 'alternative': ['telecine'], 'allow': [], 'ext':[]}, @@ -30,6 +35,11 @@ class QualityPlugin(Plugin): {'identifier': 'cam', 'size': (600, 1000), 'label': 'Cam', 'alternative': ['camrip', 'hdcam'], 'allow': [], 'ext':[]} ] pre_releases = ['cam', 'ts', 'tc', 'r5', 'scr'] + threed_tags = { + 'sbs': [('half', 'sbs'), 'hsbs', ('full', 'sbs'), 'fsbs'], + 'ou': [('half', 'ou'), 'hou', ('full', 'ou'), 'fou'], + '3d': ['2d3d', '3d2d', '3d'], + } cached_qualities = None 
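    # Lazy caches: all() fills cached_qualities with the merged quality dicts,
    # calcScore() builds cached_order (identifier -> list position) for the
    # allow/penalty bookkeeping, and saveSize() resets cached_qualities so
    # edited sizes take effect immediately.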
cached_order = None @@ -39,6 +49,10 @@ class QualityPlugin(Plugin): addEvent('quality.single', self.single) addEvent('quality.guess', self.guess) addEvent('quality.pre_releases', self.preReleases) + addEvent('quality.order', self.getOrder) + addEvent('quality.ishigher', self.isHigher) + addEvent('quality.isfinish', self.isFinish) + addEvent('quality.fill', self.fill) addApiView('quality.size.save', self.saveSize) addApiView('quality.list', self.allView, docs = { @@ -53,6 +67,17 @@ class QualityPlugin(Plugin): addEvent('app.test', self.doTest) + self.order = [] + self.addOrder() + + def addOrder(self): + self.order = [] + for q in self.qualities: + self.order.append(q.get('identifier')) + + def getOrder(self): + return self.order + def preReleases(self): return self.pre_releases @@ -68,26 +93,27 @@ class QualityPlugin(Plugin): if self.cached_qualities: return self.cached_qualities - db = get_session() - - qualities = db.query(Quality).all() + db = get_db() temp = [] - for quality in qualities: - q = mergeDicts(self.getQuality(quality.identifier), quality.to_dict()) + for quality in self.qualities: + quality_doc = db.get('quality', quality.get('identifier'), with_doc = True)['doc'] + q = mergeDicts(quality, quality_doc) temp.append(q) - self.cached_qualities = temp + if len(temp) == len(self.qualities): + self.cached_qualities = temp + return temp def single(self, identifier = ''): - db = get_session() + db = get_db() quality_dict = {} - quality = db.query(Quality).filter(or_(Quality.identifier == identifier, Quality.id == identifier)).first() + quality = db.get('quality', identifier, with_doc = True)['doc'] if quality: - quality_dict = dict(self.getQuality(quality.identifier), **quality.to_dict()) + quality_dict = mergeDicts(self.getQuality(quality['identifier']), quality) return quality_dict @@ -100,12 +126,12 @@ class QualityPlugin(Plugin): def saveSize(self, **kwargs): try: - db = get_session() - quality = db.query(Quality).filter_by(identifier = kwargs.get('identifier')).first() + db = get_db() + quality = db.get('quality', kwargs.get('identifier'), with_doc = True) if quality: - setattr(quality, kwargs.get('value_type'), kwargs.get('value')) - db.commit() + quality['doc'][kwargs.get('value_type')] = tryInt(kwargs.get('value')) + db.update(quality['doc']) self.cached_qualities = None @@ -114,9 +140,6 @@ class QualityPlugin(Plugin): } except: log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() return { 'success': False @@ -125,64 +148,46 @@ class QualityPlugin(Plugin): def fill(self): try: - db = get_session() + db = get_db() order = 0 for q in self.qualities: - # Create quality - qual = db.query(Quality).filter_by(identifier = q.get('identifier')).first() + existing = None + try: + existing = db.get('quality', q.get('identifier')) + except RecordNotFound: + pass - if not qual: - log.info('Creating quality: %s', q.get('label')) - qual = Quality() - qual.order = order - qual.identifier = q.get('identifier') - qual.label = toUnicode(q.get('label')) - qual.size_min, qual.size_max = q.get('size') + if not existing: + db.insert({ + '_t': 'quality', + 'order': order, + 'identifier': q.get('identifier'), + 'size_min': tryInt(q.get('size')[0]), + 'size_max': tryInt(q.get('size')[1]), + }) - db.add(qual) - - # Create single quality profile - prof = db.query(Profile).filter( - Profile.core == True - ).filter( - Profile.types.any(quality = qual) - ).all() - - if not prof: log.info('Creating profile: %s', q.get('label')) - prof = Profile( - core = True, - label = 
toUnicode(qual.label),
-                    order = order
-                )
-                db.add(prof)
-
-                profile_type = ProfileType(
-                    quality = qual,
-                    profile = prof,
-                    finish = True,
-                    order = 0
-                )
-                prof.types.append(profile_type)
+                db.insert({
+                    '_t': 'profile',
+                    'order': order + 20,  # Make sure it goes behind other profiles
+                    'core': True,
+                    'qualities': [q.get('identifier')],
+                    'label': toUnicode(q.get('label')),
+                    'finish': [True],
+                    'wait_for': [0],
+                })
 
                 order += 1
 
-            db.commit()
-
-            time.sleep(0.3)  # Wait a moment
-
             return True
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return False
 
-    def guess(self, files, extra = None):
+    def guess(self, files, extra = None, size = None):
         if not extra: extra = {}
 
         # Create hash for cache
@@ -196,34 +201,58 @@ class QualityPlugin(Plugin):
 
         # Start with 0
         score = {}
         for quality in qualities:
-            score[quality.get('identifier')] = 0
+            score[quality.get('identifier')] = {
+                'score': 0,
+                '3d': {}
+            }
 
         for cur_file in files:
             words = re.split('\W+', cur_file.lower())
+            name_year = fireEvent('scanner.name_year', cur_file, file_name = cur_file, single = True)
+            threed_words = words
+            if name_year and name_year.get('name'):
+                split_name = splitString(name_year.get('name'), ' ')
+                threed_words = [x for x in words if x not in split_name]
 
             for quality in qualities:
                 contains_score = self.containsTagScore(quality, words, cur_file)
-                self.calcScore(score, quality, contains_score)
+                threedscore = self.contains3D(quality, threed_words, cur_file) if quality.get('allow_3d') else (0, None)
 
-        # Try again with loose testing
+                self.calcScore(score, quality, contains_score, threedscore)
+
+        size_scores = []
         for quality in qualities:
-            loose_score = self.guessLooseScore(quality, files = files, extra = extra)
-            self.calcScore(score, quality, loose_score)
 
+            # Evaluate score based on size
+            size_score = self.guessSizeScore(quality, size = size)
+            loose_score = self.guessLooseScore(quality, extra = extra)
 
-        # Return nothing if all scores are 0
+            if size_score > 0:
+                size_scores.append(quality)
+
+            self.calcScore(score, quality, size_score + loose_score, penalty = False)
+
+        # Add additional size score if only 1 size validated
+        if len(size_scores) == 1:
+            self.calcScore(score, size_scores[0], 10, penalty = False)
+        del size_scores
+
+        # Return nothing if all scores are <= 0
         has_non_zero = 0
         for s in score:
-            if score[s] > 0:
+            if score[s]['score'] > 0:
                 has_non_zero += 1
 
         if not has_non_zero:
            return None
 
-        heighest_quality = max(score, key = score.get)
+        heighest_quality = max(score, key = lambda p: score[p]['score'])
         if heighest_quality:
             for quality in qualities:
                 if quality.get('identifier') == heighest_quality:
+                    quality['is_3d'] = False
+                    if score[heighest_quality].get('3d'):
+                        quality['is_3d'] = True
                     return self.setCache(cache_key, quality)
 
         return None
@@ -232,6 +261,9 @@ class QualityPlugin(Plugin):
         cur_file = ss(cur_file)
         score = 0
 
+        extension = words[-1]
+        words = words[:-1]
+
         points = {
             'identifier': 10,
             'label': 10,
@@ -246,12 +278,12 @@ class QualityPlugin(Plugin):
             qualities = [qualities] if isinstance(qualities, (str, unicode)) else qualities
 
             for alt in qualities:
-                if (isinstance(alt, tuple)):
+                if isinstance(alt, tuple):
                     if len(set(words) & set(alt)) == len(alt):
                         log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                         score += points.get(tag_type)
 
-                if (isinstance(alt, (str, unicode)) and ss(alt.lower()) in cur_file.lower()):
+                if isinstance(alt, (str, unicode)) and ss(alt.lower()) in words:
                     log.debug('Found %s via %s %s in %s', (quality['identifier'], tag_type, quality.get(tag_type), cur_file))
                     score += points.get(tag_type) / 2
 
@@ -261,13 +293,30 @@ class QualityPlugin(Plugin):
 
         # Check extention
         for ext in quality.get('ext', []):
-            if ext == words[-1]:
-                log.debug('Found %s extension in %s', (ext, cur_file))
+            if ext == extension:
+                log.debug('Found %s with .%s extension in %s', (quality['identifier'], ext, cur_file))
                 score += points['ext']
 
         return score
 
-    def guessLooseScore(self, quality, files = None, extra = None):
+    def contains3D(self, quality, words, cur_file = ''):
+        cur_file = ss(cur_file)
+
+        for key in self.threed_tags:
+            tags = self.threed_tags.get(key, [])
+
+            for tag in tags:
+                if isinstance(tag, tuple):
+                    if len(set(words) & set(tag)) == len(tag):
+                        log.debug('Found %s in %s', (tag, cur_file))
+                        return 1, key
+                elif tag in words:
+                    log.debug('Found %s in %s', (tag, cur_file))
+                    return 1, key
+
+        return 0, None
+
+    def guessLooseScore(self, quality, extra = None):
 
         score = 0
 
@@ -289,9 +338,31 @@ class QualityPlugin(Plugin):
 
         return score
 
-    def calcScore(self, score, quality, add_score):
-        score[quality['identifier']] += add_score
+    def guessSizeScore(self, quality, size = None):
+
+        score = 0
+
+        if size:
+
+            if tryInt(quality['size_min']) <= tryInt(size) <= tryInt(quality['size_max']):
+                log.debug('Found %s via release size: %s MB < %s MB < %s MB', (quality['identifier'], quality['size_min'], size, quality['size_max']))
+                score += 5
+            else:
+                score -= 5
+
+        return score
+
+    def calcScore(self, score, quality, add_score, threedscore = (0, None), penalty = True):
+
+        score[quality['identifier']]['score'] += add_score
+
+        threedscore, threedtag = threedscore
+        if threedscore and threedtag:
+            if threedscore not in score[quality['identifier']]['3d']:
+                score[quality['identifier']]['3d'][threedtag] = 0
+
+            score[quality['identifier']]['3d'][threedtag] += threedscore
 
         # Set order for allow calculation (and cache)
         if not self.cached_order:
@@ -299,32 +370,93 @@ class QualityPlugin(Plugin):
             for q in self.qualities:
                 self.cached_order[q.get('identifier')] = self.qualities.index(q)
 
-        if add_score != 0:
+        if penalty and add_score != 0:
             for allow in quality.get('allow', []):
-                score[allow] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5
+                score[allow]['score'] -= 40 if self.cached_order[allow] < self.cached_order[quality['identifier']] else 5
+
+            # Give penalty for all lower qualities
+            for q in self.qualities[self.order.index(quality.get('identifier'))+1:]:
+                if score.get(q.get('identifier')):
+                    score[q.get('identifier')]['score'] -= 1
+
+    def isFinish(self, quality, profile):
+        if not isinstance(profile, dict) or not profile.get('qualities'):
+            return False
+
+        try:
+            quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
+            return profile['finish'][quality_order]
+        except:
+            return False
+
+    def isHigher(self, quality, compare_with, profile = None):
+        if not isinstance(profile, dict) or not profile.get('qualities'):
+            profile = {'qualities': self.order}
+
+        # Try to find quality in profile, if not found: a quality we do not want is lower than anything else
+        try:
+            quality_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == quality['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(quality.get('is_3d', 0))][0]
+        except:
+            log.debug('Quality %s not found in profile identifiers %s', (quality['identifier'] + (' 3D' if quality.get('is_3d', 0) else ''), \
+                [identifier + ('3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
+            return 'lower'
+
+        # Try to find compare quality in profile, if not found: anything is higher than a not wanted quality
+        try:
+            compare_order = [i for i, identifier in enumerate(profile['qualities']) if identifier == compare_with['identifier'] and bool(profile['3d'][i] if profile.get('3d') else 0) == bool(compare_with.get('is_3d', 0))][0]
+        except:
+            log.debug('Compare quality %s not found in profile identifiers %s', (compare_with['identifier'] + (' 3D' if compare_with.get('is_3d', 0) else ''), \
+                [identifier + (' 3D' if (profile['3d'][i] if profile.get('3d') else 0) else '') for i, identifier in enumerate(profile['qualities'])]))
+            return 'higher'
+
+        # Note to self: a lower number means higher quality
+        if quality_order > compare_order:
+            return 'lower'
+        elif quality_order == compare_order:
+            return 'equal'
+        else:
+            return 'higher'
 
     def doTest(self):
         tests = {
-            'Movie Name (1999)-DVD-Rip.avi': 'dvdrip',
-            'Movie Name 1999 720p Bluray.mkv': '720p',
-            'Movie Name 1999 BR-Rip 720p.avi': 'brrip',
-            'Movie Name 1999 720p Web Rip.avi': 'scr',
-            'Movie Name 1999 Web DL.avi': 'brrip',
-            'Movie.Name.1999.1080p.WEBRip.H264-Group': 'scr',
-            'Movie.Name.1999.DVDRip-Group': 'dvdrip',
-            'Movie.Name.1999.DVD-Rip-Group': 'dvdrip',
-            'Movie.Name.1999.DVD-R-Group': 'dvdr',
-            'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': '720p',
-            'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': '1080p',
-            'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': '1080p',
+            'Movie Name (1999)-DVD-Rip.avi': {'size': 700, 'quality': 'dvdrip'},
+            'Movie Name 1999 720p Bluray.mkv': {'size': 4200, 'quality': '720p'},
+            'Movie Name 1999 BR-Rip 720p.avi': {'size': 1000, 'quality': 'brrip'},
+            'Movie Name 1999 720p Web Rip.avi': {'size': 1200, 'quality': 'scr'},
+            'Movie Name 1999 Web DL.avi': {'size': 800, 'quality': 'brrip'},
+            'Movie.Name.1999.1080p.WEBRip.H264-Group': {'size': 1500, 'quality': 'scr'},
+            'Movie.Name.1999.DVDRip-Group': {'size': 750, 'quality': 'dvdrip'},
+            'Movie.Name.1999.DVD-Rip-Group': {'size': 700, 'quality': 'dvdrip'},
+            'Movie.Name.1999.DVD-R-Group': {'size': 4500, 'quality': 'dvdr'},
+            'Movie.Name.Camelie.1999.720p.BluRay.x264-Group': {'size': 5500, 'quality': '720p'},
+            'Movie.Name.2008.German.DL.AC3.1080p.BluRay.x264-Group': {'size': 8500, 'extra': {'resolution_width': 1920, 'resolution_height': 1080}, 'quality': '1080p'},
+            'Movie.Name.2004.GERMAN.AC3D.DL.1080p.BluRay.x264-Group': {'size': 8000, 'quality': '1080p'},
+            'Movie.Name.2013.BR-Disk-Group.iso': {'size': 48000, 'quality': 'bd50'},
+            'Movie.Name.2013.2D+3D.BR-Disk-Group.iso': {'size': 52000, 'quality': 'bd50', 'is_3d': True},
+            'Movie.Rising.Name.Girl.2011.NTSC.DVD9-GroupDVD': {'size': 7200, 'quality': 'dvdr'},
+            'Movie Name (2013) 2D + 3D': {'size': 49000, 'quality': 'bd50', 'is_3d': True},
+            'Movie Monuments 2013 BrRip 1080p': {'size': 1800, 'quality': 'brrip'},
+            'Movie Monuments 2013 BrRip 720p': {'size': 1300, 'quality': 'brrip'},
+            'The.Movie.2014.3D.1080p.BluRay.AVC.DTS-HD.MA.5.1-GroupName': {'size': 30000, 'quality': 'bd50', 'is_3d': True},
+            '/home/namehou/Movie Monuments (2013)/Movie Monuments.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': False},
+            '/home/namehou/Movie Monuments (2013)/Movie Monuments Full-OU.mkv': {'size': 4500, 'quality': '1080p', 'is_3d': True},
+            '/volume1/Public/3D/Moviename/Moviename (2009).3D.SBS.ts': {'size': 7500, 'quality': '1080p', 'is_3d': True},
+            '/volume1/Public/Moviename/Moviename (2009).ts': {'size': 5500, 'quality': '1080p'},
+            '/movies/BluRay HDDVD H.264 MKV 720p EngSub/QuiQui le fou (criterion collection #123, 1915)/QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
+            'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p'},
+            'C:\\movies\QuiQui le fou (collection #123, 1915)\QuiQui le fou (1915) half-sbs 720p x264 BluRay.mkv': {'size': 5500, 'quality': '720p', 'is_3d': True},
         }
 
         correct = 0
         for name in tests:
-            success = self.guess([name]).get('identifier') == tests[name]
+            test_quality = self.guess(files = [name], extra = tests[name].get('extra', None), size = tests[name].get('size', None)) or {}
+            success = test_quality.get('identifier') == tests[name]['quality'] and test_quality.get('is_3d') == tests[name].get('is_3d', False)
             if not success:
-                log.error('%s failed check, thinks it\'s %s', (name, self.guess([name]).get('identifier')))
+                log.error('%s failed check, thinks it\'s "%s" expecting "%s"', (name,
+                    test_quality.get('identifier') + (' 3D' if test_quality.get('is_3d') else ''),
+                    tests[name]['quality'] + (' 3D' if tests[name].get('is_3d') else '')
+                ))
 
             correct += success
diff --git a/couchpotato/core/plugins/quality/static/quality.js b/couchpotato/core/plugins/quality/static/quality.js
index ead9a904..d233b1ce 100644
--- a/couchpotato/core/plugins/quality/static/quality.js
+++ b/couchpotato/core/plugins/quality/static/quality.js
@@ -8,16 +8,17 @@ var QualityBase = new Class({
 
 		self.qualities = data.qualities;
 
-		self.profiles = []
+		self.profiles_list = null;
+		self.profiles = [];
 		Array.each(data.profiles, self.createProfilesClass.bind(self));
 
-		App.addEvent('load', self.addSettings.bind(self))
+		App.addEvent('loadSettings', self.addSettings.bind(self))
 	},
 
 	getProfile: function(id){
 		return this.profiles.filter(function(profile){
-			return profile.data.id == id
+			return profile.data._id == id
 		}).pick()
 	},
 
@@ -28,16 +29,21 @@ var QualityBase = new Class({
 		});
 	},
 
-	getQuality: function(id){
-		return this.qualities.filter(function(q){
-			return q.id == id;
-		}).pick();
+	getQuality: function(identifier){
+		try {
+			return this.qualities.filter(function(q){
+				return q.identifier == identifier;
+			}).pick();
+		}
+		catch(e){}
+
+		return {}
 	},
 
 	addSettings: function(){
 		var self = this;
 
-		self.settings = App.getPage('Settings')
+		self.settings = App.getPage('Settings');
 		self.settings.addEvent('create', function(){
 			var tab = self.settings.createSubTab('profile', {
 				'label': 'Quality',
@@ -91,9 +97,9 @@ var QualityBase = new Class({
 	createProfilesClass: function(data){
 		var self = this;
 
-		var data = data || {'id': randomString()}
-		var profile = new Profile(data)
-		self.profiles.include(profile)
+		var data = data || {'id': randomString()};
+		var profile = new Profile(data);
+		self.profiles.include(profile);
 
 		return profile;
 	},
@@ -101,23 +107,22 @@ var QualityBase = new Class({
 	createProfileOrdering: function(){
 		var self = this;
 
-		var profile_list;
-		var group = self.settings.createGroup({
+		self.settings.createGroup({
 			'label': 'Profile Defaults',
 			'description': '(Needs refresh \'' +(App.isMac() ? 'CMD+R' : 'F5')+ '\' after editing)'
 		}).adopt(
 			new Element('.ctrlHolder#profile_ordering').adopt(
 				new Element('label[text=Order]'),
-				profile_list = new Element('ul'),
+				self.profiles_list = new Element('ul'),
 				new Element('p.formHint', {
 					'html': 'Change the order the profiles are in the dropdown list. Uncheck to hide it completely. First one will be default.'
 				})
 			)
-		).inject(self.content)
+		).inject(self.content);
 
 		Array.each(self.profiles, function(profile){
 			var check;
-			new Element('li', {'data-id': profile.data.id}).adopt(
+			new Element('li', {'data-id': profile.data._id}).adopt(
 				check = new Element('input.inlay[type=checkbox]', {
 					'checked': !profile.data.hide,
 					'events': {
@@ -128,29 +133,37 @@ var QualityBase = new Class({
 					'text': profile.data.label
 				}),
 				new Element('span.handle')
-			).inject(profile_list);
+			).inject(self.profiles_list);
 
 			new Form.Check(check);
 		});
 
 		// Sortable
-		self.profile_sortable = new Sortables(profile_list, {
+		var sorted_changed = false;
+		self.profile_sortable = new Sortables(self.profiles_list, {
 			'revert': true,
-			'handle': '',
+			'handle': '.handle',
 			'opacity': 0.5,
-			'onComplete': self.saveProfileOrdering.bind(self)
+			'onSort': function(){
+				sorted_changed = true;
+			},
+			'onComplete': function(){
+				if(sorted_changed){
+					self.saveProfileOrdering();
+					sorted_changed = false;
+				}
+			}
 		});
 
 	},
 
 	saveProfileOrdering: function(){
-		var self = this;
+		var self = this,
+			ids = [],
+			hidden = [];
 
-		var ids = [];
-		var hidden = [];
-
-		self.profile_sortable.list.getElements('li').each(function(el, nr){
+		self.profiles_list.getElements('li').each(function(el, nr){
 			ids.include(el.get('data-id'));
 			hidden[nr] = +!el.getElement('input[type=checkbox]').get('checked');
 		});
@@ -175,14 +188,14 @@ var QualityBase = new Class({
 			'description': 'Edit the minimal and maximum sizes (in MB) for each quality.',
 			'advanced': true,
 			'name': 'sizes'
-		}).inject(self.content)
+		}).inject(self.content);
 
 		new Element('div.item.head.ctrlHolder').adopt(
 			new Element('span.label', {'text': 'Quality'}),
 			new Element('span.min', {'text': 'Min'}),
 			new Element('span.max', {'text': 'Max'})
-		).inject(group)
+		).inject(group);
 
 		Array.each(self.qualities, function(quality){
 			new Element('div.ctrlHolder.item').adopt(
diff --git a/couchpotato/core/plugins/release/__init__.py b/couchpotato/core/plugins/release/__init__.py
index 08c6a57c..e6e60c4b 100644
--- a/couchpotato/core/plugins/release/__init__.py
+++ b/couchpotato/core/plugins/release/__init__.py
@@ -1,7 +1,5 @@
 from .main import Release
 
-def start():
+def autoload():
     return Release()
-
-config = []
diff --git a/couchpotato/core/plugins/release/index.py b/couchpotato/core/plugins/release/index.py
new file mode 100644
index 00000000..8265fe33
--- /dev/null
+++ b/couchpotato/core/plugins/release/index.py
@@ -0,0 +1,64 @@
+from hashlib import md5
+
+from CodernityDB.hash_index import HashIndex
+from CodernityDB.tree_index import TreeBasedIndex
+
+
+class ReleaseIndex(TreeBasedIndex):
+    _version = 1
+
+    def __init__(self, *args, **kwargs):
+        kwargs['key_format'] = '32s'
+        super(ReleaseIndex, self).__init__(*args, **kwargs)
+
+    def make_key(self, key):
+        return key
+
+    def make_key_value(self, data):
+        if data.get('_t') == 'release' and data.get('media_id'):
+            return data['media_id'], None
+
+
+class ReleaseStatusIndex(TreeBasedIndex):
+    _version = 1
+
+    def __init__(self, *args, **kwargs):
+        kwargs['key_format'] = '32s'
+        super(ReleaseStatusIndex, self).__init__(*args, **kwargs)
+
+    def make_key(self, key):
+        return md5(key).hexdigest()
+
+    def make_key_value(self, data):
+        if data.get('_t') == 'release' and data.get('status'):
+            return md5(data.get('status')).hexdigest(), {'media_id': data.get('media_id')}
+
+
+class ReleaseIDIndex(HashIndex):
+    _version = 1
+
+    def __init__(self, *args, **kwargs):
+        kwargs['key_format'] = '32s'
+        super(ReleaseIDIndex, self).__init__(*args, **kwargs)
+
+    def make_key(self, key):
+        return md5(key).hexdigest()
+
+    def make_key_value(self, data):
+        if data.get('_t') == 'release' and data.get('identifier'):
+            return md5(data.get('identifier')).hexdigest(), {'media_id': data.get('media_id')}
+
+
+class ReleaseDownloadIndex(HashIndex):
+    _version = 2
+
+    def __init__(self, *args, **kwargs):
+        kwargs['key_format'] = '32s'
+        super(ReleaseDownloadIndex, self).__init__(*args, **kwargs)
+
+    def make_key(self, key):
+        return md5(key.lower()).hexdigest()
+
+    def make_key_value(self, data):
+        if data.get('_t') == 'release' and data.get('download_info') and data['download_info']['id'] and data['download_info']['downloader']:
+            return md5(('%s-%s' % (data['download_info']['downloader'], data['download_info']['id'])).lower()).hexdigest(), None
diff --git a/couchpotato/core/plugins/release/main.py b/couchpotato/core/plugins/release/main.py
index a478b64d..cb16c5ae 100644
--- a/couchpotato/core/plugins/release/main.py
+++ b/couchpotato/core/plugins/release/main.py
@@ -1,30 +1,33 @@
-from couchpotato import get_session, md5
-from couchpotato.api import addApiView
-from couchpotato.core.event import fireEvent, addEvent
-from couchpotato.core.helpers.encoding import ss, toUnicode
-from couchpotato.core.helpers.variable import getTitle
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.core.plugins.scanner.main import Scanner
-from couchpotato.core.settings.model import File, Release as Relea, Media, \
-    ReleaseInfo
-from couchpotato.environment import Env
 from inspect import ismethod, isfunction
-from sqlalchemy.exc import InterfaceError
-from sqlalchemy.orm import joinedload_all
-from sqlalchemy.sql.expression import and_, or_
 import os
 import time
 import traceback
 
+from CodernityDB.database import RecordDeleted
+from couchpotato import md5, get_db
+from couchpotato.api import addApiView
+from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.helpers.encoding import toUnicode, sp
+from couchpotato.core.helpers.variable import getTitle
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from .index import ReleaseIndex, ReleaseStatusIndex, ReleaseIDIndex, ReleaseDownloadIndex
+from couchpotato.environment import Env
+
+
 log = CPLog(__name__)
 
 
 class Release(Plugin):
 
-    def __init__(self):
-        addEvent('release.add', self.add)
+    _database = {
+        'release': ReleaseIndex,
+        'release_status': ReleaseStatusIndex,
+        'release_identifier': ReleaseIDIndex,
+        'release_download': ReleaseDownloadIndex
+    }
 
+    def __init__(self):
         addApiView('release.manual_download', self.manualDownload, docs = {
             'desc': 'Send a release manually to the downloaders',
             'params': {
@@ -43,238 +46,234 @@ class Release(Plugin):
                 'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
             }
         })
-        addApiView('release.for_movie', self.forMovieView, docs = {
-            'desc': 'Returns all releases for a movie. Ordered by score(desc)',
-            'params': {
-                'id': {'type': 'id', 'desc': 'ID of the movie'}
-            }
-        })
+        addEvent('release.add', self.add)
 
         addEvent('release.download', self.download)
         addEvent('release.try_download_result', self.tryDownloadResult)
         addEvent('release.create_from_search', self.createFromSearch)
-        addEvent('release.for_movie', self.forMovie)
         addEvent('release.delete', self.delete)
         addEvent('release.clean', self.clean)
         addEvent('release.update_status', self.updateStatus)
+        addEvent('release.with_status', self.withStatus)
+        addEvent('release.for_media', self.forMedia)
 
         # Clean releases that didn't have activity in the last week
-        addEvent('app.load', self.cleanDone)
-        fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 4)
+        addEvent('app.load', self.cleanDone, priority = 1000)
+        fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
 
     def cleanDone(self):
-        log.debug('Removing releases from dashboard')
 
         now = time.time()
         week = 262080
 
-        done_status, available_status, snatched_status, downloaded_status, ignored_status = \
-            fireEvent('status.get', ['done', 'available', 'snatched', 'downloaded', 'ignored'], single = True)
+        db = get_db()
 
-        db = get_session()
+        # Get (and remove) parentless releases
+        releases = db.all('release', with_doc = True)
+        media_exist = []
+        for release in releases:
+            if release.get('key') in media_exist:
+                continue
+
+            try:
+                db.get('id', release.get('key'))
+                media_exist.append(release.get('key'))
+            except RecordDeleted:
+                db.delete(release['doc'])
+                log.debug('Deleted orphaned release: %s', release['doc'])
+            except:
+                log.debug('Failed cleaning up orphaned releases: %s', traceback.format_exc())
+
+        del media_exist
 
         # get movies last_edit more than a week ago
-        media = db.query(Media) \
-            .filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
-            .all()
+        medias = fireEvent('media.with_status', 'done', single = True)
+
+        for media in medias:
+            if media.get('last_edit', 0) > (now - week):
+                continue
+
+            for rel in fireEvent('release.for_media', media['_id'], single = True):
 
-        for item in media:
-            for rel in item.releases:
                 # Remove all available releases
-                if rel.status_id in [available_status.get('id')]:
-                    fireEvent('release.delete', id = rel.id, single = True)
+                if rel['status'] in ['available']:
+                    self.delete(rel['_id'])
+
                 # Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the move
-                elif rel.status_id in [snatched_status.get('id'), downloaded_status.get('id')]:
-                    self.updateStatus(id = rel.id, status = ignored_status)
+                elif rel['status'] in ['snatched', 'downloaded']:
+                    self.updateStatus(rel['_id'], status = 'ignore')
+
+            fireEvent('media.untag', media.get('_id'), 'recent', single = True)
 
-    def add(self, group):
+    def add(self, group, update_info = True, update_id = None):
         try:
-            db = get_session()
+            db = get_db()
 
-            identifier = '%s.%s.%s' % (group['library']['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier'])
+            release_identifier = '%s.%s.%s' % (group['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier'])
 
-            done_status, snatched_status = fireEvent('status.get', ['done', 'snatched'], single = True)
-
-            # Add movie
-            media = db.query(Media).filter_by(library_id = group['library'].get('id')).first()
-            if not media:
-                media = Media(
-                    library_id = group['library'].get('id'),
-                    profile_id = 0,
-                    status_id = done_status.get('id')
-                )
-                db.add(media)
-                db.commit()
-
-            # Add Release
-            rel = db.query(Relea).filter(
-                or_(
-                    Relea.identifier == identifier,
-                    and_(Relea.identifier.startswith(group['library']['identifier']), Relea.status_id == snatched_status.get('id'))
-                )
-            ).first()
-            if not rel:
-                rel = Relea(
-                    identifier = identifier,
-                    movie = media,
-                    quality_id = group['meta_data']['quality'].get('id'),
-                    status_id = done_status.get('id')
-                )
-                db.add(rel)
-                db.commit()
-
-            # Add each file type
-            added_files = []
-            for type in group['files']:
-                for cur_file in group['files'][type]:
-                    added_file = self.saveFile(cur_file, type = type, include_media_info = type is 'movie')
-                    added_files.append(added_file.get('id'))
-
-            # Add the release files in batch
+            # Add movie if it doesn't exist
             try:
-                added_files = db.query(File).filter(or_(*[File.id == x for x in added_files])).all()
-                rel.files.extend(added_files)
-                db.commit()
+                media = db.get('media', 'imdb-%s' % group['identifier'], with_doc = True)['doc']
             except:
-                log.debug('Failed to attach "%s" to release: %s', (added_files, traceback.format_exc()))
+                media = fireEvent('movie.add', params = {
+                    'identifier': group['identifier'],
+                    'profile_id': None,
+                }, search_after = False, update_after = update_info, notify_after = False, status = 'done', single = True)
 
-            fireEvent('media.restatus', media.id)
+            release = None
+            if update_id:
+                try:
+                    release = db.get('id', update_id)
+                    release.update({
+                        'identifier': release_identifier,
+                        'last_edit': int(time.time()),
+                        'status': 'done',
+                    })
+                except:
+                    log.error('Failed updating existing release: %s', traceback.format_exc())
+            else:
+
+                # Add Release
+                if not release:
+                    release = {
+                        '_t': 'release',
+                        'media_id': media['_id'],
+                        'identifier': release_identifier,
+                        'quality': group['meta_data']['quality'].get('identifier'),
+                        'is_3d': group['meta_data']['quality'].get('is_3d', 0),
+                        'last_edit': int(time.time()),
+                        'status': 'done'
+                    }
+
+                try:
+                    r = db.get('release_identifier', release_identifier, with_doc = True)['doc']
+                    r['media_id'] = media['_id']
+                except:
+                    log.debug('Failed updating release by identifier "%s". Inserting new.', release_identifier)
+                    r = db.insert(release)
+
+                # Update with ref and _id
+                release.update({
+                    '_id': r['_id'],
+                    '_rev': r['_rev'],
+                })
+
+            # Empty out empty file groups
+            release['files'] = dict((k, [toUnicode(x) for x in v]) for k, v in group['files'].items() if v)
+            db.update(release)
+
+            fireEvent('media.restatus', media['_id'])
 
             return True
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return False
 
-    def saveFile(self, filepath, type = 'unknown', include_media_info = False):
-
-        properties = {}
-
-        # Get media info for files
-        if include_media_info:
-            properties = {}
-
-        # Check database and update/insert if necessary
-        return fireEvent('file.add', path = filepath, part = fireEvent('scanner.partnumber', file, single = True), type_tuple = Scanner.file_types.get(type), properties = properties, single = True)
-
     def deleteView(self, id = None, **kwargs):
 
         return {
             'success': self.delete(id)
         }
 
-    def delete(self, id):
+    def delete(self, release_id):
         try:
-            db = get_session()
-
-            rel = db.query(Relea).filter_by(id = id).first()
-            if rel:
-                rel.delete()
-                db.commit()
-            return True
+            db = get_db()
+            rel = db.get('id', release_id)
+            db.delete(rel)
+            return True
+        except RecordDeleted:
+            log.debug('Already deleted: %s', release_id)
+            return True
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return False
 
-    def clean(self, id):
+    def clean(self, release_id):
         try:
-            db = get_session()
+            db = get_db()
+            rel = db.get('id', release_id)
+            raw_files = rel.get('files')
 
-            rel = db.query(Relea).filter_by(id = id).first()
-            if rel:
-                for release_file in rel.files:
-                    if not os.path.isfile(ss(release_file.path)):
-                        db.delete(release_file)
-                db.commit()
+            if len(raw_files) == 0:
+                self.delete(rel['_id'])
+            else:
 
-                if len(rel.files) == 0:
-                    self.delete(id)
+                files = {}
+                for file_type in raw_files:
 
-            return True
+                    for release_file in raw_files.get(file_type, []):
+                        if os.path.isfile(sp(release_file)):
+                            if file_type not in files:
+                                files[file_type] = []
+                            files[file_type].append(release_file)
+
+                rel['files'] = files
+                db.update(rel)
+
+            return True
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return False
 
     def ignore(self, id = None, **kwargs):
 
-        db = get_session()
+        db = get_db()
 
-        rel = db.query(Relea).filter_by(id = id).first()
-        if rel:
-            ignored_status, failed_status, available_status = fireEvent('status.get', ['ignored', 'failed', 'available'], single = True)
-            self.updateStatus(id, available_status if rel.status_id in [ignored_status.get('id'), failed_status.get('id')] else ignored_status)
+        try:
+            rel = db.get('id', id, with_doc = True)
+            self.updateStatus(id, 'available' if rel['status'] in ['ignored', 'failed'] else 'ignored')
+
+            return {
+                'success': True
+            }
+        except:
+            log.error('Failed: %s', traceback.format_exc())
 
         return {
-            'success': True
+            'success': False
        }
 
     def manualDownload(self, id = None, **kwargs):
 
-        db = get_session()
+        db = get_db()
 
-        rel = db.query(Relea).filter_by(id = id).first()
-        if not rel:
-            log.error('Couldn\'t find release with id: %s', id)
+        try:
+            release = db.get('id', id)
+            item = release['info']
+            movie = db.get('id', release['media_id'])
+
+            fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
+
+            # Get matching provider
+            provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
+
+            if item.get('protocol') != 'torrent_magnet':
+                item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
+
+            success = self.download(data = item, media = movie, manual = True)
+
+            if success:
+                fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
+
+            return {
+                'success': success == True
+            }
+
+        except:
+            log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc()))
             return {
                 'success': False
             }
 
-        item = {}
-        for info in rel.info:
-            item[info.identifier] = info.value
-
-        fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])
-
-        # Get matching provider
-        provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)
-
-        # Backwards compatibility code
-        if not item.get('protocol'):
-            item['protocol'] = item['type']
-            item['type'] = 'movie'
-
-        if item.get('protocol') != 'torrent_magnet':
-            item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download
-
-        success = self.download(data = item, media = rel.movie.to_dict({
-            'profile': {'types': {'quality': {}}},
-            'releases': {'status': {}, 'quality': {}},
-            'library': {'titles': {}, 'files': {}},
-            'files': {}
-        }), manual = True)
-
-        db.expunge_all()
-
-        if success:
-            fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])
-
-        return {
-            'success': success == True
-        }
-
     def download(self, data, media, manual = False):
 
-        # Backwards compatibility code
-        if not data.get('protocol'):
-            data['protocol'] = data['type']
-            data['type'] = 'movie'
-
         # Test to see if any downloaders are enabled for this type
         downloader_enabled = fireEvent('download.enabled', manual, data, single = True)
         if not downloader_enabled:
@@ -302,12 +301,12 @@ class Release(Plugin):
             return False
 
         log.debug('Downloader result: %s', download_result)
 
-        snatched_status, done_status, downloaded_status, active_status = fireEvent('status.get', ['snatched', 'done', 'downloaded', 'active'], single = True)
-
         try:
-            db = get_session()
-            rls = db.query(Relea).filter_by(identifier = md5(data['url'])).first()
-            if not rls:
+            db = get_db()
+
+            try:
+                rls = db.get('release_identifier', md5(data['url']), with_doc = True)['doc']
+            except:
                 log.error('No release found to store download information in')
                 return False
 
@@ -315,194 +314,220 @@ class Release(Plugin):
 
             # Save download-id info if returned
             if isinstance(download_result, dict):
-                for key in download_result:
-                    rls_info = ReleaseInfo(
-                        identifier = 'download_%s' % key,
-                        value = toUnicode(download_result.get(key))
-                    )
-                    rls.info.append(rls_info)
-                db.commit()
+                rls['download_info'] = download_result
+                db.update(rls)
 
-            log_movie = '%s (%s) in %s' % (getTitle(media['library']), media['library']['year'], rls.quality.label)
+            log_movie = '%s (%s) in %s' % (getTitle(media), media['info']['year'], rls['quality'])
             snatch_message = 'Snatched "%s": %s' % (data.get('name'), log_movie)
             log.info(snatch_message)
-            fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls.to_dict())
+            fireEvent('%s.snatched' % data['type'], message = snatch_message, data = rls)
 
             # Mark release as snatched
             if renamer_enabled:
-                self.updateStatus(rls.id, status = snatched_status)
+                self.updateStatus(rls['_id'], status = 'snatched')
 
             # If renamer isn't used, mark media done if finished or release downloaded
             else:
-                if media['status_id'] == active_status.get('id'):
-                    finished = next((True for profile_type in media['profile']['types']
-                                     if profile_type['quality_id'] == rls.quality.id and profile_type['finish']), False)
+
+                if media['status'] == 'active':
+                    profile = db.get('id', media['profile_id'])
+                    finished = False
+                    if rls['quality'] in profile['qualities']:
+                        nr = profile['qualities'].index(rls['quality'])
+                        finished = profile['finish'][nr]
+
                     if finished:
                         log.info('Renamer disabled, marking media as finished: %s', log_movie)
 
                         # Mark release done
-                        self.updateStatus(rls.id, status = done_status)
+                        self.updateStatus(rls['_id'], status = 'done')
 
                         # Mark media done
-                        mdia = db.query(Media).filter_by(id = media['id']).first()
-                        mdia.status_id = done_status.get('id')
-                        mdia.last_edit = int(time.time())
-                        db.commit()
+                        mdia = db.get('id', media['_id'])
+                        mdia['status'] = 'done'
+                        mdia['last_edit'] = int(time.time())
+                        db.update(mdia)
+
+                        fireEvent('media.tag', media['_id'], 'recent', single = True)
 
                         return True
 
             # Assume release downloaded
-            self.updateStatus(rls.id, status = downloaded_status)
+            self.updateStatus(rls['_id'], status = 'downloaded')
         except:
             log.error('Failed storing download status: %s', traceback.format_exc())
-            db.rollback()
             return False
-        finally:
-            db.close()
 
         return True
 
-    def tryDownloadResult(self, results, media, quality_type, manual = False):
-        ignored_status, failed_status = fireEvent('status.get', ['ignored', 'failed'], single = True)
+    def tryDownloadResult(self, results, media, quality_custom):
+        wait_for = False
+        let_through = False
+        filtered_results = []
 
+        # If a single release comes through the "wait for", let through all
         for rel in results:
-            if not quality_type.get('finish', False) and quality_type.get('wait_for', 0) > 0 and rel.get('age') <= quality_type.get('wait_for', 0):
-                log.info('Ignored, waiting %s days: %s', (quality_type.get('wait_for'), rel['name']))
-                continue
 
-            if rel['status_id'] in [ignored_status.get('id'), failed_status.get('id')]:
+            if rel['status'] in ['ignored', 'failed']:
                 log.info('Ignored: %s', rel['name'])
                 continue
 
             if rel['score'] <= 0:
-                log.info('Ignored, score to low: %s', rel['name'])
+                log.info('Ignored, score "%s" too low: %s', (rel['score'], rel['name']))
                 continue
 
-            downloaded = fireEvent('release.download', data = rel, media = media, manual = manual, single = True)
+            if rel['size'] <= 50:
+                log.info('Ignored, size "%sMB" too low: %s', (rel['size'], rel['name']))
+                continue
+
+            rel['wait_for'] = False
+            if quality_custom.get('index') != 0 and quality_custom.get('wait_for', 0) > 0 and rel.get('age') <= quality_custom.get('wait_for', 0):
+                rel['wait_for'] = True
+            else:
+                let_through = True
+
+            filtered_results.append(rel)
+
+        # Loop through filtered results
+        for rel in filtered_results:
+
+            # Only wait if not a single release is old enough
+            if rel.get('wait_for') and not let_through:
+                log.info('Ignored, waiting %s days: %s', (quality_custom.get('wait_for') - rel.get('age'), rel['name']))
+                wait_for = True
+                continue
+
+            downloaded = fireEvent('release.download', data = rel, media = media, single = True)
             if downloaded is True:
                 return True
             elif downloaded != 'try_next':
                 break
 
-        return False
+        return wait_for
 
-    def createFromSearch(self, search_results, media, quality_type):
-
-        available_status = fireEvent('status.get', ['available'], single = True)
+    def createFromSearch(self, search_results, media, quality):
 
         try:
-            db = get_session()
+            db = get_db()
 
             found_releases = []
 
+            is_3d = False
+            try: is_3d = quality['custom']['3d']
+            except: pass
+
             for rel in search_results:
                 rel_identifier = md5(rel['url'])
                 found_releases.append(rel_identifier)
 
-                rls = db.query(Relea).filter_by(identifier = rel_identifier).first()
-                if not rls:
-                    rls = Relea(
-                        identifier = rel_identifier,
-                        movie_id = media.get('id'),
-                        #media_id = media.get('id'),
-                        quality_id = quality_type.get('quality_id'),
-                        status_id = available_status.get('id')
-                    )
-                    db.add(rls)
-                else:
-                    [db.delete(old_info) for old_info in rls.info]
-                    rls.last_edit = int(time.time())
+                release = {
+                    '_t': 'release',
+                    'identifier': rel_identifier,
+                    'media_id': media.get('_id'),
+                    'quality': quality.get('identifier'),
+                    'is_3d': is_3d,
+                    'status': rel.get('status', 'available'),
+                    'last_edit': int(time.time()),
+                    'info': {}
+                }
 
-                db.commit()
+                # Add downloader info if provided
+                try:
+                    release['download_info'] = rel['download_info']
+                    del rel['download_info']
+                except:
+                    pass
 
+                try:
+                    rls = db.get('release_identifier', rel_identifier, with_doc = True)['doc']
+                except:
+                    rls = db.insert(release)
+                rls.update(release)
+
+                # Update info, but filter out functions
                 for info in rel:
                     try:
                         if not isinstance(rel[info], (str, unicode, int, long, float)):
                             continue
 
-                        rls_info = ReleaseInfo(
-                            identifier = info,
-                            value = toUnicode(rel[info])
-                        )
-                        rls.info.append(rls_info)
-                    except InterfaceError:
+                        rls['info'][info] = toUnicode(rel[info]) if isinstance(rel[info], (str, unicode)) else rel[info]
+                    except:
                         log.debug('Couldn\'t add %s to ReleaseInfo: %s', (info, traceback.format_exc()))
 
-                db.commit()
+                db.update(rls)
 
-                rel['status_id'] = rls.status_id
+                # Update release in search_results
+                rel['status'] = rls.get('status')
 
             return found_releases
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return []
 
-    def forMovie(self, id = None):
-
-        db = get_session()
-
-        releases_raw = db.query(Relea) \
-            .options(joinedload_all('info')) \
-            .options(joinedload_all('files')) \
-            .filter(Relea.movie_id == id) \
-            .all()
-
-        releases = [r.to_dict({'info': {}, 'files': {}}) for r in releases_raw]
-        releases = sorted(releases, key = lambda k: k['info'].get('score', 0), reverse = True)
-
-        return releases
-
-    def forMovieView(self, id = None, **kwargs):
-
-        releases = self.forMovie(id)
-
-        return {
-            'releases': releases,
-            'success': True
-        }
-
-    def updateStatus(self, id, status = None):
+    def updateStatus(self, release_id, status = None):
         if not status: return False
 
         try:
-            db = get_session()
+            db = get_db()
 
-            rel = db.query(Relea).filter_by(id = id).first()
-            if rel and status and rel.status_id != status.get('id'):
-
-                item = {}
-                for info in rel.info:
-                    item[info.identifier] = info.value
+            rel = db.get('id', release_id)
+            if rel and rel.get('status') != status:
 
                 release_name = None
-                if rel.files:
-                    for file_item in rel.files:
-                        if file_item.type.identifier == 'movie':
-                            release_name = os.path.basename(file_item.path)
-                            break
-                else:
-                    release_name = item['name']
+                if rel.get('files'):
+                    for file_type in rel.get('files', {}):
+                        if file_type == 'movie':
+                            for release_file in rel['files'][file_type]:
+                                release_name = os.path.basename(release_file)
+                                break
+
+                if not release_name and rel.get('info'):
+                    release_name = rel['info'].get('name')
 
                 #update status in Db
-                log.debug('Marking release %s as %s', (release_name, status.get("label")))
-                rel.status_id = status.get('id')
-                rel.last_edit = int(time.time())
-                db.commit()
+                log.debug('Marking release %s as %s', (release_name, status))
+                rel['status'] = status
+                rel['last_edit'] = int(time.time())
+
+                db.update(rel)
 
                 #Update all movie info as there is no release update function
-                fireEvent('notify.frontend', type = 'release.update_status', data = rel.to_dict())
+                fireEvent('notify.frontend', type = 'release.update_status', data = rel)
 
             return True
         except:
             log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
 
         return False
+
+    def withStatus(self, status, with_doc = True):
+
+        db = get_db()
+
+        status = list(status if isinstance(status, (list, tuple)) else [status])
+
+        for s in status:
+            for ms in db.get_many('release_status', s, with_doc = with_doc):
+                yield ms['doc'] if with_doc else ms
+
+    def forMedia(self, media_id):
+
+        db = get_db()
+        raw_releases = list(db.get_many('release', media_id, with_doc = True))
+
+        releases = []
+        for r in raw_releases:
+            releases.append(r['doc'])
+
+        releases = sorted(releases, key = lambda k: k.get('info', {}).get('score', 0), reverse = True)
+
+        # Sort based on preferred search method
+        download_preference = self.conf('preferred_method', section = 'searcher')
+        if download_preference != 'both':
+            releases = sorted(releases, key = lambda k: k.get('info', {}).get('protocol', '')[:3], reverse = (download_preference == 'torrent'))
+
+        return releases
diff --git a/couchpotato/core/plugins/renamer/main.py b/couchpotato/core/plugins/renamer.py
old mode 100755
new mode 100644
similarity index 57%
rename from couchpotato/core/plugins/renamer/main.py
rename to couchpotato/core/plugins/renamer.py
index 436107f7..8b57103e
--- a/couchpotato/core/plugins/renamer/main.py
+++ b/couchpotato/core/plugins/renamer.py
@@ -1,27 +1,28 @@
-from couchpotato import get_session
-from couchpotato.api import addApiView
-from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
-from couchpotato.core.helpers.encoding import toUnicode, ss, sp
-from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
-    getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder
-from couchpotato.core.logger import CPLog
-from couchpotato.core.plugins.base import Plugin
-from couchpotato.core.settings.model import Library, File, Profile, Release, \
-    ReleaseInfo
-from couchpotato.environment import Env
-from unrar2 import RarFile
-import errno
 import fnmatch
 import os
 import re
 import shutil
 import time
 import traceback
 
+from couchpotato import get_db
+from couchpotato.api import addApiView
+from couchpotato.core.event import addEvent, fireEvent, fireEventAsync
+from couchpotato.core.helpers.encoding import toUnicode, ss, sp
+from couchpotato.core.helpers.variable import getExt, mergeDicts, getTitle, \
+    getImdb, link, symlink, tryInt, splitString, fnEscape, isSubFolder, getIdentifier
+from couchpotato.core.logger import CPLog
+from couchpotato.core.plugins.base import Plugin
+from couchpotato.environment import Env
+from unrar2 import RarFile
 import six
 from six.moves import filter
 
+
 log = CPLog(__name__)
 
+autoload = 'Renamer'
+
 
 class Renamer(Plugin):
 
@@ -77,16 +78,22 @@ class Renamer(Plugin):
 
         downloader = kwargs.get('downloader')
         download_id = kwargs.get('download_id')
-        files = '|'.join([sp(filename) for filename in splitString(kwargs.get('files'), '|')])
+        files = [sp(filename) for filename in splitString(kwargs.get('files'), '|')]
         status = kwargs.get('status', 'completed')
 
         release_download = None
         if not base_folder and media_folder:
             release_download = {'folder': media_folder}
-            release_download.update({'id': download_id, 'downloader': downloader, 'status': status, 'files': files} if download_id else {})
+
+            if download_id:
+                release_download.update({
+                    'id': download_id,
+                    'downloader': downloader,
+                    'status': status,
+                    'files': files
+                })
 
         fire_handle = fireEvent if not async else fireEventAsync
-
         fire_handle('renamer.scan', base_folder = base_folder, release_download = release_download)
 
         return {
@@ -104,13 +111,13 @@ class Renamer(Plugin):
             return
 
         if not base_folder:
-            base_folder = self.conf('from')
+            base_folder = sp(self.conf('from'))
 
         from_folder = sp(self.conf('from'))
         to_folder = sp(self.conf('to'))
 
         # Get media folder to process
-        media_folder = release_download.get('folder')
+        media_folder = sp(release_download.get('folder'))
 
         # Get all folders that should not be processed
         no_process = [to_folder]
@@ -137,17 +144,17 @@ class Renamer(Plugin):
             log.debug('The provided media folder %s does not exist. Trying to find it in the \'from\' folder.', media_folder)
 
             # Update to the from folder
-            if len(splitString(release_download.get('files'), '|')) == 1:
-                new_media_folder = from_folder
+            if len(release_download.get('files', [])) == 1:
+                new_media_folder = sp(from_folder)
             else:
-                new_media_folder = os.path.join(from_folder, os.path.basename(media_folder))
+                new_media_folder = sp(os.path.join(from_folder, os.path.basename(media_folder)))
 
             if not os.path.isdir(new_media_folder):
                 log.error('The provided media folder %s does not exist and could also not be found in the \'from\' folder.', media_folder)
                 return
 
             # Update the files
-            new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in splitString(release_download.get('files'), '|')]
+            new_files = [os.path.join(new_media_folder, os.path.relpath(filename, media_folder)) for filename in release_download.get('files', [])]
             if new_files and not os.path.isfile(new_files[0]):
                 log.error('The provided media folder %s does not exist and its files could also not be found in the \'from\' folder.', media_folder)
                 return
@@ -155,7 +162,7 @@ class Renamer(Plugin):
             # Update release_download info to the from folder
             log.debug('Release %s found in the \'from\' folder.', media_folder)
             release_download['folder'] = new_media_folder
-            release_download['files'] = '|'.join(new_files)
+            release_download['files'] = new_files
             media_folder = new_media_folder
 
         if media_folder:
@@ -177,11 +184,12 @@ class Renamer(Plugin):
             log.info('Scanning media folder %s...', media_folder)
             folder = os.path.dirname(media_folder)
 
-            if release_download.get('files', ''):
-                files = splitString(release_download['files'], '|')
+            release_files = release_download.get('files', [])
+            if release_files:
+                files = release_files
 
                 # If there is only one file in the torrent, the downloader did not create a subfolder
-                if len(files) == 1:
+                if len(release_files) == 1:
                     folder = media_folder
             else:
                 # Get all files from the specified folder
@@ -191,10 +199,11 @@ class Renamer(Plugin):
         except:
             log.error('Failed getting files from %s: %s', (media_folder, traceback.format_exc()))
 
-        db = get_session()
+        db = get_db()
 
         # Extend the download info with info stored in the downloaded release
-        release_download = self.extendReleaseDownload(release_download)
+        if release_download:
+            release_download = self.extendReleaseDownload(release_download)
 
         # Unpack any archives
         extr_files = None
@@ -211,10 +220,6 @@ class Renamer(Plugin):
         nfo_name = self.conf('nfo_name')
         separator = self.conf('separator')
 
-        # Statuses
-        done_status, active_status, downloaded_status, snatched_status, seeding_status = \
-            fireEvent('status.get', ['done', 'active', 'downloaded', 'snatched', 'seeding'], single = True)
-
         # Tag release folder as failed_rename in case no groups were found. This prevents check_snatched from removing the release from the downloader.
         if not groups and self.statusInfoComplete(release_download):
             self.tagRelease(release_download = release_download, tag = 'failed_rename')
@@ -222,43 +227,52 @@ class Renamer(Plugin):
 
         for group_identifier in groups:
 
            group = groups[group_identifier]
+            group['release_download'] = None
            rename_files = {}
            remove_files = []
            remove_releases = []
 
-            movie_title = getTitle(group['library'])
+            media_title = getTitle(group)
 
            # Add _UNKNOWN_ if no library item is connected
-            if not group['library'] or not movie_title:
+            if not group.get('media') or not media_title:
                self.tagRelease(group = group, tag = 'unknown')
                continue
            # Rename the files using the library data
            else:
-                group['library'] = fireEvent('library.update.movie', identifier = group['library']['identifier'], single = True)
-                if not group['library']:
+
+                # Media not in library, add it first
+                if not group['media'].get('_id'):
+                    group['media'] = fireEvent('movie.add', params = {
+                        'identifier': group['identifier'],
+                        'profile_id': None
+                    }, search_after = False, status = 'done', single = True)
+                else:
+                    group['media'] = fireEvent('movie.update_info', media_id = group['media'].get('_id'), single = True)
+
+                if not group['media'] or not group['media'].get('_id'):
                    log.error('Could not rename, no library item to work with: %s', group_identifier)
                    continue
 
-                library = group['library']
-                library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
-
-                movie_title = getTitle(library)
+                media = group['media']
+                media_title = getTitle(media)
 
                # Overwrite destination when set in category
                destination = to_folder
                category_label = ''
 
-                for movie in library_ent.movies:
-                    if movie.category and movie.category.label:
-                        category_label = movie.category.label
+                if media.get('category_id') and media.get('category_id') != '-1':
+                    try:
+                        category = db.get('id', media['category_id'])
+                        category_label = category['label']
 
-                    if movie.category and movie.category.destination and len(movie.category.destination) > 0 and movie.category.destination != 'None':
-                        destination = movie.category.destination
-                        log.debug('Setting category destination for "%s": %s' % (movie_title, destination))
-                    else:
-                        log.debug('No category destination found for "%s"' % movie_title)
-
-                    break
+                        if category['destination'] and len(category['destination']) > 0 and category['destination'] != 'None':
+                            destination = category['destination']
+                            log.debug('Setting category destination for "%s": %s' % (media_title, destination))
+                        else:
+                            log.debug('No category destination found for "%s"' % media_title)
+                    except:
+                        log.error('Failed getting category label: %s', traceback.format_exc())
 
            # Find subtitle for renaming
            group['before_rename'] = []
@@ -269,18 +283,20 @@ class Renamer(Plugin):
                group['before_rename'].extend(extr_files)
 
            # Remove weird chars from movie name
-            movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', movie_title)
+            movie_name = re.sub(r"[\x00\/\\:\*\?\"<>\|]", '', media_title)
 
            # Put 'The' at the end
            name_the = movie_name
-            if movie_name[:4].lower() == 'the ':
-                name_the = movie_name[4:] + ', The'
+            for prefix in ['the ', 'an ', 'a ']:
+                if prefix == movie_name[:len(prefix)].lower():
+                    name_the = movie_name[len(prefix):] + ', ' + prefix.strip().capitalize()
+                    break
 
            replacements = {
                 'ext': 'mkv',
                 'namethe': name_the.strip(),
                 'thename': movie_name.strip(),
-                'year': library['year'],
+                'year': media['info']['year'],
                 'first': name_the[0].upper(),
                 'quality': group['meta_data']['quality']['label'],
                 'quality_type': group['meta_data']['quality_type'],
@@ -291,13 +307,19 @@ class Renamer(Plugin):
                 'resolution_width': group['meta_data'].get('resolution_width'),
                 'resolution_height': group['meta_data'].get('resolution_height'),
                 'audio_channels': group['meta_data'].get('audio_channels'),
-                'imdb_id': library['identifier'],
+                'imdb_id': group['identifier'],
                 'cd': '',
                 'cd_nr': '',
-                'mpaa': library['info'].get('mpaa', ''),
+                'mpaa': media['info'].get('mpaa', ''),
+                'mpaa_only': media['info'].get('mpaa', ''),
                 'category': category_label,
+                '3d': '3D' if group['meta_data']['quality'].get('is_3d', 0) else '',
+                '3d_type': group['meta_data'].get('3d_type'),
            }
 
+            if replacements['mpaa_only'] not in ('G', 'PG', 'PG-13', 'R', 'NC-17'):
+                replacements['mpaa_only'] = 'Not Rated'
+
            for file_type in group['files']:
 
                # Move nfo depending on settings
@@ -393,8 +415,12 @@ class Renamer(Plugin):
 
                        # Don't add language if multiple languages in 1 subtitle file
                        if len(sub_langs) == 1:
-                            sub_name = sub_name.replace(replacements['ext'], '%s.%s' % (sub_langs[0], replacements['ext']))
-                            rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
+                            sub_suffix = '%s.%s' % (sub_langs[0], replacements['ext'])
+
+                            # Don't add language to subtitle file if it's already there
+                            if not sub_name.endswith(sub_suffix):
+                                sub_name = sub_name.replace(replacements['ext'], sub_suffix)
+                                rename_files[current_file] = os.path.join(destination, final_folder_name, sub_name)
 
                rename_files = mergeDicts(rename_files, rename_extras)
 
@@ -420,76 +446,90 @@ class Renamer(Plugin):
            # Before renaming, remove the lower quality files
            remove_leftovers = True
 
-            # Add it to the wanted list before we continue
-            if len(library_ent.movies) == 0:
-                profile = db.query(Profile).filter_by(core = True, label = group['meta_data']['quality']['label']).first()
-                fireEvent('movie.add', params = {'identifier': group['library']['identifier'], 'profile_id': profile.id}, search_after = False)
-                db.expire_all()
-                library_ent = db.query(Library).filter_by(identifier = group['library']['identifier']).first()
+            # Mark movie "done" once it's found the quality with the finish check
+            profile = None
+            try:
+                if media.get('status') == 'active' and media.get('profile_id'):
+                    profile = db.get('id', media['profile_id'])
+                    if fireEvent('quality.isfinish', group['meta_data']['quality'], profile, single = True):
+                        mdia = db.get('id', media['_id'])
+                        mdia['status'] = 'done'
+                        mdia['last_edit'] = int(time.time())
+                        db.update(mdia)
 
-            for movie in library_ent.movies:
+                        # List movie on dashboard
+                        fireEvent('media.tag', media['_id'], 'recent', single = True)
 
-                # Mark movie "done" once it's found the quality with the finish check
-                try:
-                    if movie.status_id == active_status.get('id') and movie.profile:
-                        for profile_type in movie.profile.types:
-                            if profile_type.quality_id == group['meta_data']['quality']['id'] and profile_type.finish:
-                                movie.status_id = done_status.get('id')
-                                movie.last_edit = int(time.time())
-                                db.commit()
-                except Exception as e:
-                    log.error('Failed marking movie finished: %s %s', (e, traceback.format_exc()))
-                    db.rollback()
+            except:
+                log.error('Failed marking movie finished: %s', (traceback.format_exc()))
 
-                # Go over current movie releases
-                for release in movie.releases:
+            # Mark media for dashboard
+            mark_as_recent = False
 
-                    # When a release already exists
-                    if release.status_id is done_status.get('id'):
+            # Go over current movie releases
media['_id'], single = True): - # This is where CP removes older, lesser quality releases - if release.quality.order > group['meta_data']['quality']['order']: - log.info('Removing lesser quality %s for %s.', (movie.library.titles[0].title, release.quality.label)) - for current_file in release.files: - remove_files.append(current_file) - remove_releases.append(release) - # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc - elif release.quality.order is group['meta_data']['quality']['order']: - log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (movie.library.titles[0].title, release.quality.label)) - for current_file in release.files: - remove_files.append(current_file) - remove_releases.append(release) + # When a release already exists + if release.get('status') == 'done': - # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan - else: - log.info('Better quality release already exists for %s, with quality %s', (movie.library.titles[0].title, release.quality.label)) + # This is where CP removes older, lesser quality releases or releases that are not wanted anymore + is_higher = fireEvent('quality.ishigher', \ + group['meta_data']['quality'], {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, profile, single = True) - # Add exists tag to the .ignore file - self.tagRelease(group = group, tag = 'exists') + if is_higher == 'higher': + log.info('Removing lesser or not wanted quality %s for %s.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) - # Notify on rename fail - download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' % (movie.library.titles[0].title, group['meta_data']['quality']['label'], release.quality.label) - fireEvent('movie.renaming.canceled', message = download_message, data = group) - remove_leftovers = False + # Same quality, but still downloaded, so maybe repack/proper/unrated/directors cut etc + elif is_higher == 'equal': + log.info('Same quality release already exists for %s, with quality %s. Assuming repack.', (media_title, release.get('quality'))) + for file_type in release.get('files', {}): + for release_file in release['files'][file_type]: + remove_files.append(release_file) + remove_releases.append(release) - break + # Downloaded a lower quality, rename the newly downloaded files/folder to exclude them from scan + else: + log.info('Better quality release already exists for %s, with quality %s', (media_title, release.get('quality'))) - elif release.status_id in [snatched_status.get('id'), seeding_status.get('id')]: - if release_download and release_download.get('rls_id'): - if release_download['rls_id'] == release.id: - if release_download['status'] == 'completed': - # Set the release to downloaded - fireEvent('release.update_status', release.id, status = downloaded_status, single = True) - elif release_download['status'] == 'seeding': - # Set the release to seeding - fireEvent('release.update_status', release.id, status = seeding_status, single = True) + # Add exists tag to the .ignore file + self.tagRelease(group = group, tag = 'exists') - elif release.quality.id is group['meta_data']['quality']['id']: + # Notify on rename fail + download_message = 'Renaming of %s (%s) cancelled, exists in %s already.' 
% (media_title, group['meta_data']['quality']['label'], release.get('identifier')) + fireEvent('movie.renaming.canceled', message = download_message, data = group) + remove_leftovers = False + + break + + elif release.get('status') in ['snatched', 'seeding']: + if release_download and release_download.get('release_id'): + if release_download['release_id'] == release['_id']: + if release_download['status'] == 'completed': # Set the release to downloaded - fireEvent('release.update_status', release.id, status = downloaded_status, single = True) + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + group['release_download'] = release_download + mark_as_recent = True + elif release_download['status'] == 'seeding': + # Set the release to seeding + fireEvent('release.update_status', release['_id'], status = 'seeding', single = True) + mark_as_recent = True + + elif release.get('identifier') == group['meta_data']['quality']['identifier']: + # Set the release to downloaded + fireEvent('release.update_status', release['_id'], status = 'downloaded', single = True) + group['release_download'] = release_download + mark_as_recent = True + + # Mark media for dashboard + if mark_as_recent: + fireEvent('media.tag', group['media'].get('_id'), 'recent', single = True) # Remove leftover files - if not remove_leftovers: # Don't remove anything + if not remove_leftovers: # Don't remove anything break log.debug('Removing leftover files') @@ -502,9 +542,6 @@ class Renamer(Plugin): delete_folders = [] for src in remove_files: - if isinstance(src, File): - src = src.path - if rename_files.get(src): log.debug('Not removing file that will be renamed: %s', src) continue @@ -517,8 +554,8 @@ class Renamer(Plugin): parent_dir = os.path.dirname(src) if delete_folders.count(parent_dir) == 0 and os.path.isdir(parent_dir) and \ - not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ - not isSubFolder(parent_dir, base_folder): + not isSubFolder(destination, parent_dir) and not isSubFolder(media_folder, parent_dir) and \ + not isSubFolder(parent_dir, base_folder): delete_folders.append(parent_dir) @@ -548,7 +585,7 @@ class Renamer(Plugin): self.moveFile(src, dst, forcemove = not self.downloadIsTorrent(release_download) or self.fileIsAdded(src, group)) group['renamed_files'].append(dst) except: - log.error('Failed ranaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) + log.error('Failed renaming the file "%s" : %s', (os.path.basename(src), traceback.format_exc())) failed_rename = True break @@ -566,11 +603,11 @@ class Renamer(Plugin): # Remove matching releases for release in remove_releases: - log.debug('Removing release %s', release.identifier) + log.debug('Removing release %s', release.get('identifier')) try: db.delete(release) except: - log.error('Failed removing %s: %s', (release.identifier, traceback.format_exc())) + log.error('Failed removing %s: %s', (release, traceback.format_exc())) if group['dirname'] and group['parentdir'] and not self.downloadIsTorrent(release_download): if media_folder: @@ -587,7 +624,7 @@ class Renamer(Plugin): log.error('Failed removing %s: %s', (group_folder, traceback.format_exc())) # Notify on download, search for trailers etc - download_message = 'Downloaded %s (%s)' % (movie_title, replacements['quality']) + download_message = 'Downloaded %s (%s%s)' % (media_title, replacements['quality'], (' ' + replacements['3d']) if replacements['3d'] else '') try: fireEvent('renamer.after', message = 
download_message, group = group, in_order = True) except: @@ -637,17 +674,17 @@ Remove it if you want it to be renamed (again, or at least let it try again) elif isinstance(release_download, dict): # Tag download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') + if release_download.get('files', []): + tag_files = [filename for filename in release_download.get('files', []) if os.path.exists(filename)] # Tag all files in release folder - else: - for root, folders, names in os.walk(release_download['folder']): + elif release_download['folder']: + for root, folders, names in os.walk(sp(release_download['folder'])): tag_files.extend([os.path.join(root, name) for name in names]) for filename in tag_files: - # Dont tag .ignore files + # Don't tag .ignore files if os.path.splitext(filename)[1] == '.ignore': continue @@ -666,24 +703,25 @@ Remove it if you want it to be renamed (again, or at least let it try again) if isinstance(group, dict): tag_files = [sorted(list(group['files']['movie']))[0]] - folder = group['parentdir'] + folder = sp(group['parentdir']) if not group.get('dirname') or not os.path.isdir(folder): return False elif isinstance(release_download, dict): + + folder = sp(release_download['folder']) + if not os.path.isdir(folder): + return False + # Untag download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') + if release_download.get('files'): + tag_files = release_download.get('files', []) # Untag all files in release folder else: - for root, folders, names in os.walk(release_download['folder']): + for root, folders, names in os.walk(folder): tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) - folder = release_download['folder'] - if not os.path.isdir(folder): - return False - if not folder: return False @@ -705,7 +743,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) if not release_download: return False - folder = release_download['folder'] + folder = sp(release_download['folder']) if not os.path.isdir(folder): return False @@ -713,12 +751,12 @@ Remove it if you want it to be renamed (again, or at least let it try again) ignore_files = [] # Find tag on download_files if they are known - if release_download['files']: - tag_files = splitString(release_download['files'], '|') + if release_download.get('files'): + tag_files = release_download.get('files', []) # Find tag on all files in release folder else: - for root, folders, names in os.walk(release_download['folder']): + for root, folders, names in os.walk(folder): tag_files.extend([sp(os.path.join(root, name)) for name in names if not os.path.splitext(name)[1] == '.ignore']) # Find all .ignore files in folder @@ -734,7 +772,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) return False def moveFile(self, old, dest, forcemove = False): - dest = ss(dest) + dest = sp(dest) try: if forcemove or self.conf('file_action') not in ['copy', 'link']: try: @@ -807,7 +845,7 @@ Remove it if you want it to be renamed (again, or at least let it try again) def replaceDoubles(self, string): replaces = [ - ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), + ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '), (' \\\\', '\\\\'), (' /', '/'), ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'), ] @@ -817,25 +855,6 @@ Remove it if you want it to be renamed (again, or at least let it try again) 
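Editor's note: the replaceDoubles() hunk above only shows the extended pattern list; the loop that applies it sits outside the hunk. As a reading aid, here is a minimal standalone sketch of what that list does, assuming a simple sequential re.sub pass (the patterns are copied from the hunk; the function name and loop are illustrative):

    import re

    def replace_doubles(name):
        # Collapse repeated separators, then strip stray spaces before
        # path separators and around leftover dots and dashes
        replaces = [
            ('\.+', '.'), ('_+', '_'), ('-+', '-'), ('\s+', ' '),
            (' \\\\', '\\\\'), (' /', '/'),
            ('(\s\.)+', '.'), ('(-\.)+', '.'), ('(\s-)+', '-'),
        ]
        for old, new in replaces:
            name = re.sub(old, new, name)
        return name

    # e.g. replace_doubles('Movie  Name ..mkv') -> 'Movie Name.mkv'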
return string - def deleteEmptyFolder(self, folder, show_error = True): - folder = sp(folder) - - loge = log.error if show_error else log.debug - for root, dirs, files in os.walk(folder): - - for dir_name in dirs: - full_path = os.path.join(root, dir_name) - if len(os.listdir(full_path)) == 0: - try: - os.rmdir(full_path) - except: - loge('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc())) - - try: - os.rmdir(folder) - except: - loge('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc())) - def checkSnatched(self, fire_scan = True): if self.checking_snatched: @@ -844,230 +863,223 @@ Remove it if you want it to be renamed (again, or at least let it try again) self.checking_snatched = True - snatched_status, ignored_status, failed_status, seeding_status, downloaded_status, missing_status = \ - fireEvent('status.get', ['snatched', 'ignored', 'failed', 'seeding', 'downloaded', 'missing'], single = True) - - db = get_session() - rels = db.query(Release).filter( - Release.status_id.in_([snatched_status.get('id'), seeding_status.get('id'), missing_status.get('id')]) - ).all() - - if not rels: - #No releases found that need status checking - self.checking_snatched = False - return True - - # Collect all download information with the download IDs from the releases - download_ids = [] - no_status_support = [] try: - for rel in rels: - rel_dict = rel.to_dict({'info': {}}) - if rel_dict['info'].get('download_id') and rel_dict['info'].get('download_downloader'): - download_ids.append({'id': rel_dict['info']['download_id'], 'downloader': rel_dict['info']['download_downloader']}) + db = get_db() - ds = rel_dict['info'].get('download_status_support') - if ds == False or ds == 'False': - no_status_support.append(ss(rel_dict['info'].get('download_downloader'))) - except: - log.error('Error getting download IDs from database') - self.checking_snatched = False - return False + rels = list(fireEvent('release.with_status', ['snatched', 'seeding', 'missing'], single = True)) - release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else [] + if not rels: + #No releases found that need status checking + self.checking_snatched = False + return True - if len(no_status_support) > 0: - log.debug('Download status functionality is not implemented for one of the active downloaders: %s', no_status_support) + # Collect all download information with the download IDs from the releases + download_ids = [] + no_status_support = [] + try: + for rel in rels: + if not rel.get('download_info'): continue - if not release_downloads: - if fire_scan: - self.scan() + if rel['download_info'].get('id') and rel['download_info'].get('downloader'): + download_ids.append(rel['download_info']) - self.checking_snatched = False - return True + ds = rel['download_info'].get('status_support') + if ds is False or ds == 'False': + no_status_support.append(ss(rel['download_info'].get('downloader'))) + except: + log.error('Error getting download IDs from database') + self.checking_snatched = False + return False - scan_releases = [] - scan_required = False + release_downloads = fireEvent('download.status', download_ids, merge = True) if download_ids else [] - log.debug('Checking status snatched releases...') + if len(no_status_support) > 0: + log.debug('Download status functionality is not implemented for one of the active downloaders: %s', list(set(no_status_support))) - try: - for rel in rels: - rel_dict = rel.to_dict({'info': {}}) - movie_dict = 
fireEvent('media.get', media_id = rel.movie_id, single = True) + if not release_downloads: + if fire_scan: + self.scan() - if not isinstance(rel_dict['info'], dict): - log.error('Faulty release found without any info, ignoring.') - fireEvent('release.update_status', rel.id, status = ignored_status, single = True) - continue + self.checking_snatched = False + return True - # Check if download ID is available - if not rel_dict['info'].get('download_id') or not rel_dict['info'].get('download_downloader'): - log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (rel_dict['info'].get('download_downloader', 'unknown'), rel_dict['info']['name'])) - scan_required = True + scan_releases = [] + scan_required = False - # Continue with next release - continue + log.debug('Checking status snatched releases...') - # Find release in downloaders - nzbname = self.createNzbName(rel_dict['info'], movie_dict) + try: + for rel in rels: + movie_dict = db.get('id', rel.get('media_id')) + download_info = rel.get('download_info') + + if not isinstance(download_info, dict): + log.error('Faulty release found without any info, ignoring.') + fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) + continue + + # Check if download ID is available + if not download_info.get('id') or not download_info.get('downloader'): + log.debug('Download status functionality is not implemented for downloader (%s) of release %s.', (download_info.get('downloader', 'unknown'), rel['info']['name'])) + scan_required = True + + # Continue with next release + continue + + # Find release in downloaders + nzbname = self.createNzbName(rel['info'], movie_dict) - found_release = False - for release_download in release_downloads: found_release = False - if rel_dict['info'].get('download_id'): - if release_download['id'] == rel_dict['info']['download_id'] and release_download['downloader'] == rel_dict['info']['download_downloader']: - log.debug('Found release by id: %s', release_download['id']) - found_release = True - break - else: - if release_download['name'] == nzbname or rel_dict['info']['name'] in release_download['name'] or getImdb(release_download['name']) == movie_dict['library']['identifier']: - log.debug('Found release by release name or imdb ID: %s', release_download['name']) - found_release = True - break - - if not found_release: - log.info('%s not found in downloaders', nzbname) - - #Check status if already missing and for how long, if > 1 week, set to ignored else to missing - if rel.status_id == missing_status.get('id'): - if rel.last_edit < int(time.time()) - 7 * 24 * 60 * 60: - fireEvent('release.update_status', rel.id, status = ignored_status, single = True) - else: - # Set the release to missing - fireEvent('release.update_status', rel.id, status = missing_status, single = True) - - # Continue with next release - continue - - # Log that we found the release - timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft'] - log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft)) - - # Check status of release - if release_download['status'] == 'busy': - # Set the release to snatched if it was missing before - fireEvent('release.update_status', rel.id, status = snatched_status, single = True) - - # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading - if self.movieInFromFolder(release_download['folder']): - 
self.tagRelease(release_download = release_download, tag = 'downloading') - - elif release_download['status'] == 'seeding': - #If linking setting is enabled, process release - if self.conf('file_action') != 'move' and not rel.status_id == seeding_status.get('id') and self.statusInfoComplete(release_download): - log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio'])) - - # Remove the downloading tag - self.untagRelease(release_download = release_download, tag = 'downloading') - - # Scan and set the torrent to paused if required - release_download.update({'pause': True, 'scan': True, 'process_complete': False}) - scan_releases.append(release_download) - else: - #let it seed - log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio'])) - - # Set the release to seeding - fireEvent('release.update_status', rel.id, status = seeding_status, single = True) - - elif release_download['status'] == 'failed': - # Set the release to failed - fireEvent('release.update_status', rel.id, status = failed_status, single = True) - - fireEvent('download.remove_failed', release_download, single = True) - - if self.conf('next_on_failed'): - fireEvent('movie.searcher.try_next_release', media_id = rel.movie_id) - - elif release_download['status'] == 'completed': - log.info('Download of %s completed!', release_download['name']) - - #Make sure the downloader sent over a path to look in - if self.statusInfoComplete(release_download): - - # If the release has been seeding, process now the seeding is done - if rel.status_id == seeding_status.get('id'): - if self.conf('file_action') != 'move': - # Set the release to done as the movie has already been renamed - fireEvent('release.update_status', rel.id, status = downloaded_status, single = True) - - # Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': False, 'process_complete': True}) - scan_releases.append(release_download) - else: - # Scan and Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': True, 'process_complete': True}) - scan_releases.append(release_download) - + for release_download in release_downloads: + found_release = False + if download_info.get('id'): + if release_download['id'] == download_info['id'] and release_download['downloader'] == download_info['downloader']: + log.debug('Found release by id: %s', release_download['id']) + found_release = True + break else: - # Set the release to snatched if it was missing before - fireEvent('release.update_status', rel.id, status = snatched_status, single = True) + if release_download['name'] == nzbname or rel['info']['name'] in release_download['name'] or getImdb(release_download['name']) == getIdentifier(movie_dict): + log.debug('Found release by release name or imdb ID: %s', release_download['name']) + found_release = True + break + + if not found_release: + log.info('%s not found in downloaders', nzbname) + + #Check status if already missing and for how long, if > 1 week, set to ignored else to missing + if rel.get('status') == 'missing': + if rel.get('last_edit') < int(time.time()) - 7 * 24 * 60 * 60: + fireEvent('release.update_status', rel.get('_id'), status = 'ignored', single = True) + else: + # Set the release to missing + fireEvent('release.update_status', rel.get('_id'), status = 'missing', single = True) + + # Continue with next release + continue + + # Log 
that we found the release + timeleft = 'N/A' if release_download['timeleft'] == -1 else release_download['timeleft'] + log.debug('Found %s: %s, time to go: %s', (release_download['name'], release_download['status'].upper(), timeleft)) + + # Check status of release + if release_download['status'] == 'busy': + # Set the release to snatched if it was missing before + fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) + + # Tag folder if it is in the 'from' folder and it will not be processed because it is still downloading + if self.movieInFromFolder(release_download['folder']): + self.tagRelease(release_download = release_download, tag = 'downloading') + + elif release_download['status'] == 'seeding': + #If linking setting is enabled, process release + if self.conf('file_action') != 'move' and not rel.get('status') == 'seeding' and self.statusInfoComplete(release_download): + log.info('Download of %s completed! It is now being processed while leaving the original files alone for seeding. Current ratio: %s.', (release_download['name'], release_download['seed_ratio'])) # Remove the downloading tag self.untagRelease(release_download = release_download, tag = 'downloading') - # Scan and Allow the downloader to clean-up - release_download.update({'pause': False, 'scan': True, 'process_complete': True}) + # Scan and set the torrent to paused if required + release_download.update({'pause': True, 'scan': True, 'process_complete': False}) scan_releases.append(release_download) - else: - scan_required = True + else: + #let it seed + log.debug('%s is seeding with ratio: %s', (release_download['name'], release_download['seed_ratio'])) + # Set the release to seeding + fireEvent('release.update_status', rel.get('_id'), status = 'seeding', single = True) + + elif release_download['status'] == 'failed': + # Set the release to failed + fireEvent('release.update_status', rel.get('_id'), status = 'failed', single = True) + + fireEvent('download.remove_failed', release_download, single = True) + + if self.conf('next_on_failed'): + fireEvent('movie.searcher.try_next_release', media_id = rel.get('media_id')) + + elif release_download['status'] == 'completed': + log.info('Download of %s completed!', release_download['name']) + + #Make sure the downloader sent over a path to look in + if self.statusInfoComplete(release_download): + + # If the release has been seeding, process now the seeding is done + if rel.get('status') == 'seeding': + if self.conf('file_action') != 'move': + # Set the release to done as the movie has already been renamed + fireEvent('release.update_status', rel.get('_id'), status = 'downloaded', single = True) + + # Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': False, 'process_complete': True}) + scan_releases.append(release_download) + else: + # Scan and Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': True, 'process_complete': True}) + scan_releases.append(release_download) + + else: + # Set the release to snatched if it was missing before + fireEvent('release.update_status', rel.get('_id'), status = 'snatched', single = True) + + # Remove the downloading tag + self.untagRelease(release_download = release_download, tag = 'downloading') + + # Scan and Allow the downloader to clean-up + release_download.update({'pause': False, 'scan': True, 'process_complete': True}) + scan_releases.append(release_download) + else: + scan_required = True + + except: + log.error('Failed checking for 
release in downloader: %s', traceback.format_exc()) + + # The following can either be done here, or inside the scanner if we pass it scan_items in one go + for release_download in scan_releases: + # Ask the renamer to scan the item + if release_download['scan']: + if release_download['pause'] and self.conf('file_action') == 'link': + fireEvent('download.pause', release_download = release_download, pause = True, single = True) + self.scan(release_download = release_download) + if release_download['pause'] and self.conf('file_action') == 'link': + fireEvent('download.pause', release_download = release_download, pause = False, single = True) + if release_download['process_complete']: + # First make sure the files were successfully processed + if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'): + # Remove the seeding tag if it exists + self.untagRelease(release_download = release_download, tag = 'renamed_already') + # Ask the downloader to process the item + fireEvent('download.process_complete', release_download = release_download, single = True) + + if fire_scan and (scan_required or len(no_status_support) > 0): + self.scan() + + self.checking_snatched = False + return True except: - log.error('Failed checking for release in downloader: %s', traceback.format_exc()) - - # The following can either be done here, or inside the scanner if we pass it scan_items in one go - for release_download in scan_releases: - # Ask the renamer to scan the item - if release_download['scan']: - if release_download['pause'] and self.conf('file_action') == 'link': - fireEvent('download.pause', release_download = release_download, pause = True, single = True) - self.scan(release_download = release_download) - if release_download['pause'] and self.conf('file_action') == 'link': - fireEvent('download.pause', release_download = release_download, pause = False, single = True) - if release_download['process_complete']: - #First make sure the files were succesfully processed - if not self.hastagRelease(release_download = release_download, tag = 'failed_rename'): - # Remove the seeding tag if it exists - self.untagRelease(release_download = release_download, tag = 'renamed_already') - # Ask the downloader to process the item - fireEvent('download.process_complete', release_download = release_download, single = True) - - if fire_scan and (scan_required or len(no_status_support) > 0): - self.scan() + log.error('Failed checking snatched: %s', traceback.format_exc()) self.checking_snatched = False - return True + return False def extendReleaseDownload(self, release_download): rls = None + db = get_db() - if release_download and release_download.get('id') and release_download.get('downloader'): - - db = get_session() - - rlsnfo_dwnlds = db.query(ReleaseInfo).filter_by(identifier = 'download_downloader', value = release_download.get('downloader')).all() - rlsnfo_ids = db.query(ReleaseInfo).filter_by(identifier = 'download_id', value = release_download.get('id')).all() - - for rlsnfo_dwnld in rlsnfo_dwnlds: - for rlsnfo_id in rlsnfo_ids: - if rlsnfo_id.release == rlsnfo_dwnld.release: - rls = rlsnfo_id.release - break - if rls: break - - if not rls: + if release_download and release_download.get('id'): + try: + rls = db.get('release_download', '%s-%s' % (release_download.get('downloader'), release_download.get('id')), with_doc = True)['doc'] + except: log.error('Download ID %s from downloader %s not found in releases', (release_download.get('id'), release_download.get('downloader'))) if rls: - 
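Editor's note: extendReleaseDownload() in this hunk replaces the old two-table ReleaseInfo join with a single key-value lookup. A minimal sketch of that lookup pattern, assuming the same db.get() index API used throughout this diff (the 'release_download' index name and the '%s-%s' key format are taken from the hunk; the helper name and fallback are illustrative):

    def find_release(db, release_download):
        # The index key is '<downloader>-<download id>', matching the
        # 'release_download' index queried in the hunk above
        key = '%s-%s' % (release_download.get('downloader'), release_download.get('id'))
        try:
            return db.get('release_download', key, with_doc = True)['doc']
        except:
            return None  # release not snatched by CP, or index missing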
- rls_dict = rls.to_dict({'info':{}}) + media = db.get('id', rls['media_id']) release_download.update({ - 'imdb_id': rls.movie.library.identifier, - 'quality': rls.quality.identifier, - 'protocol': rls_dict.get('info', {}).get('protocol') or rls_dict.get('info', {}).get('type'), - 'rls_id': rls.id, + 'imdb_id': getIdentifier(media), + 'quality': rls['quality'], + 'is_3d': rls['is_3d'], + 'protocol': rls.get('info', {}).get('protocol') or rls.get('info', {}).get('type'), + 'release_id': rls['_id'], }) return release_download @@ -1192,3 +1204,179 @@ Remove it if you want it to be renamed (again, or at least let it try again) folder = None return folder, media_folder, files, extr_files + + +rename_options = { + 'pre': '<', + 'post': '>', + 'choices': { + 'ext': 'Extension (mkv)', + 'namethe': 'Moviename, The', + 'thename': 'The Moviename', + 'year': 'Year (2011)', + 'first': 'First letter (M)', + 'quality': 'Quality (720p)', + 'quality_type': '(HD) or (SD)', + '3d': '3D', + '3d_type': '3D Type (Full SBS)', + 'video': 'Video (x264)', + 'audio': 'Audio (DTS)', + 'group': 'Releasegroup name', + 'source': 'Source media (Bluray)', + 'resolution_width': 'resolution width (1280)', + 'resolution_height': 'resolution height (720)', + 'audio_channels': 'audio channels (7.1)', + 'original': 'Original filename', + 'original_folder': 'Original foldername', + 'imdb_id': 'IMDB id (tt0123456)', + 'cd': 'CD number (cd1)', + 'cd_nr': 'Just the cd nr. (1)', + 'mpaa': 'MPAA or other certification', + 'mpaa_only': 'MPAA only certification (G|PG|PG-13|R|NC-17|Not Rated)', + 'category': 'Category label', + }, +} + +config = [{ + 'name': 'renamer', + 'order': 40, + 'description': 'Move and rename your downloaded movies to your movie directory.', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'renamer', + 'label': 'Rename downloaded movies', + 'wizard': True, + 'options': [ + { + 'name': 'enabled', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'from', + 'type': 'directory', + 'description': 'Folder where CP searches for movies.', + }, + { + 'name': 'to', + 'type': 'directory', + 'description': 'Default folder where the movies are moved to.', + }, + { + 'name': 'folder_name', + 'label': 'Folder naming', + 'description': 'Name of the folder. 
Keep empty for no folder.', + 'default': ' ()', + 'type': 'choice', + 'options': rename_options + }, + { + 'name': 'file_name', + 'label': 'File naming', + 'description': 'Name of the file', + 'default': '.', + 'type': 'choice', + 'options': rename_options + }, + { + 'name': 'unrar', + 'type': 'bool', + 'description': 'Extract rar files if found.', + 'default': False, + }, + { + 'name': 'cleanup', + 'type': 'bool', + 'description': 'Cleanup leftover files after successful rename.', + 'default': False, + }, + { + 'advanced': True, + 'name': 'run_every', + 'label': 'Run every', + 'default': 1, + 'type': 'int', + 'unit': 'min(s)', + 'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is completed or handle failed download if these options are enabled'), + }, + { + 'advanced': True, + 'name': 'force_every', + 'label': 'Force every', + 'default': 2, + 'type': 'int', + 'unit': 'hour(s)', + 'description': 'Forces the renamer to scan every X hours', + }, + { + 'advanced': True, + 'name': 'next_on_failed', + 'default': True, + 'type': 'bool', + 'description': 'Try the next best release for a movie after a download failed.', + }, + { + 'name': 'move_leftover', + 'type': 'bool', + 'description': 'Move all leftover files after renaming to the movie folder.', + 'default': False, + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'separator', + 'label': 'File-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'advanced': True, + 'name': 'foldersep', + 'label': 'Folder-Separator', + 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), + }, + { + 'name': 'file_action', + 'label': 'Torrent File Action', + 'default': 'link', + 'type': 'dropdown', + 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], + 'description': ('Link, Copy or Move after download completed.', + 'Link first tries hard link, then sym link and falls back to Copy. It is preferred to use link when downloading torrents as it will save you space, while still being able to seed.'), + 'advanced': True, + }, + { + 'advanced': True, + 'name': 'ntfs_permission', + 'label': 'NTFS Permission', + 'type': 'bool', + 'hidden': os.name != 'nt', + 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', + 'default': False, + }, + ], + }, { + 'tab': 'renamer', + 'name': 'meta_renamer', + 'label': 'Advanced renaming', + 'description': 'Meta data file renaming. 
Use <filename> to use the above "File naming" settings, without the file extension.', + 'advanced': True, + 'options': [ + { + 'name': 'rename_nfo', + 'label': 'Rename .NFO', + 'description': 'Rename original .nfo file', + 'type': 'bool', + 'default': True, + }, + { + 'name': 'nfo_name', + 'label': 'NFO naming', + 'default': '.orig.', + 'type': 'choice', + 'options': rename_options + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/renamer/__init__.py b/couchpotato/core/plugins/renamer/__init__.py deleted file mode 100755 index e238f5eb..00000000 --- a/couchpotato/core/plugins/renamer/__init__.py +++ /dev/null @@ -1,178 +0,0 @@ -from couchpotato.core.plugins.renamer.main import Renamer -import os - - -def start(): - return Renamer() - -rename_options = { - 'pre': '<', - 'post': '>', - 'choices': { - 'ext': 'Extention (mkv)', - 'namethe': 'Moviename, The', - 'thename': 'The Moviename', - 'year': 'Year (2011)', - 'first': 'First letter (M)', - 'quality': 'Quality (720p)', - 'quality_type': '(HD) or (SD)', - 'video': 'Video (x264)', - 'audio': 'Audio (DTS)', - 'group': 'Releasegroup name', - 'source': 'Source media (Bluray)', - 'resolution_width': 'resolution width (1280)', - 'resolution_height': 'resolution height (720)', - 'audio_channels': 'audio channels (7.1)', - 'original': 'Original filename', - 'original_folder': 'Original foldername', - 'imdb_id': 'IMDB id (tt0123456)', - 'cd': 'CD number (cd1)', - 'cd_nr': 'Just the cd nr. (1)', - 'mpaa': 'MPAA Rating', - 'category': 'Category label', - }, -} - -config = [{ - 'name': 'renamer', - 'order': 40, - 'description': 'Move and rename your downloaded movies to your movie directory.', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'renamer', - 'label': 'Rename downloaded movies', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'from', - 'type': 'directory', - 'description': 'Folder where CP searches for movies.', - }, - { - 'name': 'to', - 'type': 'directory', - 'description': 'Default folder where the movies are moved to.', - }, - { - 'name': 'folder_name', - 'label': 'Folder naming', - 'description': 'Name of the folder. 
Keep empty for no folder.', - 'default': ' ()', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'file_name', - 'label': 'File naming', - 'description': 'Name of the file', - 'default': '.', - 'type': 'choice', - 'options': rename_options - }, - { - 'name': 'unrar', - 'type': 'bool', - 'description': 'Extract rar files if found.', - 'default': False, - }, - { - 'name': 'cleanup', - 'type': 'bool', - 'description': 'Cleanup leftover files after successful rename.', - 'default': False, - }, - { - 'advanced': True, - 'name': 'run_every', - 'label': 'Run every', - 'default': 1, - 'type': 'int', - 'unit': 'min(s)', - 'description': ('Detect movie status every X minutes.', 'Will start the renamer if movie is completed or handle failed download if these options are enabled'), - }, - { - 'advanced': True, - 'name': 'force_every', - 'label': 'Force every', - 'default': 2, - 'type': 'int', - 'unit': 'hour(s)', - 'description': 'Forces the renamer to scan every X hours', - }, - { - 'advanced': True, - 'name': 'next_on_failed', - 'default': True, - 'type': 'bool', - 'description': 'Try the next best release for a movie after a download failed.', - }, - { - 'name': 'move_leftover', - 'type': 'bool', - 'description': 'Move all leftover file after renaming, to the movie folder.', - 'default': False, - 'advanced': True, - }, - { - 'advanced': True, - 'name': 'separator', - 'label': 'File-Separator', - 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), - }, - { - 'advanced': True, - 'name': 'foldersep', - 'label': 'Folder-Separator', - 'description': ('Replace all the spaces with a character.', 'Example: ".", "-" (without quotes). Leave empty to use spaces.'), - }, - { - 'name': 'file_action', - 'label': 'Torrent File Action', - 'default': 'link', - 'type': 'dropdown', - 'values': [('Link', 'link'), ('Copy', 'copy'), ('Move', 'move')], - 'description': ('Link, Copy or Move after download completed.', - 'Link first tries hard link, then sym link and falls back to Copy. It is perfered to use link when downloading torrents as it will save you space, while still beeing able to seed.'), - 'advanced': True, - }, - { - 'advanced': True, - 'name': 'ntfs_permission', - 'label': 'NTFS Permission', - 'type': 'bool', - 'hidden': os.name != 'nt', - 'description': 'Set permission of moved files to that of destination folder (Windows NTFS only).', - 'default': False, - }, - ], - }, { - 'tab': 'renamer', - 'name': 'meta_renamer', - 'label': 'Advanced renaming', - 'description': 'Meta data file renaming. 
Use <filename> to use the above "File naming" settings, without the file extention.', - 'advanced': True, - 'options': [ - { - 'name': 'rename_nfo', - 'label': 'Rename .NFO', - 'description': 'Rename original .nfo file', - 'type': 'bool', - 'default': True, - }, - { - 'name': 'nfo_name', - 'label': 'NFO naming', - 'default': '.orig.', - 'type': 'choice', - 'options': rename_options - }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/scanner/main.py b/couchpotato/core/plugins/scanner.py similarity index 78% rename from couchpotato/core/plugins/scanner/main.py rename to couchpotato/core/plugins/scanner.py index 3031236f..01a88fbb 100644 --- a/couchpotato/core/plugins/scanner/main.py +++ b/couchpotato/core/plugins/scanner.py @@ -1,30 +1,33 @@ -from couchpotato import get_session -from couchpotato.core.event import fireEvent, addEvent -from couchpotato.core.helpers.encoding import toUnicode, simplifyString, ss, sp -from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ - splitString -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import File, Media -from enzyme.exceptions import NoParserError, ParseError -from guessit import guess_movie_info -from subliminal.videos import Video -import enzyme import os import re import threading import time import traceback + +from couchpotato import get_db +from couchpotato.core.event import fireEvent, addEvent +from couchpotato.core.helpers.encoding import toUnicode, simplifyString, sp, ss +from couchpotato.core.helpers.variable import getExt, getImdb, tryInt, \ + splitString, getIdentifier +from couchpotato.core.logger import CPLog +from couchpotato.core.plugins.base import Plugin +from enzyme.exceptions import NoParserError, ParseError +from guessit import guess_movie_info +from subliminal.videos import Video +import enzyme from six.moves import filter, map, zip + log = CPLog(__name__) +autoload = 'Scanner' + class Scanner(Plugin): ignored_in_path = [os.path.sep + 'extracted' + os.path.sep, 'extracting', '_unpack', '_failed_', '_unknown_', '_exists_', '_failed_remove_', '_failed_rename_', '.appledouble', '.appledb', '.appledesktop', os.path.sep + '._', '.ds_store', 'cp.cpnfo', - 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] #unpacking, smb-crap, hidden files + 'thumbs.db', 'ehthumbs.db', 'desktop.ini'] # unpacking, smb-crap, hidden files ignore_names = ['extract', 'extracting', 'extracted', 'movie', 'movies', 'film', 'films', 'download', 'downloads', 'video_ts', 'audio_ts', 'bdmv', 'certificate'] extensions = { 'movie': ['mkv', 'wmv', 'avi', 'mpg', 'mpeg', 'mp4', 'm2ts', 'iso', 'img', 'mdf', 'ts', 'm4v'], @@ -36,6 +39,17 @@ class Scanner(Plugin): 'trailer': ['mov', 'mp4', 'flv'] } + threed_types = { + 'Half SBS': [('half', 'sbs'), ('h', 'sbs'), 'hsbs'], + 'Full SBS': [('full', 'sbs'), ('f', 'sbs'), 'fsbs'], + 'SBS': ['sbs'], + 'Half OU': [('half', 'ou'), ('h', 'ou'), 'hou'], + 'Full OU': [('full', 'ou'), ('f', 'ou'), 'fou'], + 'OU': ['ou'], + 'Frame Packed': ['mvc', ('complete', 'bluray')], + '3D': ['3d'] + } + file_types = { 'subtitle': ('subtitle', 'subtitle'), 'subtitle_extra': ('subtitle', 'subtitle_extra'), @@ -56,33 +70,43 @@ class Scanner(Plugin): } codecs = { - 'audio': ['dts', 'ac3', 'ac3d', 'mp3'], - 'video': ['x264', 'h264', 'divx', 'xvid'] + 'audio': ['DTS', 'AC3', 'AC3D', 'MP3'], + 'video': ['x264', 'H264', 'DivX', 'Xvid'] + } + + resolutions = { + '1080p': {'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, + '1080i': 
{'resolution_width': 1920, 'resolution_height': 1080, 'aspect': 1.78}, + '720p': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, + '720i': {'resolution_width': 1280, 'resolution_height': 720, 'aspect': 1.78}, + '480p': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, + '480i': {'resolution_width': 640, 'resolution_height': 480, 'aspect': 1.33}, + 'default': {'resolution_width': 0, 'resolution_height': 0, 'aspect': 1}, } audio_codec_map = { - 0x2000: 'ac3', - 0x2001: 'dts', - 0x0055: 'mp3', - 0x0050: 'mp2', - 0x0001: 'pcm', - 0x003: 'pcm', - 0x77a1: 'tta1', - 0x5756: 'wav', - 0x6750: 'vorbis', - 0xF1AC: 'flac', - 0x00ff: 'aac', + 0x2000: 'AC3', + 0x2001: 'DTS', + 0x0055: 'MP3', + 0x0050: 'MP2', + 0x0001: 'PCM', + 0x003: 'WAV', + 0x77a1: 'TTA1', + 0x5756: 'WAV', + 0x6750: 'Vorbis', + 0xF1AC: 'FLAC', + 0x00ff: 'AAC', } source_media = { - 'bluray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], - 'hddvd': ['hddvd', 'hd-dvd'], - 'dvd': ['dvd'], - 'hdtv': ['hdtv'] + 'Blu-ray': ['bluray', 'blu-ray', 'brrip', 'br-rip'], + 'HD DVD': ['hddvd', 'hd-dvd'], + 'DVD': ['dvd'], + 'HDTV': ['hdtv'] } - clean = '[ _\,\.\(\)\[\]\-]?(extended.cut|directors.cut|french|swedisch|danish|dutch|swesub|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \ - '|hdtvrip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|cd[1-9]|\[.*\])([ _\,\.\(\)\[\]\-]|$)' + clean = '([ _\,\.\(\)\[\]\-]|^)(3d|hsbs|sbs|half.sbs|full.sbs|ou|half.ou|full.ou|extended|extended.cut|directors.cut|french|fr|swedisch|sw|danish|dutch|nl|swesub|subs|spanish|german|ac3|dts|custom|dc|divx|divx5|dsr|dsrip|dutch|dvd|dvdr|dvdrip|dvdscr|dvdscreener|screener|dvdivx|cam|fragment|fs|hdtv|hdrip' \ + '|hdtvrip|webdl|web.dl|webrip|web.rip|internal|limited|multisubs|ntsc|ogg|ogm|pal|pdtv|proper|repack|rerip|retail|r3|r5|bd5|se|svcd|swedish|german|read.nfo|nfofix|unrated|ws|telesync|ts|telecine|tc|brrip|bdrip|video_ts|audio_ts|480p|480i|576p|576i|720p|720i|1080p|1080i|hrhd|hrhdtv|hddvd|bluray|x264|h264|xvid|xvidvd|xxx|www.www|hc|\[.*\])(?=[ _\,\.\(\)\[\]\-]|$)' multipart_regex = [ '[ _\.-]+cd[ _\.-]*([0-9a-d]+)', #*cd1 '[ _\.-]+dvd[ _\.-]*([0-9a-d]+)', #*dvd1 @@ -125,8 +149,8 @@ class Scanner(Plugin): check_file_date = True try: files = [] - for root, dirs, walk_files in os.walk(folder): - files.extend([sp(os.path.join(root, filename)) for filename in walk_files]) + for root, dirs, walk_files in os.walk(folder, followlinks=True): + files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files]) # Break if CP wants to shut down if self.shuttingDown(): @@ -140,7 +164,6 @@ class Scanner(Plugin): check_file_date = False files = [sp(x) for x in files] - for file_path in files: if not os.path.exists(file_path): @@ -161,7 +184,7 @@ class Scanner(Plugin): identifiers = [identifier] # Identifier with quality - quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'} + quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier':'dvdr'} if quality: identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', '')) identifiers = 
[identifier_with_quality, identifier] @@ -332,7 +355,6 @@ class Scanner(Plugin): release_download = None # Determine file types - db = get_session() processed_movies = {} while True and not self.shuttingDown(): try: @@ -342,6 +364,7 @@ class Scanner(Plugin): if return_ignored is False and identifier in ignored_identifiers: log.debug('Ignore file found, ignoring release: %s', identifier) + total_found -= 1 continue # Group extra (and easy) files first @@ -362,6 +385,7 @@ class Scanner(Plugin): if len(group['files']['movie']) == 0: log.error('Couldn\'t find any movie files for %s', identifier) + total_found -= 1 continue log.debug('Getting metadata for %s', identifier) @@ -397,19 +421,17 @@ class Scanner(Plugin): del group['unsorted_files'] # Determine movie - group['library'] = self.determineMovie(group, release_download = release_download) - if not group['library']: - log.error('Unable to determine movie: %s', group['identifiers']) + group['media'] = self.determineMedia(group, release_download = release_download) + if not group['media']: + log.error('Unable to determine media: %s', group['identifiers']) else: - movie = db.query(Media).filter_by(library_id = group['library']['id']).first() - group['movie_id'] = None if not movie else movie.id - db.expire_all() + group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb') processed_movies[identifier] = group # Notify parent & progress on something found if on_found: - on_found(group, total_found, total_found - len(processed_movies)) + on_found(group, total_found, len(valid_files)) # Wait for all the async events calm down a bit while threading.activeCount() > 100 and not self.shuttingDown(): @@ -431,28 +453,39 @@ class Scanner(Plugin): for cur_file in files: if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue # Ignore smaller files - meta = self.getMeta(cur_file) + if not data.get('audio'): # Only get metadata from first media file + meta = self.getMeta(cur_file) - try: - data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) - data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio'])) - data['resolution_width'] = meta.get('resolution_width', 720) - data['resolution_height'] = meta.get('resolution_height', 480) - data['audio_channels'] = meta.get('audio_channels', 2.0) - data['aspect'] = round(float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480), 2) - except: - log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) - pass + try: + data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video'])) + data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio'])) + data['audio_channels'] = meta.get('audio_channels', 2.0) + if meta.get('resolution_width'): + data['resolution_width'] = meta.get('resolution_width') + data['resolution_height'] = meta.get('resolution_height') + data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 1), 2) + else: + data.update(self.getResolution(cur_file)) + except: + log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc())) + pass - if data.get('audio'): break + data['size'] = data.get('size', 0) + self.getFileSize(cur_file) - # Use the quality guess first, if that failes use the quality we wanted to download data['quality'] = None + quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True) + + # Use the quality that we snatched but 
check if it matches our guess if release_download and release_download.get('quality'): data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True) + data['quality']['is_3d'] = release_download.get('is_3d', 0) + if data['quality']['identifier'] != quality['identifier']: + log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier'])) + if data['quality']['is_3d'] != quality['is_3d']: + log.info('Different 3d snatched than detected for %s: %s vs. %s. Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d'])) if not data['quality']: - data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True) + data['quality'] = quality if not data['quality']: data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True) @@ -462,16 +495,32 @@ class Scanner(Plugin): filename = re.sub('(.cp\(tt[0-9{7}]+\))', '', files[0]) data['group'] = self.getGroup(filename[len(folder):]) data['source'] = self.getSourceMedia(filename) - + if data['quality'].get('is_3d', 0): + data['3d_type'] = self.get3dType(filename) return data + def get3dType(self, filename): + filename = ss(filename) + + words = re.split('\W+', filename.lower()) + + for key in self.threed_types: + tags = self.threed_types.get(key, []) + + for tag in tags: + if (isinstance(tag, tuple) and '.'.join(tag) in '.'.join(words)) or (isinstance(tag, (str, unicode)) and ss(tag.lower()) in words): + log.debug('Found %s in %s', (tag, filename)) + return key + + return '' + def getMeta(self, filename): try: p = enzyme.parse(filename) # Video codec - vc = ('h264' if p.video[0].codec == 'AVC1' else p.video[0].codec).lower() + vc = ('H264' if p.video[0].codec == 'AVC1' else p.video[0].codec) # Audio codec ac = p.audio[0].codec @@ -533,7 +582,7 @@ class Scanner(Plugin): return detected_languages - def determineMovie(self, group, release_download = None): + def determineMedia(self, group, release_download = None): # Get imdb id from downloader imdb_id = release_download and release_download.get('imdb_id') @@ -575,20 +624,6 @@ class Scanner(Plugin): except: pass - # Check if path is already in db - if not imdb_id: - - db = get_session() - for cf in files['movie']: - f = db.query(File).filter_by(path = toUnicode(cf)).first() - try: - imdb_id = f.library[0].identifier - log.debug('Found movie via database: %s', cf) - cur_file = cf - break - except: - pass - # Search based on identifiers if not imdb_id: for identifier in group['identifiers']: @@ -599,19 +634,32 @@ class Scanner(Plugin): name_year = self.getReleaseNameYear(identifier, file_name = filename if not group['is_dvd'] else None) if name_year.get('name') and name_year.get('year'): - movie = fireEvent('movie.search', q = '%(name)s %(year)s' % name_year, merge = True, limit = 1) + search_q = '%(name)s %(year)s' % name_year + movie = fireEvent('movie.search', q = search_q, merge = True, limit = 1) + + # Try with other + if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'): + search_q2 = '%(name)s %(year)s' % name_year['other'] + if search_q2 != search_q: + movie = fireEvent('movie.search', q = search_q2, merge = True, limit = 1) if len(movie) > 0: imdb_id = movie[0].get('imdb') - log.debug('Found movie via search: %s', cur_file) + log.debug('Found movie via search: %s', identifier) if 
imdb_id: break else: log.debug('Identifier to short to use for search: %s', identifier) if imdb_id: - return fireEvent('library.add.movie', attrs = { - 'identifier': imdb_id - }, update_after = False, single = True) + try: + db = get_db() + return db.get('media', 'imdb-%s' % imdb_id, with_doc = True)['doc'] + except: + log.debug('Movie "%s" not in library, just getting info', imdb_id) + return { + 'identifier': imdb_id, + 'info': fireEvent('movie.info', identifier = imdb_id, merge = True, extended = False) + } log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers']) return {} @@ -716,19 +764,26 @@ class Scanner(Plugin): if not file_size: file_size = [] try: - return (file_size.get('min', 0) * 1048576) < os.path.getsize(file) < (file_size.get('max', 100000) * 1048576) + return file_size.get('min', 0) < self.getFileSize(file) < file_size.get('max', 100000) except: log.error('Couldn\'t get filesize of %s.', file) return False - def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): + def getFileSize(self, file): + try: + return os.path.getsize(file) / 1024 / 1024 + except: + return None - year = self.findYear(file_path) + def createStringIdentifier(self, file_path, folder = '', exclude_filename = False): identifier = file_path.replace(folder, '').lstrip(os.path.sep) # root folder identifier = os.path.splitext(identifier)[0] # ext + # Make sure the identifier is lower case as all regex is with lower case tags + identifier = identifier.lower() + try: path_split = splitString(identifier, os.path.sep) identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1] # Only get filename @@ -743,8 +798,13 @@ class Scanner(Plugin): # remove cptag identifier = self.removeCPTag(identifier) - # groups, release tags, scenename cleaner, regex isn't correct - identifier = re.sub(self.clean, '::', simplifyString(identifier)).strip(':') + # simplify the string + identifier = simplifyString(identifier) + + year = self.findYear(file_path) + + # groups, release tags, scenename cleaner + identifier = re.sub(self.clean, '::', identifier).strip(':') # Year if year and identifier[:4] != year: @@ -793,6 +853,16 @@ class Scanner(Plugin): except: return '' + def getResolution(self, filename): + try: + for key in self.resolutions: + if key in filename.lower() and key != 'default': + return self.resolutions[key] + except: + pass + + return self.resolutions['default'] + def getGroup(self, file): try: match = re.findall('\-([A-Z0-9]+)[\.\/]', file, re.I) @@ -840,9 +910,11 @@ class Scanner(Plugin): log.debug('Could not detect via guessit "%s": %s', (file_name, traceback.format_exc())) # Backup to simple + release_name = os.path.basename(release_name.replace('\\', '/')) cleaned = ' '.join(re.split('\W+', simplifyString(release_name))) cleaned = re.sub(self.clean, ' ', cleaned) + year = None for year_str in [file_name, release_name, cleaned]: if not year_str: continue year = self.findYear(year_str) @@ -851,7 +923,7 @@ class Scanner(Plugin): cp_guess = {} - if year: # Split name on year + if year: # Split name on year try: movie_name = cleaned.rsplit(year, 1).pop(0).strip() if movie_name: @@ -873,8 +945,11 @@ class Scanner(Plugin): pass if cp_guess.get('year') == guess.get('year') and len(cp_guess.get('name', '')) > len(guess.get('name', '')): + cp_guess['other'] = guess return cp_guess elif guess == {}: + cp_guess['other'] = guess return cp_guess + guess['other'] = cp_guess return 
guess diff --git a/couchpotato/core/plugins/scanner/__init__.py b/couchpotato/core/plugins/scanner/__init__.py deleted file mode 100644 index 66c6b39c..00000000 --- a/couchpotato/core/plugins/scanner/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import Scanner - - -def start(): - return Scanner() - -config = [] diff --git a/couchpotato/core/plugins/score/__init__.py b/couchpotato/core/plugins/score/__init__.py index a960081c..65cadd99 100644 --- a/couchpotato/core/plugins/score/__init__.py +++ b/couchpotato/core/plugins/score/__init__.py @@ -1,7 +1,5 @@ from .main import Score -def start(): +def autoload(): return Score() - -config = [] diff --git a/couchpotato/core/plugins/score/main.py b/couchpotato/core/plugins/score/main.py index 30e7baca..e6fef253 100644 --- a/couchpotato/core/plugins/score/main.py +++ b/couchpotato/core/plugins/score/main.py @@ -24,11 +24,11 @@ class Score(Plugin): try: preferred_words = removeDuplicate(preferred_words + splitString(movie['category']['preferred'].lower())) except: pass - score = nameScore(toUnicode(nzb['name']), movie['library']['year'], preferred_words) + score = nameScore(toUnicode(nzb['name']), movie['info']['year'], preferred_words) - for movie_title in movie['library']['titles']: - score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title['title'])) - score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title['title'])) + for movie_title in movie['info']['titles']: + score += nameRatioScore(toUnicode(nzb['name']), toUnicode(movie_title)) + score += namePositionScore(toUnicode(nzb['name']), toUnicode(movie_title)) score += sizeScore(nzb['size']) @@ -44,7 +44,7 @@ class Score(Plugin): score += providerScore(nzb['provider']) # Duplicates in name - score += duplicateScore(nzb['name'], getTitle(movie['library'])) + score += duplicateScore(nzb['name'], getTitle(movie)) # Merge global and category ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower()) @@ -52,7 +52,7 @@ class Score(Plugin): except: pass # Partial ignored words - score += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words) + score += partialIgnoredScore(nzb['name'], getTitle(movie), ignored_words) # Ignore single downloads from multipart score += halfMultipartScore(nzb['name']) diff --git a/couchpotato/core/plugins/score/scores.py b/couchpotato/core/plugins/score/scores.py index c1f5123a..a53608c0 100644 --- a/couchpotato/core/plugins/score/scores.py +++ b/couchpotato/core/plugins/score/scores.py @@ -1,10 +1,12 @@ +import re +import traceback + from couchpotato.core.event import fireEvent from couchpotato.core.helpers.encoding import simplifyString from couchpotato.core.helpers.variable import tryInt from couchpotato.core.logger import CPLog from couchpotato.environment import Env -import re -import traceback + log = CPLog(__name__) diff --git a/couchpotato/core/plugins/status/__init__.py b/couchpotato/core/plugins/status/__init__.py deleted file mode 100644 index 204fbee7..00000000 --- a/couchpotato/core/plugins/status/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import StatusPlugin - - -def start(): - return StatusPlugin() - -config = [] diff --git a/couchpotato/core/plugins/status/main.py b/couchpotato/core/plugins/status/main.py deleted file mode 100644 index 08f46984..00000000 --- a/couchpotato/core/plugins/status/main.py +++ /dev/null @@ -1,136 +0,0 @@ -import traceback -from couchpotato import get_session -from couchpotato.api import addApiView -from couchpotato.core.event import 
addEvent -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Status - -log = CPLog(__name__) - - -class StatusPlugin(Plugin): - - statuses = { - 'needs_update': 'Needs update', - 'active': 'Active', - 'done': 'Done', - 'downloaded': 'Downloaded', - 'wanted': 'Wanted', - 'snatched': 'Snatched', - 'failed': 'Failed', - 'deleted': 'Deleted', - 'ignored': 'Ignored', - 'available': 'Available', - 'suggest': 'Suggest', - 'seeding': 'Seeding', - 'missing': 'Missing', - } - status_cached = {} - - def __init__(self): - addEvent('status.get', self.get) - addEvent('status.get_by_id', self.getById) - addEvent('status.all', self.all) - addEvent('app.initialize', self.fill) - addEvent('app.load', self.all) # Cache all statuses - - addApiView('status.list', self.list, docs = { - 'desc': 'Check for available update', - 'return': {'type': 'object', 'example': """{ - 'success': True, - 'list': array, statuses -}"""} - }) - - def list(self, **kwargs): - - return { - 'success': True, - 'list': self.all() - } - - def getById(self, id): - db = get_session() - status = db.query(Status).filter_by(id = id).first() - status_dict = status.to_dict() - #db.close() - - return status_dict - - def all(self): - - db = get_session() - - statuses = db.query(Status).all() - - temp = [] - for status in statuses: - s = status.to_dict() - temp.append(s) - - # Update cache - self.status_cached[status.identifier] = s - - return temp - - def get(self, identifiers): - - if not isinstance(identifiers, list): - identifiers = [identifiers] - - try: - db = get_session() - return_list = [] - - for identifier in identifiers: - - if self.status_cached.get(identifier): - return_list.append(self.status_cached.get(identifier)) - continue - - s = db.query(Status).filter_by(identifier = identifier).first() - if not s: - s = Status( - identifier = identifier, - label = toUnicode(identifier.capitalize()) - ) - db.add(s) - db.commit() - - status_dict = s.to_dict() - - self.status_cached[identifier] = status_dict - return_list.append(status_dict) - - return return_list if len(identifiers) > 1 else return_list[0] - except: - log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() - - def fill(self): - - try: - db = get_session() - - for identifier, label in self.statuses.items(): - s = db.query(Status).filter_by(identifier = identifier).first() - if not s: - log.info('Creating status: %s', label) - s = Status( - identifier = identifier, - label = toUnicode(label) - ) - db.add(s) - - s.label = toUnicode(label) - db.commit() - except: - log.error('Failed: %s', traceback.format_exc()) - db.rollback() - finally: - db.close() - diff --git a/couchpotato/core/plugins/status/static/status.js b/couchpotato/core/plugins/status/static/status.js deleted file mode 100644 index 2b8d30f3..00000000 --- a/couchpotato/core/plugins/status/static/status.js +++ /dev/null @@ -1,17 +0,0 @@ -var StatusBase = new Class({ - - setup: function(statuses){ - var self = this; - - self.statuses = statuses; - - }, - - get: function(id){ - return this.statuses.filter(function(status){ - return status.id == id - }).pick() - }, - -}); -window.Status = new StatusBase(); diff --git a/couchpotato/core/plugins/subtitle/main.py b/couchpotato/core/plugins/subtitle.py similarity index 55% rename from couchpotato/core/plugins/subtitle/main.py rename to couchpotato/core/plugins/subtitle.py index 56056c0a..fdb640b1 100644 
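Editor's note: the subtitle plugin below drops the unused library-wide search and keeps only the per-release hook on 'renamer.before'. For readers unfamiliar with the flow, here is a minimal sketch of its download loop, using the same subliminal 0.x calls that appear in the hunk (files, languages, services and cache_dir stand in for the values the plugin reads from the rename group and its settings):

    import subliminal

    def fetch_missing_subtitles(files, languages, services, cache_dir, force = False):
        downloaded = []
        for lang in languages:
            # One language at a time, mirroring the plugin; 'force' maps to
            # the new 'force' setting introduced in this diff
            results = subliminal.download_subtitles(files, multi = True, force = force,
                languages = [lang], services = services, cache_dir = cache_dir)
            for subtitle in results:
                downloaded.extend(results[subtitle])
        return downloaded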
--- a/couchpotato/core/plugins/subtitle/main.py +++ b/couchpotato/core/plugins/subtitle.py @@ -1,16 +1,18 @@ -from couchpotato import get_session -from couchpotato.core.event import addEvent, fireEvent +import traceback + +from couchpotato.core.event import addEvent from couchpotato.core.helpers.encoding import toUnicode, sp from couchpotato.core.helpers.variable import splitString from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -from couchpotato.core.settings.model import Library, FileType from couchpotato.environment import Env import subliminal -import traceback + log = CPLog(__name__) +autoload = 'Subtitle' + class Subtitle(Plugin): @@ -19,28 +21,6 @@ class Subtitle(Plugin): def __init__(self): addEvent('renamer.before', self.searchSingle) - def searchLibrary(self): - - # Get all active and online movies - db = get_session() - - library = db.query(Library).all() - done_status = fireEvent('status.get', 'done', single = True) - - for movie in library.movies: - - for release in movie.releases: - - # get releases and their movie files - if release.status_id is done_status.get('id'): - - files = [] - for file in release.files.filter(FileType.status.has(identifier = 'movie')).all(): - files.append(file.path) - - # get subtitles for those files - subliminal.list_subtitles(files, cache_dir = Env.get('cache_dir'), multi = True, languages = self.getLanguages(), services = self.services) - def searchSingle(self, group): if self.isDisabled(): return @@ -52,7 +32,7 @@ class Subtitle(Plugin): for lang in self.getLanguages(): if lang not in available_languages: - download = subliminal.download_subtitles(files, multi = True, force = False, languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) + download = subliminal.download_subtitles(files, multi = True, force = self.conf('force'), languages = [lang], services = self.services, cache_dir = Env.get('cache_dir')) for subtitle in download: downloaded.extend(download[subtitle]) @@ -71,3 +51,36 @@ def getLanguages(self): return splitString(self.conf('languages')) + + +config = [{ + 'name': 'subtitle', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'subtitle', + 'label': 'Download subtitles', + 'description': 'after rename', + 'options': [ + { + 'name': 'enabled', + 'label': 'Search and download subtitles', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'languages', + 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. See the codes on Wikipedia'), + }, + { + 'advanced': True, + 'name': 'force', + 'label': 'Force', + 'description': ('Force download all languages (including embedded).', 'This will also overwrite all existing subtitles.'), + 'default': False, + 'type': 'bool', + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/subtitle/__init__.py b/couchpotato/core/plugins/subtitle/__init__.py deleted file mode 100644 index 59847aee..00000000 --- a/couchpotato/core/plugins/subtitle/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .main import Subtitle - - -def start(): - return Subtitle() - -config = [{ - 'name': 'subtitle', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'subtitle', - 'label': 'Download subtitles', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download subtitles', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'languages', - 'description': ('Comma separated, 2 letter country code.', 'Example: en, nl. 
See the codes on Wikipedia'), - }, -# { -# 'name': 'automatic', -# 'default': True, -# 'type': 'bool', -# 'description': 'Automatically search & download for movies in library', -# }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/trailer/main.py b/couchpotato/core/plugins/trailer.py similarity index 60% rename from couchpotato/core/plugins/trailer/main.py rename to couchpotato/core/plugins/trailer.py index ba040058..ae525862 100644 --- a/couchpotato/core/plugins/trailer/main.py +++ b/couchpotato/core/plugins/trailer.py @@ -1,11 +1,15 @@ +import os + from couchpotato.core.event import addEvent, fireEvent from couchpotato.core.helpers.variable import getExt, getTitle from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin -import os + log = CPLog(__name__) +autoload = 'Trailer' + class Trailer(Plugin): @@ -18,7 +22,7 @@ class Trailer(Plugin): trailers = fireEvent('trailer.search', group = group, merge = True) if not trailers or trailers == []: - log.info('No trailers found for: %s', getTitle(group['library'])) + log.info('No trailers found for: %s', getTitle(group)) return False for trailer in trailers.get(self.conf('quality'), []): @@ -40,3 +44,37 @@ class Trailer(Plugin): break return True + + +config = [{ + 'name': 'trailer', + 'groups': [ + { + 'tab': 'renamer', + 'name': 'trailer', + 'label': 'Download trailer', + 'description': 'after rename', + 'options': [ + { + 'name': 'enabled', + 'label': 'Search and download trailers', + 'default': False, + 'type': 'enabler', + }, + { + 'name': 'quality', + 'default': '720p', + 'type': 'dropdown', + 'values': [('1080p', '1080p'), ('720p', '720p'), ('480p', '480p')], + }, + { + 'name': 'name', + 'label': 'Naming', + 'default': '-trailer', + 'advanced': True, + 'description': 'Use <filename> to use above settings.' + }, + ], + }, + ], +}] diff --git a/couchpotato/core/plugins/trailer/__init__.py b/couchpotato/core/plugins/trailer/__init__.py deleted file mode 100644 index e7a6d26e..00000000 --- a/couchpotato/core/plugins/trailer/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -from .main import Trailer - - -def start(): - return Trailer() - -config = [{ - 'name': 'trailer', - 'groups': [ - { - 'tab': 'renamer', - 'name': 'trailer', - 'label': 'Download trailer', - 'description': 'after rename', - 'options': [ - { - 'name': 'enabled', - 'label': 'Search and download trailers', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'quality', - 'default': '720p', - 'type': 'dropdown', - 'values': [('1080p', '1080p'), ('720p', '720p'), ('480P', '480p')], - }, - { - 'name': 'name', - 'label': 'Naming', - 'default': '-trailer', - 'advanced': True, - 'description': 'Use <filename> to use above settings.'
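The subtitle and trailer hunks above show the refactor this patch repeats across the tree: the old `<plugin>/__init__.py` (a `start()` factory plus module-level `config`) and `<plugin>/main.py` collapse into one module that exposes a module-level `autoload`, here a string naming the plugin class (elsewhere in the patch it is a factory function), and keeps `config` at module level. A minimal sketch of the resulting layout; `Example` is a made-up plugin and the loader that consumes `autoload` is not shown in this diff:

```python
# Hypothetical plugin in the new single-file layout this patch converts to.
from couchpotato.core.plugins.base import Plugin

autoload = 'Example'  # class the loader is assumed to instantiate


class Example(Plugin):

    def __init__(self):
        # register event handlers here, e.g. addEvent('renamer.before', ...)
        pass


config = [{
    'name': 'example',
    'groups': [
        {
            'tab': 'renamer',
            'name': 'example',
            'options': [
                {
                    'name': 'enabled',
                    'default': False,
                    'type': 'enabler',
                },
            ],
        },
    ],
}]
```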
- }, - ], - }, - ], -}] diff --git a/couchpotato/core/plugins/userscript/__init__.py b/couchpotato/core/plugins/userscript/__init__.py index 184f5d79..9d708593 100644 --- a/couchpotato/core/plugins/userscript/__init__.py +++ b/couchpotato/core/plugins/userscript/__init__.py @@ -1,7 +1,5 @@ from .main import Userscript -def start(): +def autoload(): return Userscript() - -config = [] diff --git a/couchpotato/core/plugins/userscript/bookmark.js b/couchpotato/core/plugins/userscript/bookmark.js_tmpl similarity index 95% rename from couchpotato/core/plugins/userscript/bookmark.js rename to couchpotato/core/plugins/userscript/bookmark.js_tmpl index cdba3b40..cc04baf7 100644 --- a/couchpotato/core/plugins/userscript/bookmark.js +++ b/couchpotato/core/plugins/userscript/bookmark.js_tmpl @@ -32,7 +32,7 @@ var isCorrectUrl = function() { } var addUserscript = function() { // Add window param - document.body.setAttribute('cp_auto_open', true) + document.body.setAttribute('cp_auto_open', 'true') // Load userscript var e = document.createElement('script'); diff --git a/couchpotato/core/plugins/userscript/main.py b/couchpotato/core/plugins/userscript/main.py index 113c0351..4ca8ed37 100644 --- a/couchpotato/core/plugins/userscript/main.py +++ b/couchpotato/core/plugins/userscript/main.py @@ -1,3 +1,5 @@ +import os + from couchpotato import index from couchpotato.api import addApiView from couchpotato.core.event import fireEvent, addEvent @@ -6,14 +8,14 @@ from couchpotato.core.logger import CPLog from couchpotato.core.plugins.base import Plugin from couchpotato.environment import Env from tornado.web import RequestHandler -import os + log = CPLog(__name__) class Userscript(Plugin): - version = 4 + version = 5 def __init__(self): addApiView('userscript.get/(.*)/(.*)', self.getUserScript, static = True) @@ -33,7 +35,7 @@ class Userscript(Plugin): 'host': host, } - return self.renderTemplate(__file__, 'bookmark.js', **params) + return self.renderTemplate(__file__, 'bookmark.js_tmpl', **params) def getIncludes(self, **kwargs): @@ -58,7 +60,7 @@ class Userscript(Plugin): 'host': '%s://%s' % (self.request.protocol, self.request.headers.get('X-Forwarded-Host') or self.request.headers.get('host')), } - script = klass.renderTemplate(__file__, 'template.js', **params) + script = klass.renderTemplate(__file__, 'template.js_tmpl', **params) klass.createFile(os.path.join(Env.get('cache_dir'), 'couchpotato.user.js'), script) self.redirect(Env.get('api_base') + 'file.cache/couchpotato.user.js') diff --git a/couchpotato/core/plugins/userscript/static/userscript.js b/couchpotato/core/plugins/userscript/static/userscript.js index 11daa068..d8caeb3f 100644 --- a/couchpotato/core/plugins/userscript/static/userscript.js +++ b/couchpotato/core/plugins/userscript/static/userscript.js @@ -2,6 +2,7 @@ Page.Userscript = new Class({ Extends: PageBase, + order: 80, name: 'userscript', has_tab: false, @@ -12,7 +13,7 @@ Page.Userscript = new Class({ } }, - indexAction: function(param){ + indexAction: function(){ var self = this; self.el.adopt( @@ -53,28 +54,28 @@ var UserscriptSettingTab = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addSettings.bind(self)) + App.addEvent('loadSettings', self.addSettings.bind(self)) }, addSettings: function(){ var self = this; - self.settings = App.getPage('Settings') + self.settings = App.getPage('Settings'); self.settings.addEvent('create', function(){ var host_url = window.location.protocol + '//' + window.location.host; self.settings.createGroup({ 'name': 
'userscript', - 'label': 'Install the bookmarklet or userscript', + 'label': 'Install the browser extension or bookmarklet', 'description': 'Easily add movies via imdb.com, appletrailers and more' }).inject(self.settings.tabs.automation.content, 'top').adopt( new Element('a.userscript.button', { - 'text': 'Install userscript', - 'href': Api.createUrl('userscript.get')+randomString()+'/couchpotato.user.js', + 'text': 'Install extension', + 'href': 'https://couchpota.to/extension/', 'target': '_blank' - }), + }), new Element('span.or[text=or]'), new Element('span.bookmarklet').adopt( new Element('a.button.green', { @@ -86,7 +87,7 @@ var UserscriptSettingTab = new Class({ 'target': '', 'events': { 'click': function(e){ - (e).stop() + (e).stop(); alert('Drag it to your bookmark ;)') } } @@ -96,7 +97,7 @@ var UserscriptSettingTab = new Class({ }) ) ).setStyles({ - 'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')" + 'background-image': "url('https://couchpota.to/media/images/userscript.gif')" }); }); diff --git a/couchpotato/core/plugins/userscript/static/userscript.png b/couchpotato/core/plugins/userscript/static/userscript.png deleted file mode 100644 index c8e76577..00000000 Binary files a/couchpotato/core/plugins/userscript/static/userscript.png and /dev/null differ diff --git a/couchpotato/core/plugins/userscript/template.js b/couchpotato/core/plugins/userscript/template.js_tmpl similarity index 98% rename from couchpotato/core/plugins/userscript/template.js rename to couchpotato/core/plugins/userscript/template.js_tmpl index f5928b85..5a32da39 100644 --- a/couchpotato/core/plugins/userscript/template.js +++ b/couchpotato/core/plugins/userscript/template.js_tmpl @@ -25,13 +25,16 @@ var version = {{version}}, host = '{{host}}', api = '{{api}}'; -function create() { +var create = function() { + var A, B; switch (arguments.length) { case 1: - var A = document.createTextNode(arguments[0]); + A = document.createTextNode(arguments[0]); break; default: - var A = document.createElement(arguments[0]), B = arguments[1]; + A = document.createElement(arguments[0]); + B = arguments[1]; + for ( var b in B) { if (b.indexOf("on") == 0){ A.addEventListener(b.substring(2), B[b], false); @@ -141,4 +144,4 @@ if(document.location.href.indexOf(host) == -1) else setVersion(); -} \ No newline at end of file +} diff --git a/couchpotato/core/plugins/wizard/__init__.py b/couchpotato/core/plugins/wizard/__init__.py index eda6f25a..7a272b44 100644 --- a/couchpotato/core/plugins/wizard/__init__.py +++ b/couchpotato/core/plugins/wizard/__init__.py @@ -1,7 +1,7 @@ from .main import Wizard -def start(): +def autoload(): return Wizard() config = [{ diff --git a/couchpotato/core/plugins/wizard/static/wizard.css b/couchpotato/core/plugins/wizard/static/wizard.css index c27c1d83..9af32ed0 100644 --- a/couchpotato/core/plugins/wizard/static/wizard.css +++ b/couchpotato/core/plugins/wizard/static/wizard.css @@ -5,10 +5,9 @@ .page.wizard h1 { padding: 10px 0; - margin: 0 5px; display: block; font-size: 30px; - margin-top: 80px; + margin: 80px 5px 0; } .page.wizard .description { @@ -52,7 +51,7 @@ font-weight: normal; border-bottom: 4px solid transparent; } - + .page.wizard .tabs li:hover a { border-color: #047792; } .page.wizard .tabs li.done a { border-color: #04bce6; } diff --git a/couchpotato/core/plugins/wizard/static/wizard.js b/couchpotato/core/plugins/wizard/static/wizard.js index b4857abb..f215dbf4 100644 --- a/couchpotato/core/plugins/wizard/static/wizard.js +++ 
b/couchpotato/core/plugins/wizard/static/wizard.js @@ -2,6 +2,7 @@ Page.Wizard = new Class({ Extends: Page.Settings, + order: 70, name: 'wizard', has_tab: false, wizard_only: true, @@ -35,11 +36,11 @@ Page.Wizard = new Class({ }, 'automation': { 'title': 'Easily add movies to your wanted list!', - 'description': 'You can easily add movies from your favorite movie site, like IMDB, Rotten Tomatoes, Apple Trailers and more. Just install the userscript or drag the bookmarklet to your browsers bookmarks.' + + 'description': 'You can easily add movies from your favorite movie site, like IMDB, Rotten Tomatoes, Apple Trailers and more. Just install the extension or drag the bookmarklet to your bookmarks.' + '
Once installed, just click the bookmarklet on a movie page and watch the magic happen ;)', 'content': function(){ return App.createUserscriptButtons().setStyles({ - 'background-image': "url('"+App.createUrl('static/plugin/userscript/userscript.png')+"')" + 'background-image': "url('https://couchpota.to/media/images/userscript.gif')" }) } }, @@ -89,7 +90,7 @@ Page.Wizard = new Class({ self.parent(action, params); self.addEvent('create', function(){ - self.order(); + self.orderGroups(); }); self.initialized = true; @@ -105,16 +106,16 @@ Page.Wizard = new Class({ }).delay(1) }, - order: function(){ + orderGroups: function(){ var self = this; var form = self.el.getElement('.uniForm'); var tabs = self.el.getElement('.tabs'); - self.groups.each(function(group, nr){ + self.groups.each(function(group){ if(self.headers[group]){ - group_container = new Element('.wgroup_'+group, { + var group_container = new Element('.wgroup_'+group, { 'styles': { 'opacity': 0.2 }, @@ -129,7 +130,7 @@ Page.Wizard = new Class({ }) } - var content = self.headers[group].content + var content = self.headers[group].content; group_container.adopt( new Element('h1', { 'text': self.headers[group].title @@ -144,7 +145,7 @@ Page.Wizard = new Class({ var tab_navigation = tabs.getElement('.t_'+group); if(!tab_navigation && self.headers[group] && self.headers[group].include){ - tab_navigation = [] + tab_navigation = []; self.headers[group].include.each(function(inc){ tab_navigation.include(tabs.getElement('.t_'+inc)); }) @@ -157,7 +158,7 @@ Page.Wizard = new Class({ self.headers[group].include.each(function(inc){ self.el.getElement('.tab_'+inc).inject(group_container); - }) + }); new Element('li.t_'+group).adopt( new Element('a', { @@ -215,9 +216,9 @@ Page.Wizard = new Class({ self.groups.each(function(groups2, nr2){ var t2 = self.el.getElement('.t_'+groups2); t2[nr2 > nr ? 'removeClass' : 'addClass' ]('done'); - }) + }); g.tween('opacity', 1); - } + }; if(nr == 0) func(); @@ -241,4 +242,4 @@ Page.Wizard = new Class({ } -}); \ No newline at end of file +}); diff --git a/couchpotato/core/providers/automation/bluray/__init__.py b/couchpotato/core/providers/automation/bluray/__init__.py deleted file mode 100644 index 519a7119..00000000 --- a/couchpotato/core/providers/automation/bluray/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -from .main import Bluray - - -def start(): - return Bluray() - -config = [{ - 'name': 'bluray', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'bluray_automation', - 'label': 'Blu-ray.com', - 'description': 'Imports movies from blu-ray.com.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'backlog', - 'advanced': True, - 'description': 'Parses the history until the minimum movie year is reached. 
(Will be disabled once it has completed)', - 'default': False, - 'type': 'bool', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/bluray/main.py b/couchpotato/core/providers/automation/bluray/main.py deleted file mode 100644 index ddd7b8ab..00000000 --- a/couchpotato/core/providers/automation/bluray/main.py +++ /dev/null @@ -1,75 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class Bluray(Automation, RSS): - - interval = 1800 - rss_url = 'http://www.blu-ray.com/rss/newreleasesfeed.xml' - backlog_url = 'http://www.blu-ray.com/movies/movies.php?show=newreleases&page=%s' - - def getIMDBids(self): - - movies = [] - - if self.conf('backlog'): - - page = 0 - while True: - page += 1 - - url = self.backlog_url % page - data = self.getHTMLData(url) - soup = BeautifulSoup(data) - - try: - # Stop if the release year is before the minimal year - page_year = soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].h3.get_text().split(', ')[1] - if tryInt(page_year) < self.getMinimal('year'): - break - - for table in soup.body.find_all('center')[3].table.tr.find_all('td', recursive = False)[3].find_all('table')[1:20]: - name = table.h3.get_text().lower().split('blu-ray')[0].strip() - year = table.small.get_text().split('|')[1].strip() - - if not name.find('/') == -1: # make sure it is not a double movie release - continue - - if tryInt(year) < self.getMinimal('year'): - continue - - imdb = self.search(name, year) - - if imdb: - if self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - except: - log.debug('Error loading page: %s', page) - break - - self.conf('backlog', value = False) - - rss_movies = self.getRSSData(self.rss_url) - - for movie in rss_movies: - name = self.getTextElement(movie, 'title').lower().split('blu-ray')[0].strip('(').rstrip() - year = self.getTextElement(movie, 'description').split('|')[1].strip('(').strip() - - if not name.find('/') == -1: # make sure it is not a double movie release - continue - - if tryInt(year) < self.getMinimal('year'): - continue - - imdb = self.search(name, year) - - if imdb: - if self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/flixster/__init__.py b/couchpotato/core/providers/automation/flixster/__init__.py deleted file mode 100644 index 71bd83c0..00000000 --- a/couchpotato/core/providers/automation/flixster/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .main import Flixster - - -def start(): - return Flixster() - -config = [{ - 'name': 'flixster', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'flixster_automation', - 'label': 'Flixster', - 'description': 'Import movies from any public Flixster watchlist', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_ids_use', - 'label': 'Use', - }, - { - 'name': 'automation_ids', - 'label': 'User ID', - 'type': 'combined', - 'combine': ['automation_ids_use', 'automation_ids'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/goodfilms/__init__.py b/couchpotato/core/providers/automation/goodfilms/__init__.py deleted file mode 100644 index e04ccd0d..00000000 --- a/couchpotato/core/providers/automation/goodfilms/__init__.py +++ 
/dev/null @@ -1,29 +0,0 @@ -from .main import Goodfilms - - -def start(): - return Goodfilms() - -config = [{ - 'name': 'goodfilms', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'goodfilms_automation', - 'label': 'Goodfilms', - 'description': 'import movies from your Goodfilms queue', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_username', - 'label': 'Username', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/imdb/__init__.py b/couchpotato/core/providers/automation/imdb/__init__.py deleted file mode 100644 index f9baabf2..00000000 --- a/couchpotato/core/providers/automation/imdb/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -from .main import IMDB - - -def start(): - return IMDB() - -config = [{ - 'name': 'imdb', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'imdb_automation_watchlist', - 'label': 'IMDB', - 'description': 'From any public IMDB watchlists.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - }, - ], - }, - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'imdb_automation_charts', - 'label': 'IMDB', - 'description': 'Import movies from IMDB Charts', - 'options': [ - { - 'name': 'automation_providers_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_charts_theater', - 'type': 'bool', - 'label': 'In Theaters', - 'description': 'New Movies In-Theaters chart', - 'default': True, - }, - { - 'name': 'automation_charts_top250', - 'type': 'bool', - 'label': 'TOP 250', - 'description': 'IMDB TOP 250 chart', - 'default': True, - }, - { - 'name': 'automation_charts_boxoffice', - 'type': 'bool', - 'label': 'Box office TOP 10', - 'description': 'IMDB Box office TOP 10 chart', - 'default': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/imdb/main.py b/couchpotato/core/providers/automation/imdb/main.py deleted file mode 100644 index 6ca81b70..00000000 --- a/couchpotato/core/providers/automation/imdb/main.py +++ /dev/null @@ -1,144 +0,0 @@ -import traceback -import re - -from bs4 import BeautifulSoup -from couchpotato import fireEvent -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import getImdb, splitString, tryInt - -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -from couchpotato.core.providers.base import MultiProvider - - -log = CPLog(__name__) - - -class IMDB(MultiProvider): - - def getTypes(self): - return [IMDBWatchlist, IMDBAutomation] - - -class IMDBBase(Automation, RSS): - - interval = 1800 - - def getInfo(self, imdb_id): - return fireEvent('movie.info', identifier = imdb_id, extended = False, merge = True) - - -class IMDBWatchlist(IMDBBase): - - enabled_option = 'automation_enabled' - - def getIMDBids(self): - - movies = [] - - watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))] - watchlist_urls = splitString(self.conf('automation_urls')) - - index = -1 - for watchlist_url in watchlist_urls: - - try: - # Get list ID - ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url) - if len(ids) == 1: - watchlist_url = 
'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0] - # Try find user id with watchlist - else: - userids = re.findall('(ur\d{7,9})', watchlist_url) - if len(userids) == 1: - watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0] - except: - log.error('Failed getting id from watchlist: %s', traceback.format_exc()) - - index += 1 - if not watchlist_enablers[index]: - continue - - start = 0 - while True: - try: - - w_url = '%s&start=%s' % (watchlist_url, start) - log.debug('Started IMDB watchlists: %s', w_url) - html = self.getHTMLData(w_url) - - try: - split = splitString(html, split_on="
")[1] - html = splitString(split, split_on="
")[0] - except: - pass - - imdbs = getImdb(html, multiple = True) if html else [] - - for imdb in imdbs: - if imdb not in movies: - movies.append(imdb) - - if self.shuttingDown(): - break - - log.debug('Found %s movies on %s', (len(imdbs), w_url)) - - if len(imdbs) < 250: - break - - start += 250 - - except: - log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc())) - - return movies - - -class IMDBAutomation(IMDBBase): - - enabled_option = 'automation_providers_enabled' - - chart_urls = { - 'theater': 'http://www.imdb.com/movies-in-theaters/', - 'top250': 'http://www.imdb.com/chart/top', - 'boxoffice': 'http://www.imdb.com/chart/', - } - - first_table = ['boxoffice'] - - def getIMDBids(self): - - movies = [] - - for url in self.chart_urls: - if self.conf('automation_charts_%s' % url): - data = self.getHTMLData(self.chart_urls[url]) - if data: - html = BeautifulSoup(data) - - try: - result_div = html.find('div', attrs = {'id': 'main'}) - - try: - if url in self.first_table: - table = result_div.find('table') - result_div = table if table else result_div - except: - pass - - imdb_ids = getImdb(str(result_div), multiple = True) - - for imdb_id in imdb_ids: - info = self.getInfo(imdb_id) - if info and self.isMinimalMovie(info): - movies.append(imdb_id) - - if self.shuttingDown(): - break - - except: - log.error('Failed loading IMDB chart results from %s: %s', (url, traceback.format_exc())) - - return movies diff --git a/couchpotato/core/providers/automation/itunes/__init__.py b/couchpotato/core/providers/automation/itunes/__init__.py deleted file mode 100644 index 13526f43..00000000 --- a/couchpotato/core/providers/automation/itunes/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -from .main import ITunes - - -def start(): - return ITunes() - -config = [{ - 'name': 'itunes', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'itunes_automation', - 'label': 'iTunes', - 'description': 'From any iTunes Store feed. 
Url should be the RSS link.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - 'default': ',', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - 'default': 'https://itunes.apple.com/rss/topmovies/limit=25/xml,', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/kinepolis/__init__.py b/couchpotato/core/providers/automation/kinepolis/__init__.py deleted file mode 100644 index cc4c5706..00000000 --- a/couchpotato/core/providers/automation/kinepolis/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import Kinepolis - - -def start(): - return Kinepolis() - -config = [{ - 'name': 'kinepolis', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'kinepolis_automation', - 'label': 'Kinepolis', - 'description': 'Imports movies from the current top 10 of kinepolis.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/letterboxd/__init__.py b/couchpotato/core/providers/automation/letterboxd/__init__.py deleted file mode 100644 index 88bfe6a1..00000000 --- a/couchpotato/core/providers/automation/letterboxd/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .main import Letterboxd - - -def start(): - return Letterboxd() - -config = [{ - 'name': 'letterboxd', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'letterboxd_automation', - 'label': 'Letterboxd', - 'description': 'Import movies from any public Letterboxd watchlist', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - }, - { - 'name': 'automation_urls', - 'label': 'Username', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/moviemeter/__init__.py b/couchpotato/core/providers/automation/moviemeter/__init__.py deleted file mode 100644 index 0e9a4edc..00000000 --- a/couchpotato/core/providers/automation/moviemeter/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import Moviemeter - - -def start(): - return Moviemeter() - -config = [{ - 'name': 'moviemeter', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'moviemeter_automation', - 'label': 'Moviemeter', - 'description': 'Imports movies from the current top 10 of moviemeter.nl.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/moviemeter/main.py b/couchpotato/core/providers/automation/moviemeter/main.py deleted file mode 100644 index dae764bf..00000000 --- a/couchpotato/core/providers/automation/moviemeter/main.py +++ /dev/null @@ -1,28 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.automation.base import Automation - -log = CPLog(__name__) - - -class Moviemeter(Automation, RSS): - - interval = 1800 - rss_url = 'http://www.moviemeter.nl/rss/cinema' - - def getIMDBids(self): - - movies = [] - - rss_movies = self.getRSSData(self.rss_url) - - for movie in rss_movies: - - name_year = fireEvent('scanner.name_year', 
self.getTextElement(movie, 'title'), single = True) - imdb = self.search(name_year.get('name'), name_year.get('year')) - - if imdb and self.isMinimalMovie(imdb): - movies.append(imdb['imdb']) - - return movies diff --git a/couchpotato/core/providers/automation/movies_io/__init__.py b/couchpotato/core/providers/automation/movies_io/__init__.py deleted file mode 100644 index 0361223b..00000000 --- a/couchpotato/core/providers/automation/movies_io/__init__.py +++ /dev/null @@ -1,35 +0,0 @@ -from .main import MoviesIO - - -def start(): - return MoviesIO() - -config = [{ - 'name': 'moviesio', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'moviesio', - 'label': 'Movies.IO', - 'description': 'Imports movies from Movies.io RSS watchlists', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/rottentomatoes/__init__.py b/couchpotato/core/providers/automation/rottentomatoes/__init__.py deleted file mode 100644 index 1d3026d3..00000000 --- a/couchpotato/core/providers/automation/rottentomatoes/__init__.py +++ /dev/null @@ -1,43 +0,0 @@ -from .main import Rottentomatoes - - -def start(): - return Rottentomatoes() - -config = [{ - 'name': 'rottentomatoes', - 'groups': [ - { - 'tab': 'automation', - 'list': 'automation_providers', - 'name': 'rottentomatoes_automation', - 'label': 'Rottentomatoes', - 'description': 'Imports movies from rottentomatoes rss feeds specified below.', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_urls_use', - 'label': 'Use', - 'default': '1', - }, - { - 'name': 'automation_urls', - 'label': 'url', - 'type': 'combined', - 'combine': ['automation_urls_use', 'automation_urls'], - 'default': 'http://www.rottentomatoes.com/syndication/rss/in_theaters.xml', - }, - { - 'name': 'tomatometer_percent', - 'default': '80', - 'label': 'Tomatometer', - 'description': 'Use as extra scoring requirement', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/automation/trakt/__init__.py b/couchpotato/core/providers/automation/trakt/__init__.py deleted file mode 100644 index 6ae2806b..00000000 --- a/couchpotato/core/providers/automation/trakt/__init__.py +++ /dev/null @@ -1,39 +0,0 @@ -from .main import Trakt - - -def start(): - return Trakt() - -config = [{ - 'name': 'trakt', - 'groups': [ - { - 'tab': 'automation', - 'list': 'watchlist_providers', - 'name': 'trakt_automation', - 'label': 'Trakt', - 'description': 'import movies from your own watchlist', - 'options': [ - { - 'name': 'automation_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'automation_api_key', - 'label': 'Apikey', - }, - { - 'name': 'automation_username', - 'label': 'Username', - }, - { - 'name': 'automation_password', - 'label': 'Password', - 'type': 'password', - 'description': 'When you have "Protect my data" checked on trakt.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/info/_modifier/__init__.py b/couchpotato/core/providers/info/_modifier/__init__.py deleted file mode 100644 index 9dfab703..00000000 --- a/couchpotato/core/providers/info/_modifier/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import MovieResultModifier - - -def start(): - 
return MovieResultModifier() - -config = [] diff --git a/couchpotato/core/providers/info/base.py b/couchpotato/core/providers/info/base.py deleted file mode 100644 index 1b43ab8a..00000000 --- a/couchpotato/core/providers/info/base.py +++ /dev/null @@ -1,5 +0,0 @@ -from couchpotato.core.providers.base import Provider - - -class MovieProvider(Provider): - type = 'movie' diff --git a/couchpotato/core/providers/info/couchpotatoapi/__init__.py b/couchpotato/core/providers/info/couchpotatoapi/__init__.py deleted file mode 100644 index 196dde6a..00000000 --- a/couchpotato/core/providers/info/couchpotatoapi/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import CouchPotatoApi - - -def start(): - return CouchPotatoApi() - -config = [] diff --git a/couchpotato/core/providers/info/omdbapi/__init__.py b/couchpotato/core/providers/info/omdbapi/__init__.py deleted file mode 100644 index b7ea3932..00000000 --- a/couchpotato/core/providers/info/omdbapi/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -from .main import OMDBAPI - - -def start(): - return OMDBAPI() - -config = [] diff --git a/couchpotato/core/providers/info/themoviedb/__init__.py b/couchpotato/core/providers/info/themoviedb/__init__.py deleted file mode 100644 index b981950e..00000000 --- a/couchpotato/core/providers/info/themoviedb/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -from .main import TheMovieDb - - -def start(): - return TheMovieDb() - -config = [{ - 'name': 'themoviedb', - 'groups': [ - { - 'tab': 'providers', - 'name': 'tmdb', - 'label': 'TheMovieDB', - 'hidden': True, - 'description': 'Used for all calls to TheMovieDB.', - 'options': [ - { - 'name': 'api_key', - 'default': '9b939aee0aaafc12a65bf448e4af9543', - 'label': 'Api Key', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/metadata/base.py b/couchpotato/core/providers/metadata/base.py deleted file mode 100644 index d1274adf..00000000 --- a/couchpotato/core/providers/metadata/base.py +++ /dev/null @@ -1,117 +0,0 @@ -from couchpotato.core.event import addEvent, fireEvent -from couchpotato.core.helpers.encoding import sp -from couchpotato.core.helpers.variable import mergeDicts -from couchpotato.core.logger import CPLog -from couchpotato.core.plugins.base import Plugin -from couchpotato.environment import Env -import os -import shutil -import traceback - -log = CPLog(__name__) - - -class MetaDataBase(Plugin): - - enabled_option = 'meta_enabled' - - def __init__(self): - addEvent('renamer.after', self.create) - - def create(self, message = None, group = None): - if self.isDisabled(): return - if not group: group = {} - - log.info('Creating %s metadata.', self.getName()) - - # Update library to get latest info - try: - updated_library = fireEvent('library.update.movie', group['library']['identifier'], extended = True, single = True) - group['library'] = mergeDicts(group['library'], updated_library) - except: - log.error('Failed to update movie, before creating metadata: %s', traceback.format_exc()) - - root_name = self.getRootName(group) - meta_name = os.path.basename(root_name) - root = os.path.dirname(root_name) - - movie_info = group['library'].get('info') - - for file_type in ['nfo', 'thumbnail', 'fanart']: - try: - # Get file path - name = getattr(self, 'get' + file_type.capitalize() + 'Name')(meta_name, root) - - if name and (self.conf('meta_' + file_type) or self.conf('meta_' + file_type) is None): - - # Get file content - content = getattr(self, 'get' + file_type.capitalize())(movie_info = movie_info, data = group) - if content: - log.debug('Creating %s 
file: %s', (file_type, name)) - if os.path.isfile(content): - content = sp(content) - name = sp(name) - - shutil.copy2(content, name) - shutil.copyfile(content, name) - - # Try and copy stats seperately - try: shutil.copystat(content, name) - except: pass - else: - self.createFile(name, content) - group['renamed_files'].append(name) - - try: - os.chmod(sp(name), Env.getPermission('file')) - except: - log.debug('Failed setting permissions for %s: %s', (name, traceback.format_exc())) - - except: - log.error('Unable to create %s file: %s', (file_type, traceback.format_exc())) - - def getRootName(self, data = None): - if not data: data = {} - return os.path.join(data['destination_dir'], data['filename']) - - def getFanartName(self, name, root): - return - - def getThumbnailName(self, name, root): - return - - def getNfoName(self, name, root): - return - - def getNfo(self, movie_info = None, data = None): - if not data: data = {} - if not movie_info: movie_info = {} - - def getThumbnail(self, movie_info = None, data = None, wanted_file_type = 'poster_original'): - if not data: data = {} - if not movie_info: movie_info = {} - file_types = fireEvent('file.types', single = True) - file_type = {} - - for ft in file_types: - if ft.get('identifier') == wanted_file_type: - file_type = ft - break - - # See if it is in current files - for cur_file in data['library'].get('files', []): - if cur_file.get('type_id') is file_type.get('id') and os.path.isfile(cur_file.get('path')): - return cur_file.get('path') - - # Download using existing info - try: - images = data['library']['info']['images'][wanted_file_type] - file_path = fireEvent('file.download', url = images[0], single = True) - return file_path - except: - pass - - def getFanart(self, movie_info = None, data = None): - if not data: data = {} - if not movie_info: movie_info = {} - return self.getThumbnail(movie_info = movie_info, data = data, wanted_file_type = 'backdrop_original') diff --git a/couchpotato/core/providers/metadata/wmc/main.py b/couchpotato/core/providers/metadata/wmc/main.py deleted file mode 100644 index f84897b4..00000000 --- a/couchpotato/core/providers/metadata/wmc/main.py +++ /dev/null @@ -1,8 +0,0 @@ -from couchpotato.core.providers.metadata.base import MetaDataBase -import os - - -class WindowsMediaCenter(MetaDataBase): - - def getThumbnailName(self, name, root): - return os.path.join(root, 'folder.jpg') diff --git a/couchpotato/core/providers/metadata/xbmc/__init__.py b/couchpotato/core/providers/metadata/xbmc/__init__.py deleted file mode 100644 index deb5c908..00000000 --- a/couchpotato/core/providers/metadata/xbmc/__init__.py +++ /dev/null @@ -1,70 +0,0 @@ -from .main import XBMC - - -def start(): - return XBMC() - -config = [{ - 'name': 'xbmc', - 'groups': [ - { - 'tab': 'renamer', - 'subtab': 'metadata', - 'name': 'xbmc_metadata', - 'label': 'XBMC', - 'description': 'Enable metadata XBMC can understand', - 'options': [ - { - 'name': 'meta_enabled', - 'default': False, - 'type': 'enabler', - }, - { - 'name': 'meta_nfo', - 'label': 'NFO', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_nfo_name', - 'label': 'NFO filename', - 'default': '%s.nfo', - 'advanced': True, - 'description': '%s is the rootname of the movie. 
For example "/path/to/movie cd1.mkv" will be "/path/to/movie"' - }, - { - 'name': 'meta_url_only', - 'label': 'Only IMDB URL', - 'default': False, - 'advanced': True, - 'description': 'Create a nfo with only the IMDB url inside', - 'type': 'bool', - }, - { - 'name': 'meta_fanart', - 'label': 'Fanart', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_fanart_name', - 'label': 'Fanart filename', - 'default': '%s-fanart.jpg', - 'advanced': True, - }, - { - 'name': 'meta_thumbnail', - 'label': 'Thumbnail', - 'default': True, - 'type': 'bool', - }, - { - 'name': 'meta_thumbnail_name', - 'label': 'Thumbnail filename', - 'default': '%s.tbn', - 'advanced': True, - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/metadata/xbmc/main.py b/couchpotato/core/providers/metadata/xbmc/main.py deleted file mode 100644 index 267b2822..00000000 --- a/couchpotato/core/providers/metadata/xbmc/main.py +++ /dev/null @@ -1,174 +0,0 @@ -from couchpotato.core.helpers.encoding import toUnicode -from couchpotato.core.helpers.variable import getTitle -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.metadata.base import MetaDataBase -from xml.etree.ElementTree import Element, SubElement, tostring -import os -import re -import traceback -import xml.dom.minidom - -log = CPLog(__name__) - -class XBMC(MetaDataBase): - - def getFanartName(self, name, root): - return self.createMetaName(self.conf('meta_fanart_name'), name, root) - - def getThumbnailName(self, name, root): - return self.createMetaName(self.conf('meta_thumbnail_name'), name, root) - - def getNfoName(self, name, root): - return self.createMetaName(self.conf('meta_nfo_name'), name, root) - - def createMetaName(self, basename, name, root): - return os.path.join(root, basename.replace('%s', name)) - - def getNfo(self, movie_info = None, data = None): - if not data: data = {} - if not movie_info: movie_info = {} - - # return imdb url only - if self.conf('meta_url_only'): - return 'http://www.imdb.com/title/%s/' % toUnicode(data['library']['identifier']) - - nfoxml = Element('movie') - - # Title - try: - el = SubElement(nfoxml, 'title') - el.text = toUnicode(getTitle(data['library'])) - except: - pass - - # IMDB id - try: - el = SubElement(nfoxml, 'id') - el.text = toUnicode(data['library']['identifier']) - except: - pass - - # Runtime - try: - runtime = SubElement(nfoxml, 'runtime') - runtime.text = '%s min' % movie_info.get('runtime') - except: - pass - - # Other values - types = ['year', 'mpaa', 'originaltitle:original_title', 'outline', 'plot', 'tagline', 'premiered:released'] - for type in types: - - if ':' in type: - name, type = type.split(':') - else: - name = type - - try: - if movie_info.get(type): - el = SubElement(nfoxml, name) - el.text = toUnicode(movie_info.get(type, '')) - except: - pass - - # Rating - for rating_type in ['imdb', 'rotten', 'tmdb']: - try: - r, v = movie_info['rating'][rating_type] - rating = SubElement(nfoxml, 'rating') - rating.text = str(r) - votes = SubElement(nfoxml, 'votes') - votes.text = str(v) - break - except: - log.debug('Failed adding rating info from %s: %s', (rating_type, traceback.format_exc())) - - # Genre - for genre in movie_info.get('genres', []): - genres = SubElement(nfoxml, 'genre') - genres.text = toUnicode(genre) - - # Actors - for actor_name in movie_info.get('actor_roles', {}): - role_name = movie_info['actor_roles'][actor_name] - - actor = SubElement(nfoxml, 'actor') - name = SubElement(actor, 'name') - name.text = toUnicode(actor_name) - if role_name: - 
role = SubElement(actor, 'role') - role.text = toUnicode(role_name) - if movie_info['images']['actors'].get(actor_name): - thumb = SubElement(actor, 'thumb') - thumb.text = toUnicode(movie_info['images']['actors'].get(actor_name)) - - # Directors - for director_name in movie_info.get('directors', []): - director = SubElement(nfoxml, 'director') - director.text = toUnicode(director_name) - - # Writers - for writer in movie_info.get('writers', []): - writers = SubElement(nfoxml, 'credits') - writers.text = toUnicode(writer) - - # Sets or collections - collection_name = movie_info.get('collection') - if collection_name: - collection = SubElement(nfoxml, 'set') - collection.text = toUnicode(collection_name) - sorttitle = SubElement(nfoxml, 'sorttitle') - sorttitle.text = '%s %s' % (toUnicode(collection_name), movie_info.get('year')) - - # Images - for image_url in movie_info['images']['poster_original']: - image = SubElement(nfoxml, 'thumb') - image.text = toUnicode(image_url) - fanart = SubElement(nfoxml, 'fanart') - for image_url in movie_info['images']['backdrop_original']: - image = SubElement(fanart, 'thumb') - image.text = toUnicode(image_url) - - # Add trailer if found - trailer_found = False - if data.get('renamed_files'): - for filename in data.get('renamed_files'): - if 'trailer' in filename: - trailer = SubElement(nfoxml, 'trailer') - trailer.text = toUnicode(filename) - trailer_found = True - if not trailer_found and data['files'].get('trailer'): - trailer = SubElement(nfoxml, 'trailer') - trailer.text = toUnicode(data['files']['trailer'][0]) - - # Add file metadata - fileinfo = SubElement(nfoxml, 'fileinfo') - streamdetails = SubElement(fileinfo, 'streamdetails') - - # Video data - if data['meta_data'].get('video'): - video = SubElement(streamdetails, 'video') - codec = SubElement(video, 'codec') - codec.text = toUnicode(data['meta_data']['video']) - aspect = SubElement(video, 'aspect') - aspect.text = str(data['meta_data']['aspect']) - width = SubElement(video, 'width') - width.text = str(data['meta_data']['resolution_width']) - height = SubElement(video, 'height') - height.text = str(data['meta_data']['resolution_height']) - - # Audio data - if data['meta_data'].get('audio'): - audio = SubElement(streamdetails, 'audio') - codec = SubElement(audio, 'codec') - codec.text = toUnicode(data['meta_data'].get('audio')) - channels = SubElement(audio, 'channels') - channels.text = toUnicode(data['meta_data'].get('audio_channels')) - - # Clean up the xml and return it - nfoxml = xml.dom.minidom.parseString(tostring(nfoxml)) - xml_string = nfoxml.toprettyxml(indent = ' ') - text_re = re.compile('>\n\s+([^<>\s].*?)\n\s+</', re.DOTALL) - xml_string = text_re.sub('>\g<1></', xml_string) - - return xml_string diff --git a/couchpotato/core/providers/nzb/binsearch/__init__.py b/couchpotato/core/providers/nzb/binsearch/__init__.py deleted file mode 100644 --- a/couchpotato/core/providers/nzb/binsearch/__init__.py +++ /dev/null -from .main import BinSearch - - -def start(): - return BinSearch() - -config = [{ - 'name': 'binsearch', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'nzb_providers', - 'name': 'binsearch', - 'description': 'Free provider, less accurate. See BinSearch', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/newznab/__init__.py b/couchpotato/core/providers/nzb/newznab/__init__.py deleted file mode 100644 index 97f1cfad..00000000 --- a/couchpotato/core/providers/nzb/newznab/__init__.py +++ /dev/null @@ -1,60 +0,0 @@ -from .main import Newznab - - -def start(): - return Newznab() - -config = [{ - 'name': 'newznab', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'nzb_providers', - 'name': 'newznab', - 'order': 10, - 'description': 'Enable NewzNab such as NZB.su, \ - NZBs.org, DOGnzb.cr, \ - Spotweb, NZBGeek, \ - SmackDown, NZBFinder',
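The deleted newznab options that follow ('use', 'host', 'api_key', 'extra_score', 'custom_tag') each hold one comma-separated value per indexer, and `getHosts()` in the deleted main.py below re-zips them by position into per-host dicts. A small illustration with made-up values (the real code uses `splitString`/`tryInt`, omitted here for brevity):

```python
# Hypothetical values: position n in every comma-separated setting
# describes host n, so all the lists must stay the same length.
use = '1,0,1'
host = 'nzb.su,dognzb.cr,nzbs.org'
api_key = 'key-a,,key-c'

hosts = [
    {'use': u, 'host': h, 'api_key': k}
    for u, h, k in zip(use.split(','), host.split(','), api_key.split(','))
]

# As isEnabled() below checks: a host is only searched when its 'use'
# flag is set and an API key is present.
enabled = [h for h in hosts if h['use'] == '1' and h['api_key']]
assert [h['host'] for h in enabled] == ['nzb.su', 'nzbs.org']
```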
- 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - { - 'name': 'use', - 'default': '0,0,0,0,0,0' - }, - { - 'name': 'host', - 'default': 'nzb.su,dognzb.cr,nzbs.org,https://index.nzbgeek.info, https://smackdownonyou.com, https://www.nzbfinder.ws', - 'description': 'The hostname of your newznab provider', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'default': '0,0,0,0,0,0', - 'description': 'Starting score for each release found via this provider.', - }, - { - 'name': 'custom_tag', - 'advanced': True, - 'label': 'Custom tag', - 'default': ',,,,,', - 'description': 'Add custom tags, for example add rls=1 to get only scene releases from nzbs.org', - }, - { - 'name': 'api_key', - 'default': ',,,,,', - 'label': 'Api Key', - 'description': 'Can be found on your profile page', - 'type': 'combined', - 'combine': ['use', 'host', 'api_key', 'extra_score', 'custom_tag'], - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/newznab/main.py b/couchpotato/core/providers/nzb/newznab/main.py deleted file mode 100644 index deadaa1c..00000000 --- a/couchpotato/core/providers/nzb/newznab/main.py +++ /dev/null @@ -1,194 +0,0 @@ -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import cleanHost, splitString, tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.base import ResultList -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -from dateutil.parser import parse -from urllib2 import HTTPError -from urlparse import urlparse -import time -import traceback -import urllib2 - -log = CPLog(__name__) - - -class Newznab(NZBProvider, RSS): - - urls = { - 'download': 'get&id=%s', - 'detail': 'details&id=%s', - 'search': 'movie', - } - - limits_reached = {} - - http_time_between_calls = 1 # Seconds - - def search(self, movie, quality): - hosts = self.getHosts() - - results = ResultList(self, movie, quality, imdb_results = True) - - for host in hosts: - if self.isDisabled(host): - continue - - self._searchOnHost(host, movie, quality, results) - - return results - - def _searchOnHost(self, host, movie, quality, results): - - arguments = tryUrlencode({ - 'imdbid': movie['library']['identifier'].replace('tt', ''), - 'apikey': host['api_key'], - 'extended': 1 - }) + ('&%s' % host['custom_tag'] if host.get('custom_tag') else '') - url = '%s&%s' % (self.getUrl(host['host'], self.urls['search']), arguments) - - nzbs = self.getRSSData(url, cache_timeout = 1800, headers = {'User-Agent': Env.getIdentifier()}) - - for nzb in nzbs: - - date = None - spotter = None - for item in nzb: - if date and spotter: - break - if item.attrib.get('name') == 'usenetdate': - date = item.attrib.get('value') - break - - # Get the name of the person who posts the spot - if item.attrib.get('name') == 'poster': - if "@spot.net" in item.attrib.get('value'): - spotter = item.attrib.get('value').split("@")[0] - continue - - if not date: - date = self.getTextElement(nzb, 'pubDate') - - nzb_id = self.getTextElement(nzb, 'guid').split('/')[-1:].pop() - name = self.getTextElement(nzb, 'title') - - if not name: - continue - - name_extra = '' - if spotter: - name_extra = spotter - - results.append({ - 'id': nzb_id, - 'provider_extra': urlparse(host['host']).hostname or host['host'], - 'name': toUnicode(name), - 'name_extra': name_extra, - 'age': 
self.calculateAge(int(time.mktime(parse(date).timetuple()))), - 'size': int(self.getElement(nzb, 'enclosure').attrib['length']) / 1024 / 1024, - 'url': (self.getUrl(host['host'], self.urls['download']) % tryUrlencode(nzb_id)) + self.getApiExt(host), - 'detail_url': '%sdetails/%s' % (cleanHost(host['host']), tryUrlencode(nzb_id)), - 'content': self.getTextElement(nzb, 'description'), - 'score': host['extra_score'], - }) - - def getHosts(self): - - uses = splitString(str(self.conf('use')), clean = False) - hosts = splitString(self.conf('host'), clean = False) - api_keys = splitString(self.conf('api_key'), clean = False) - extra_score = splitString(self.conf('extra_score'), clean = False) - custom_tags = splitString(self.conf('custom_tag'), clean = False) - - list = [] - for nr in range(len(hosts)): - - try: key = api_keys[nr] - except: key = '' - - try: host = hosts[nr] - except: host = '' - - try: score = tryInt(extra_score[nr]) - except: score = 0 - - try: custom_tag = custom_tags[nr] - except: custom_tag = '' - - list.append({ - 'use': uses[nr], - 'host': host, - 'api_key': key, - 'extra_score': score, - 'custom_tag': custom_tag - }) - - return list - - def belongsTo(self, url, provider = None, host = None): - - hosts = self.getHosts() - - for host in hosts: - result = super(Newznab, self).belongsTo(url, host = host['host'], provider = provider) - if result: - return result - - def getUrl(self, host, type): - if '?page=newznabapi' in host: - return cleanHost(host)[:-1] + '&t=' + type - - return cleanHost(host) + 'api?t=' + type - - def isDisabled(self, host = None): - return not self.isEnabled(host) - - def isEnabled(self, host = None): - - # Return true if at least one is enabled and no host is given - if host is None: - for host in self.getHosts(): - if self.isEnabled(host): - return True - return False - - return NZBProvider.isEnabled(self) and host['host'] and host['api_key'] and int(host['use']) - - def getApiExt(self, host): - return '&apikey=%s' % host['api_key'] - - def download(self, url = '', nzb_id = ''): - host = urlparse(url).hostname - - if self.limits_reached.get(host): - # Try again in 3 hours - if self.limits_reached[host] > time.time() - 10800: - return 'try_next' - - try: - # Get final redirected url - log.debug('Checking %s for redirects.', url) - req = urllib2.Request(url) - req.add_header('User-Agent', self.user_agent) - res = urllib2.urlopen(req) - finalurl = res.geturl() - if finalurl != url: - log.debug('Redirect url used: %s', finalurl) - - data = self.urlopen(finalurl, show_error = False) - self.limits_reached[host] = False - return data - except HTTPError as e: - if e.code == 503: - response = e.read().lower() - if 'maximum api' in response or 'download limit' in response: - if not self.limits_reached.get(host): - log.error('Limit reached for newznab provider: %s', host) - self.limits_reached[host] = time.time() - return 'try_next' - - log.error('Failed download from %s: %s', (host, traceback.format_exc())) - - return 'try_next' diff --git a/couchpotato/core/providers/nzb/nzbclub/__init__.py b/couchpotato/core/providers/nzb/nzbclub/__init__.py deleted file mode 100644 index 02a69404..00000000 --- a/couchpotato/core/providers/nzb/nzbclub/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -from .main import NZBClub - - -def start(): - return NZBClub() - -config = [{ - 'name': 'nzbclub', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'nzb_providers', - 'name': 'NZBClub', - 'description': 'Free provider, less accurate. 
See NZBClub', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbindex/__init__.py b/couchpotato/core/providers/nzb/nzbindex/__init__.py deleted file mode 100644 index acb53e19..00000000 --- a/couchpotato/core/providers/nzb/nzbindex/__init__.py +++ /dev/null @@ -1,33 +0,0 @@ -from .main import NzbIndex - - -def start(): - return NzbIndex() - -config = [{ - 'name': 'nzbindex', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'nzb_providers', - 'name': 'nzbindex', - 'description': 'Free provider, less accurate. See NZBIndex', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/nzbindex/main.py b/couchpotato/core/providers/nzb/nzbindex/main.py deleted file mode 100644 index a143c199..00000000 --- a/couchpotato/core/providers/nzb/nzbindex/main.py +++ /dev/null @@ -1,79 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from couchpotato.environment import Env -from dateutil.parser import parse -import re -import time - -log = CPLog(__name__) - - -class NzbIndex(NZBProvider, RSS): - - urls = { - 'download': 'https://www.nzbindex.com/download/', - 'search': 'https://www.nzbindex.com/rss/?%s', - } - - http_time_between_calls = 1 # Seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s" | "%s (%s)"' % (title, movie['library']['year'], title, movie['library']['year']) - arguments = tryUrlencode({ - 'q': q, - 'age': Env.setting('retention', 'nzb'), - 'sort': 'agedesc', - 'minsize': quality.get('size_min'), - 'maxsize': quality.get('size_max'), - 'rating': 1, - 'max': 250, - 'more': 1, - 'complete': 1, - }) - - nzbs = self.getRSSData(self.urls['search'] % arguments) - - for nzb in nzbs: - - enclosure = self.getElement(nzb, 'enclosure').attrib - nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) - - try: - description = self.getTextElement(nzb, "description") - except: - description = '' - - def extra_check(item): - if '#c20000' in item['description'].lower(): - log.info('Wrong: Seems to be passworded: %s', item['name']) - return False - - return True - - results.append({ - 'id': nzbindex_id, - 'name': self.getTextElement(nzb, "title"), - 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))), - 'size': tryInt(enclosure['length']) / 1024 / 1024, - 'url': enclosure['url'], - 'detail_url': enclosure['url'].replace('/download/', '/release/'), - 'description': description, - 'get_more_info': self.getMoreInfo, - 'extra_check': extra_check, - }) - - def getMoreInfo(self, item): - try: - if '/nfo/' in item['description'].lower(): - nfo_url = re.search('href=\"(?P<nfo>.+)\" ', item['description']).group('nfo') - full_description = self.getCache('nzbindex.%s' % item['id'], url = nfo_url,
cache_timeout = 25920000) - html = BeautifulSoup(full_description) - item['description'] = toUnicode(html.find('pre', attrs = {'id':'nfo0'}).text) - except: - pass - diff --git a/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py b/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py deleted file mode 100644 index 2f3990de..00000000 --- a/couchpotato/core/providers/nzb/omgwtfnzbs/__init__.py +++ /dev/null @@ -1,41 +0,0 @@ -from .main import OMGWTFNZBs - - -def start(): - return OMGWTFNZBs() - -config = [{ - 'name': 'omgwtfnzbs', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'nzb_providers', - 'name': 'OMGWTFNZBs', - 'description': 'See OMGWTFNZBs', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'api_key', - 'label': 'Api Key', - 'default': '', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'default': 20, - 'type': 'int', - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/nzb/omgwtfnzbs/main.py b/couchpotato/core/providers/nzb/omgwtfnzbs/main.py deleted file mode 100644 index 93925752..00000000 --- a/couchpotato/core/providers/nzb/omgwtfnzbs/main.py +++ /dev/null @@ -1,63 +0,0 @@ -from couchpotato.core.event import fireEvent -from couchpotato.core.helpers.encoding import toUnicode, tryUrlencode -from couchpotato.core.helpers.rss import RSS -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.nzb.base import NZBProvider -from dateutil.parser import parse -from urlparse import urlparse, parse_qs -import time - -log = CPLog(__name__) - - -class OMGWTFNZBs(NZBProvider, RSS): - - urls = { - 'search': 'https://rss.omgwtfnzbs.org/rss-search.php?%s', - 'detail_url': 'https://omgwtfnzbs.org/details.php?id=%s', - } - - http_time_between_calls = 1 #seconds - - cat_ids = [ - ([15], ['dvdrip']), - ([15, 16], ['brrip']), - ([16], ['720p', '1080p', 'bd50']), - ([17], ['dvdr']), - ] - cat_backup_id = 'movie' - - def search(self, movie, quality): - - if quality['identifier'] in fireEvent('quality.pre_releases', single = True): - return [] - - return super(OMGWTFNZBs, self).search(movie, quality) - - def _searchOnTitle(self, title, movie, quality, results): - - q = '%s %s' % (title, movie['library']['year']) - params = tryUrlencode({ - 'search': q, - 'catid': ','.join([str(x) for x in self.getCatId(quality['identifier'])]), - 'user': self.conf('username', default = ''), - 'api': self.conf('api_key', default = ''), - }) - - nzbs = self.getRSSData(self.urls['search'] % params) - - for nzb in nzbs: - - enclosure = self.getElement(nzb, 'enclosure').attrib - nzb_id = parse_qs(urlparse(self.getTextElement(nzb, 'link')).query).get('id')[0] - - results.append({ - 'id': nzb_id, - 'name': toUnicode(self.getTextElement(nzb, 'title')), - 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, 'pubDate')).timetuple()))), - 'size': tryInt(enclosure['length']) / 1024 / 1024, - 'url': enclosure['url'], - 'detail_url': self.urls['detail_url'] % nzb_id, - 'description': self.getTextElement(nzb, 'description') - }) diff --git a/couchpotato/core/providers/torrent/awesomehd/__init__.py b/couchpotato/core/providers/torrent/awesomehd/__init__.py deleted file mode 100644 index 6f076703..00000000 --- a/couchpotato/core/providers/torrent/awesomehd/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -from .main import 
AwesomeHD - - -def start(): - return AwesomeHD() - -config = [{ - 'name': 'awesomehd', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'Awesome-HD', - 'description': 'See AHD', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'passkey', - 'default': '', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'only_internal', - 'advanced': True, - 'type': 'bool', - 'default': 1, - 'description': 'Only search for internal releases.' - }, - { - 'name': 'prefer_internal', - 'advanced': True, - 'type': 'bool', - 'default': 1, - 'description': 'Favors internal releases over non-internal releases.' - }, - { - 'name': 'favor', - 'advanced': True, - 'default': 'both', - 'type': 'dropdown', - 'values': [('Encodes & Remuxes', 'both'), ('Encodes', 'encode'), ('Remuxes', 'remux'), ('None', 'none')], - 'description': 'Give extra scoring to encodes or remuxes.' - }, - { - 'name': 'extra_score', - 'advanced': True, - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - }, - ], - }, - ], -}] - diff --git a/couchpotato/core/providers/torrent/awesomehd/main.py b/couchpotato/core/providers/torrent/awesomehd/main.py deleted file mode 100644 index ca6a30df..00000000 --- a/couchpotato/core/providers/torrent/awesomehd/main.py +++ /dev/null @@ -1,69 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import re -import traceback - -log = CPLog(__name__) - - -class AwesomeHD(TorrentProvider): - - urls = { - 'test': 'https://awesome-hd.net/', - 'detail': 'https://awesome-hd.net/torrents.php?torrentid=%s', - 'search': 'https://awesome-hd.net/searchapi.php?action=imdbsearch&passkey=%s&imdb=%s&internal=%s', - 'download': 'https://awesome-hd.net/torrents.php?action=download&id=%s&authkey=%s&torrent_pass=%s', - } - http_time_between_calls = 1 - - def _search(self, movie, quality, results): - - data = self.getHTMLData(self.urls['search'] % (self.conf('passkey'), movie['library']['identifier'], self.conf('only_internal'))) - - if data: - try: - soup = BeautifulSoup(data) - - if soup.find('error'): - log.error(soup.find('error').get_text()) - return - - authkey = soup.find('authkey').get_text() - entries = soup.find_all('torrent') - - for entry in entries: - - torrentscore = 0 - torrent_id = entry.find('id').get_text() - name = entry.find('name').get_text() - year = entry.find('year').get_text() - releasegroup = entry.find('releasegroup').get_text() - resolution = entry.find('resolution').get_text() - encoding = entry.find('encoding').get_text() - freeleech = entry.find('freeleech').get_text() - torrent_desc = '/ %s / %s / %s ' % (releasegroup, resolution, encoding) - - if freeleech == '0.25' and self.conf('prefer_internal'): - torrent_desc += '/ Internal' - torrentscore += 200 - - if encoding == 'x264' and self.conf('favor') in ['encode', 'both']: - torrentscore += 300 - if re.search('Remux', encoding) and self.conf('favor') in ['remux', 'both']: - torrentscore += 200 - - results.append({ - 'id': torrent_id, - 'name': 
re.sub('[^A-Za-z0-9\-_ \(\).]+', '', '%s (%s) %s' % (name, year, torrent_desc)), - 'url': self.urls['download'] % (torrent_id, authkey, self.conf('passkey')), - 'detail_url': self.urls['detail'] % torrent_id, - 'size': self.parseSize(entry.find('size').get_text()), - 'seeders': tryInt(entry.find('seeders').get_text()), - 'leechers': tryInt(entry.find('leechers').get_text()), - 'score': torrentscore - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) diff --git a/couchpotato/core/providers/torrent/bithdtv/__init__.py b/couchpotato/core/providers/torrent/bithdtv/__init__.py deleted file mode 100644 index ffc5363f..00000000 --- a/couchpotato/core/providers/torrent/bithdtv/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import BiTHDTV - - -def start(): - return BiTHDTV() - -config = [{ - 'name': 'bithdtv', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'BiT-HDTV', - 'description': 'See BiT-HDTV', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/bithdtv/main.py b/couchpotato/core/providers/torrent/bithdtv/main.py deleted file mode 100644 index 90117de4..00000000 --- a/couchpotato/core/providers/torrent/bithdtv/main.py +++ /dev/null @@ -1,89 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class BiTHDTV(TorrentProvider): - - urls = { - 'test': 'http://www.bit-hdtv.com/', - 'login': 'http://www.bit-hdtv.com/takelogin.php', - 'login_check': 'http://www.bit-hdtv.com/messages.php', - 'detail': 'http://www.bit-hdtv.com/details.php?id=%s', - 'search': 'http://www.bit-hdtv.com/torrents.php?', - } - - # Searches for movies only - BiT-HDTV's subcategory and resolution search filters appear to be broken - cat_id_movies = 7 - - http_time_between_calls = 1 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - arguments = tryUrlencode({ - 'search': '%s %s' % (title.replace(':', ''), movie['library']['year']), - 'cat': self.cat_id_movies - }) - - url = "%s&%s" % (self.urls['search'], arguments) - - data = self.getHTMLData(url) - - if data: - # Remove BiT-HDTV's output garbage so outdated BS4 versions successfully parse the HTML - split_data = data.partition('-->') - if '## SELECT COUNT(' in split_data[0]: - data = split_data[2] - - html = BeautifulSoup(data) - - try: - result_table = html.find('table', attrs = {'width' : '750', 'class' : ''}) - if result_table is None: - return - - entries = result_table.find_all('tr') - for result in entries[1:]: - - cells = 
result.find_all('td') - link = cells[2].find('a') - torrent_id = link['href'].replace('/details.php?id=', '') - - results.append({ - 'id': torrent_id, - 'name': link.contents[0].get_text(), - 'url': cells[0].find('a')['href'], - 'detail_url': self.urls['detail'] % torrent_id, - 'size': self.parseSize(cells[6].get_text()), - 'seeders': tryInt(cells[8].string), - 'leechers': tryInt(cells[9].string), - 'get_more_info': self.getMoreInfo, - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def getLoginParams(self): - return { - 'username': self.conf('username'), - 'password': self.conf('password'), - } - - def getMoreInfo(self, item): - full_description = self.getCache('bithdtv.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('table', attrs = {'class':'detail'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item - - def loginSuccess(self, output): - return 'logout.php' in output.lower() - - loginCheckSuccess = loginSuccess diff --git a/couchpotato/core/providers/torrent/bitsoup/__init__.py b/couchpotato/core/providers/torrent/bitsoup/__init__.py deleted file mode 100644 index da07cc3b..00000000 --- a/couchpotato/core/providers/torrent/bitsoup/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import Bitsoup - - -def start(): - return Bitsoup() - -config = [{ - 'name': 'bitsoup', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'Bitsoup', - 'description': 'See Bitsoup', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/bitsoup/main.py b/couchpotato/core/providers/torrent/bitsoup/main.py deleted file mode 100644 index a709c5c1..00000000 --- a/couchpotato/core/providers/torrent/bitsoup/main.py +++ /dev/null @@ -1,87 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import simplifyString, tryUrlencode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class Bitsoup(TorrentProvider): - - urls = { - 'test': 'https://www.bitsoup.me/', - 'login': 'https://www.bitsoup.me/takelogin.php', - 'login_check': 'https://www.bitsoup.me/my.php', - 'search': 'https://www.bitsoup.me/browse.php?', - 'baseurl': 'https://www.bitsoup.me/%s', - } - - http_time_between_calls = 1 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s" %s' % (simplifyString(title), movie['library']['year']) - arguments = tryUrlencode({ - 'search': q, - }) - url = "%s&%s" % (self.urls['search'], arguments) - - data = self.getHTMLData(url) - - 
if data: - html = BeautifulSoup(data) - - try: - result_table = html.find('table', attrs = {'class': 'koptekst'}) - if not result_table or 'nothing found!' in data.lower(): - return - - entries = result_table.find_all('tr') - for result in entries[1:]: - - all_cells = result.find_all('td') - - torrent = all_cells[1].find('a') - download = all_cells[3].find('a') - - torrent_id = torrent['href'] - torrent_id = torrent_id.replace('details.php?id=', '') - torrent_id = torrent_id.replace('&hit=1', '') - - torrent_name = torrent.getText() - - torrent_size = self.parseSize(all_cells[7].getText()) - torrent_seeders = tryInt(all_cells[9].getText()) - torrent_leechers = tryInt(all_cells[10].getText()) - torrent_url = self.urls['baseurl'] % download['href'] - torrent_detail_url = self.urls['baseurl'] % torrent['href'] - - results.append({ - 'id': torrent_id, - 'name': torrent_name, - 'size': torrent_size, - 'seeders': torrent_seeders, - 'leechers': torrent_leechers, - 'url': torrent_url, - 'detail_url': torrent_detail_url, - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - - def getLoginParams(self): - return { - 'username': self.conf('username'), - 'password': self.conf('password'), - 'ssl': 'yes', - } - - - def loginSuccess(self, output): - return 'logout.php' in output.lower() - - loginCheckSuccess = loginSuccess - diff --git a/couchpotato/core/providers/torrent/hdbits/__init__.py b/couchpotato/core/providers/torrent/hdbits/__init__.py deleted file mode 100644 index 1e9aa3ce..00000000 --- a/couchpotato/core/providers/torrent/hdbits/__init__.py +++ /dev/null @@ -1,54 +0,0 @@ -from .main import HDBits - - -def start(): - return HDBits() - -config = [{ - 'name': 'hdbits', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'HDBits', - 'description': 'See HDBits', - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'passkey', - 'default': '', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - }, - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/hdbits/main.py b/couchpotato/core/providers/torrent/hdbits/main.py deleted file mode 100644 index ce17bbac..00000000 --- a/couchpotato/core/providers/torrent/hdbits/main.py +++ /dev/null @@ -1,63 +0,0 @@ -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider - -import re -import json -import traceback - -log = CPLog(__name__) - - -class HDBits(TorrentProvider): - - urls = { - 'test': 'https://hdbits.org/', - 'detail': 'https://hdbits.org/details.php?id=%s', - 'download': 'https://hdbits.org/download.php?id=%s&passkey=%s', - 'api': 'https://hdbits.org/api/torrents' - } - - http_time_between_calls = 1 #seconds - - def _post_query(self, **params): - - post_data = { - 'username': self.conf('username'), - 'passkey': self.conf('passkey') - } - post_data.update(params) - - try: - result = 
self.getJsonData(self.urls['api'], data = json.dumps(post_data)) - - if result: - if result['status'] != 0: - log.error('Error searching hdbits: %s' % result['message']) - else: - return result['data'] - except: - pass - - return None - - def _search(self, movie, quality, results): - - match = re.match(r'tt(\d{7})', movie['library']['identifier']) - - data = self._post_query(imdb = {'id': match.group(1)}) - - if data: - try: - for result in data: - results.append({ - 'id': result['id'], - 'name': result['name'], - 'url': self.urls['download'] % (result['id'], self.conf('passkey')), - 'detail_url': self.urls['detail'] % result['id'], - 'size': self.parseSize(result['size']), - 'seeders': tryInt(result['seeders']), - 'leechers': tryInt(result['leechers']) - }) - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) diff --git a/couchpotato/core/providers/torrent/ilovetorrents/__init__.py b/couchpotato/core/providers/torrent/ilovetorrents/__init__.py deleted file mode 100644 index f3f7b479..00000000 --- a/couchpotato/core/providers/torrent/ilovetorrents/__init__.py +++ /dev/null @@ -1,61 +0,0 @@ -from .main import ILoveTorrents - - -def start(): - return ILoveTorrents() - -config = [{ - 'name': 'ilovetorrents', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'ILoveTorrents', - 'description': 'Where the Love of Torrents is Born', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False - }, - { - 'name': 'username', - 'label': 'Username', - 'type': 'string', - 'default': '', - 'description': 'The user name for your ILT account', - }, - { - 'name': 'password', - 'label': 'Password', - 'type': 'password', - 'default': '', - 'description': 'The password for your ILT account.', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - } - ] -}] diff --git a/couchpotato/core/providers/torrent/iptorrents/__init__.py b/couchpotato/core/providers/torrent/iptorrents/__init__.py deleted file mode 100644 index 579d7974..00000000 --- a/couchpotato/core/providers/torrent/iptorrents/__init__.py +++ /dev/null @@ -1,62 +0,0 @@ -from .main import IPTorrents - - -def start(): - return IPTorrents() - -config = [{ - 'name': 'iptorrents', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'IPTorrents', - 'description': 'See IPTorrents', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'freeleech', - 'default': 0, - 'type': 'bool', - 'description': 'Only search for [FreeLeech] torrents.', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is 
met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/iptorrents/main.py b/couchpotato/core/providers/torrent/iptorrents/main.py deleted file mode 100644 index 4a2c6dbf..00000000 --- a/couchpotato/core/providers/torrent/iptorrents/main.py +++ /dev/null @@ -1,123 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toSafeString -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class IPTorrents(TorrentProvider): - - urls = { - 'test': 'https://www.iptorrents.com/', - 'base_url': 'https://www.iptorrents.com', - 'login': 'https://www.iptorrents.com/torrents/', - 'login_check': 'https://www.iptorrents.com/inbox.php', - 'search': 'https://www.iptorrents.com/torrents/?l%d=1%s&q=%s&qf=ti&p=%d', - } - - cat_ids = [ - ([48], ['720p', '1080p', 'bd50']), - ([72], ['cam', 'ts', 'tc', 'r5', 'scr']), - ([7], ['dvdrip', 'brrip']), - ([6], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - cat_backup_id = None - - def _searchOnTitle(self, title, movie, quality, results): - - freeleech = '' if not self.conf('freeleech') else '&free=on' - - pages = 1 - current_page = 1 - while current_page <= pages and not self.shuttingDown(): - - url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], freeleech, tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), current_page) - data = self.getHTMLData(url) - - if data: - html = BeautifulSoup(data) - - try: - page_nav = html.find('span', attrs = {'class' : 'page_nav'}) - if page_nav: - next_link = page_nav.find("a", text = "Next") - if next_link: - final_page_link = next_link.previous_sibling.previous_sibling - pages = int(final_page_link.string) - - result_table = html.find('table', attrs = {'class' : 'torrents'}) - - if not result_table or 'nothing found!' 
in data.lower():
-                        return
-
-                    entries = result_table.find_all('tr')
-
-                    columns = self.getColumns(entries)
-
-                    if 'seeders' not in columns or 'leechers' not in columns:
-                        log.warning('Unrecognized table format returned')
-                        return
-
-                    for result in entries[1:]:
-
-                        cells = result.find_all('td')
-                        if len(cells) <= 1:
-                            break
-
-                        torrent = cells[1].find('a')
-
-                        torrent_id = torrent['href'].replace('/details.php?id=', '')
-                        torrent_name = torrent.string
-                        torrent_download_url = self.urls['base_url'] + (result.find_all('td')[3].find('a'))['href'].replace(' ', '.')
-                        torrent_details_url = self.urls['base_url'] + torrent['href']
-                        torrent_size = self.parseSize(result.find_all('td')[5].string)
-                        torrent_seeders = tryInt(cells[columns['seeders']].string)
-                        torrent_leechers = tryInt(cells[columns['leechers']].string)
-
-                        results.append({
-                            'id': torrent_id,
-                            'name': torrent_name,
-                            'url': torrent_download_url,
-                            'detail_url': torrent_details_url,
-                            'size': torrent_size,
-                            'seeders': torrent_seeders,
-                            'leechers': torrent_leechers,
-                        })
-
-            except:
-                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
-                break
-
-            current_page += 1
-
-    def getColumns(self, entries):
-        result = {}
-
-        for x, col in enumerate(entries[0].find_all('th')):
-            name = col.text or col.find('img')['title']
-            key = toSafeString(name).strip().lower()
-
-            if not key:
-                continue
-
-            result[key] = x
-
-        return result
-
-    def getLoginParams(self):
-        return {
-            'username': self.conf('username'),
-            'password': self.conf('password'),
-            'login': 'submit',
-        }
-
-    def loginSuccess(self, output):
-        return 'don\'t have an account' not in output.lower()
-
-    def loginCheckSuccess(self, output):
-        return '/logout.php' in output.lower()
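IPTorrents' getColumns() (deleted above) maps header text to a column index so the row parser survives the site reordering its table. A minimal standalone sketch of the same idea, assuming BeautifulSoup is available; the markup here is illustrative, not IPTorrents' actual output:

from bs4 import BeautifulSoup

html = BeautifulSoup('<table><tr><th>Name</th><th>Seeders</th><th>Leechers</th></tr></table>', 'html.parser')

# Build {header text -> column index}, mirroring the deleted getColumns() helper.
columns = {}
for x, col in enumerate(html.find('tr').find_all('th')):
    key = col.text.strip().lower()
    if key:
        columns[key] = x

assert columns == {'name': 0, 'seeders': 1, 'leechers': 2}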
diff --git a/couchpotato/core/providers/torrent/kickasstorrents/__init__.py b/couchpotato/core/providers/torrent/kickasstorrents/__init__.py
deleted file mode 100644
index ffe1040b..00000000
--- a/couchpotato/core/providers/torrent/kickasstorrents/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from .main import KickAssTorrents
-
-
-def start():
-    return KickAssTorrents()
-
-config = [{
-    'name': 'kickasstorrents',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'KickAssTorrents',
-            'description': 'See KickAssTorrents',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': True,
-                },
-                {
-                    'name': 'domain',
-                    'advanced': True,
-                    'label': 'Proxy server',
-                    'description': 'Domain for requests, keep empty to let CouchPotato pick.',
-                },
-                {
-                    'name': 'seed_ratio',
-                    'label': 'Seed ratio',
-                    'type': 'float',
-                    'default': 1,
-                    'description': 'Will not be (re)moved until this seed ratio is met.',
-                },
-                {
-                    'name': 'seed_time',
-                    'label': 'Seed time',
-                    'type': 'int',
-                    'default': 40,
-                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
-                },
-                {
-                    'name': 'extra_score',
-                    'advanced': True,
-                    'label': 'Extra Score',
-                    'type': 'int',
-                    'default': 0,
-                    'description': 'Starting score for each release found via this provider.',
-                }
-            ],
-        },
-    ],
-}]
diff --git a/couchpotato/core/providers/torrent/kickasstorrents/main.py b/couchpotato/core/providers/torrent/kickasstorrents/main.py
deleted file mode 100644
index f96e9812..00000000
--- a/couchpotato/core/providers/torrent/kickasstorrents/main.py
+++ /dev/null
@@ -1,114 +0,0 @@
-from bs4 import BeautifulSoup
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
-import re
-import traceback
-
-log = CPLog(__name__)
-
-
-class KickAssTorrents(TorrentMagnetProvider):
-
-    urls = {
-        'detail': '%s/%s',
-        'search': '%s/%s-i%s/',
-    }
-
-    cat_ids = [
-        (['cam'], ['cam']),
-        (['telesync'], ['ts', 'tc']),
-        (['screener', 'tvrip'], ['screener']),
-        (['x264', '720p', '1080p', 'blu-ray', 'hdrip'], ['bd50', '1080p', '720p', 'brrip']),
-        (['dvdrip'], ['dvdrip']),
-        (['dvd'], ['dvdr']),
-    ]
-
-    http_time_between_calls = 1 #seconds
-    cat_backup_id = None
-
-    proxy_list = [
-        'https://kickass.to',
-        'http://kickass.pw',
-        'http://www.kickassunblock.info',
-        'http://www.kickassproxy.info',
-    ]
-
-    def _search(self, movie, quality, results):
-
-        data = self.getHTMLData(self.urls['search'] % (self.getDomain(), 'm', movie['library']['identifier'].replace('tt', '')))
-
-        if data:
-
-            cat_ids = self.getCatId(quality['identifier'])
-            table_order = ['name', 'size', None, 'age', 'seeds', 'leechers']
-
-            try:
-                html = BeautifulSoup(data)
-                resultdiv = html.find('div', attrs = {'class': 'tabs'})
-                for result in resultdiv.find_all('div', recursive = False):
-                    if result.get('id').lower().strip('tab-') not in cat_ids:
-                        continue
-
-                    try:
-                        for temp in result.find_all('tr'):
-                            if temp['class'] == 'firstr' or not temp.get('id'):
-                                continue
-
-                            new = {}
-
-                            nr = 0
-                            for td in temp.find_all('td'):
-                                column_name = table_order[nr]
-                                if column_name:
-
-                                    if column_name == 'name':
-                                        link = td.find('div', {'class': 'torrentname'}).find_all('a')[1]
-                                        new['id'] = temp.get('id')[-8:]
-                                        new['name'] = link.text
-                                        new['url'] = td.find('a', 'imagnet')['href']
-                                        new['detail_url'] = self.urls['detail'] % (self.getDomain(), link['href'][1:])
-                                        new['score'] = 20 if td.find('a', 'iverif') else 0
-                                    elif column_name == 'size':
-                                        new['size'] = self.parseSize(td.text)
-                                    elif column_name == 'age':
-                                        new['age'] = self.ageToDays(td.text)
-                                    elif column_name == 'seeds':
-                                        new['seeders'] = tryInt(td.text)
-                                    elif column_name == 'leechers':
-                                        new['leechers'] = tryInt(td.text)
-
-                                nr += 1
-
-                            results.append(new)
-                    except:
-                        log.error('Failed parsing KickAssTorrents: %s', traceback.format_exc())
-
-            except AttributeError:
-                log.debug('No search results found.')
-
-    def ageToDays(self, age_str):
-        age = 0
-        age_str = age_str.replace('&nbsp;', ' ')
-
-        regex = '(\d*.?\d+).(sec|hour|day|week|month|year)+'
-        matches = re.findall(regex, age_str)
-        for match in matches:
-            nr, size = match
-            mult = 1
-            if size == 'week':
-                mult = 7
-            elif size == 'month':
-                mult = 30.5
-            elif size == 'year':
-                mult = 365
-
-            age += tryInt(nr) * mult
-
-        return tryInt(age)
-
-    def isEnabled(self):
-        return super(KickAssTorrents, self).isEnabled() and self.getDomain()
-
-    def correctProxy(self, data):
-        return 'search query' in data.lower()
diff --git a/couchpotato/core/providers/torrent/passthepopcorn/__init__.py b/couchpotato/core/providers/torrent/passthepopcorn/__init__.py
deleted file mode 100644
index a3e57c79..00000000
--- a/couchpotato/core/providers/torrent/passthepopcorn/__init__.py
+++ /dev/null
@@ -1,90 +0,0 @@
-from .main import PassThePopcorn
-
-
-def start():
-    return PassThePopcorn()
-
-config = [{
-    'name': 'passthepopcorn',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'PassThePopcorn',
-            'description': 'See PassThePopcorn.me',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': False
-                },
-                {
-                    'name': 'domain',
-                    'advanced': True,
-                    'label': 'Proxy server',
-                    'description': 'Domain for requests (HTTPS only!), keep empty to
use default (tls.passthepopcorn.me).', - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'passkey', - 'default': '', - }, - { - 'name': 'prefer_golden', - 'advanced': True, - 'type': 'bool', - 'label': 'Prefer golden', - 'default': 1, - 'description': 'Favors Golden Popcorn-releases over all other releases.' - }, - { - 'name': 'prefer_scene', - 'advanced': True, - 'type': 'bool', - 'label': 'Prefer scene', - 'default': 0, - 'description': 'Favors scene-releases over non-scene releases.' - }, - { - 'name': 'require_approval', - 'advanced': True, - 'type': 'bool', - 'label': 'Require approval', - 'default': 0, - 'description': 'Require staff-approval for releases to be accepted.' - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - } - ], - } - ] -}] diff --git a/couchpotato/core/providers/torrent/publichd/__init__.py b/couchpotato/core/providers/torrent/publichd/__init__.py deleted file mode 100644 index 3c20c51f..00000000 --- a/couchpotato/core/providers/torrent/publichd/__init__.py +++ /dev/null @@ -1,47 +0,0 @@ -from .main import PublicHD - - -def start(): - return PublicHD() - -config = [{ - 'name': 'publichd', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'PublicHD', - 'description': 'Public Torrent site with only HD content. 
See PublicHD', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': True, - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/publichd/main.py b/couchpotato/core/providers/torrent/publichd/main.py deleted file mode 100644 index b7c32fba..00000000 --- a/couchpotato/core/providers/torrent/publichd/main.py +++ /dev/null @@ -1,88 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentMagnetProvider -from urlparse import parse_qs -import re -import traceback - -log = CPLog(__name__) - - -class PublicHD(TorrentMagnetProvider): - - urls = { - 'test': 'https://publichd.se', - 'detail': 'https://publichd.se/index.php?page=torrent-details&id=%s', - 'search': 'https://publichd.se/index.php', - } - http_time_between_calls = 0 - - def search(self, movie, quality): - - if not quality.get('hd', False): - return [] - - return super(PublicHD, self).search(movie, quality) - - def _searchOnTitle(self, title, movie, quality, results): - - params = tryUrlencode({ - 'page':'torrents', - 'search': '%s %s' % (title, movie['library']['year']), - 'active': 1, - }) - - data = self.getHTMLData('%s?%s' % (self.urls['search'], params)) - - if data: - - try: - soup = BeautifulSoup(data) - - results_table = soup.find('table', attrs = {'id': 'bgtorrlist2'}) - entries = results_table.find_all('tr') - - for result in entries[2:len(entries) - 1]: - info_url = result.find(href = re.compile('torrent-details')) - download = result.find(href = re.compile('magnet:')) - - if info_url and download: - - url = parse_qs(info_url['href']) - - results.append({ - 'id': url['id'][0], - 'name': info_url.string, - 'url': download['href'], - 'detail_url': self.urls['detail'] % url['id'][0], - 'size': self.parseSize(result.find_all('td')[7].string), - 'seeders': tryInt(result.find_all('td')[4].string), - 'leechers': tryInt(result.find_all('td')[5].string), - 'get_more_info': self.getMoreInfo - }) - - except: - log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def getMoreInfo(self, item): - - cache_key = 'publichd.%s' % item['id'] - description = self.getCache(cache_key) - - if not description: - - try: - full_description = self.urlopen(item['detail_url']) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'id': 'torrmain'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - except: - log.error('Failed getting more info for %s', item['name']) - description = '' - - self.setCache(cache_key, description, timeout = 25920000) - - item['description'] = description - return item diff --git a/couchpotato/core/providers/torrent/sceneaccess/__init__.py b/couchpotato/core/providers/torrent/sceneaccess/__init__.py deleted file mode 100644 index 3fa5d97f..00000000 --- 
a/couchpotato/core/providers/torrent/sceneaccess/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import SceneAccess - - -def start(): - return SceneAccess() - -config = [{ - 'name': 'sceneaccess', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'SceneAccess', - 'description': 'See SceneAccess', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/sceneaccess/main.py b/couchpotato/core/providers/torrent/sceneaccess/main.py deleted file mode 100644 index c1c871ee..00000000 --- a/couchpotato/core/providers/torrent/sceneaccess/main.py +++ /dev/null @@ -1,99 +0,0 @@ -from bs4 import BeautifulSoup -from couchpotato.core.helpers.encoding import tryUrlencode, toUnicode -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider -import traceback - -log = CPLog(__name__) - - -class SceneAccess(TorrentProvider): - - urls = { - 'test': 'https://www.sceneaccess.eu/', - 'login': 'https://www.sceneaccess.eu/login', - 'login_check': 'https://www.sceneaccess.eu/inbox', - 'detail': 'https://www.sceneaccess.eu/details?id=%s', - 'search': 'https://www.sceneaccess.eu/browse?c%d=%d', - 'download': 'https://www.sceneaccess.eu/%s', - } - - cat_ids = [ - ([22], ['720p', '1080p']), - ([7], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([8], ['dvdr']), - ] - - http_time_between_calls = 1 #seconds - - def _search(self, movie, quality, results): - - cat = self.getCatId(quality['identifier']) - if not cat: - return - - url = self.urls['search'] % ( - cat[0], - cat[0] - ) - - arguments = tryUrlencode({ - 'search': movie['library']['identifier'], - 'method': 3, - }) - url = "%s&%s" % (url, arguments) - - - data = self.getHTMLData(url) - - if data: - html = BeautifulSoup(data) - - try: - resultsTable = html.find('table', attrs = {'id' : 'torrents-table'}) - if resultsTable is None: - return - - entries = resultsTable.find_all('tr', attrs = {'class' : 'tt_row'}) - for result in entries: - - link = result.find('td', attrs = {'class' : 'ttr_name'}).find('a') - url = result.find('td', attrs = {'class' : 'td_dl'}).find('a') - leechers = result.find('td', attrs = {'class' : 'ttr_leechers'}).find('a') - torrent_id = link['href'].replace('details?id=', '') - - results.append({ - 'id': torrent_id, - 'name': link['title'], - 'url': self.urls['download'] % url['href'], - 'detail_url': self.urls['detail'] % torrent_id, - 'size': self.parseSize(result.find('td', attrs = {'class' : 'ttr_size'}).contents[0]), - 'seeders': tryInt(result.find('td', attrs = {'class' : 'ttr_seeders'}).find('a').string), - 'leechers': tryInt(leechers.string) if leechers else 0, - 'get_more_info': self.getMoreInfo, - }) - - except: - log.error('Failed 
getting results from %s: %s', (self.getName(), traceback.format_exc())) - - def getLoginParams(self): - return { - 'username': self.conf('username'), - 'password': self.conf('password'), - 'submit': 'come on in', - } - - def getMoreInfo(self, item): - full_description = self.getCache('sceneaccess.%s' % item['id'], item['detail_url'], cache_timeout = 25920000) - html = BeautifulSoup(full_description) - nfo_pre = html.find('div', attrs = {'id':'details_table'}) - description = toUnicode(nfo_pre.text) if nfo_pre else '' - - item['description'] = description - return item - - def loginSuccess(self, output): - return '/inbox' in output.lower() - - loginCheckSuccess = loginSuccess diff --git a/couchpotato/core/providers/torrent/thepiratebay/__init__.py b/couchpotato/core/providers/torrent/thepiratebay/__init__.py deleted file mode 100644 index 8b3921cd..00000000 --- a/couchpotato/core/providers/torrent/thepiratebay/__init__.py +++ /dev/null @@ -1,53 +0,0 @@ -from .main import ThePirateBay - - -def start(): - return ThePirateBay() - -config = [{ - 'name': 'thepiratebay', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'ThePirateBay', - 'description': 'The world\'s largest bittorrent tracker. See ThePirateBay', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False - }, - { - 'name': 'domain', - 'advanced': True, - 'label': 'Proxy server', - 'description': 'Domain for requests, keep empty to let CouchPotato pick.', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - } - ] -}] diff --git a/couchpotato/core/providers/torrent/torrentbytes/__init__.py b/couchpotato/core/providers/torrent/torrentbytes/__init__.py deleted file mode 100644 index 79dec932..00000000 --- a/couchpotato/core/providers/torrent/torrentbytes/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import TorrentBytes - - -def start(): - return TorrentBytes() - -config = [{ - 'name': 'torrentbytes', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'TorrentBytes', - 'description': 'See TorrentBytes', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 20, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/torrentday/__init__.py b/couchpotato/core/providers/torrent/torrentday/__init__.py deleted file mode 100644 index 133ec914..00000000 --- 
a/couchpotato/core/providers/torrent/torrentday/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import TorrentDay - - -def start(): - return TorrentDay() - -config = [{ - 'name': 'torrentday', - 'groups': [ - { - 'tab': 'searcher', - 'list': 'torrent_providers', - 'name': 'TorrentDay', - 'description': 'See TorrentDay', - 'wizard': True, - 'options': [ - { - 'name': 'enabled', - 'type': 'enabler', - 'default': False, - }, - { - 'name': 'username', - 'default': '', - }, - { - 'name': 'password', - 'default': '', - 'type': 'password', - }, - { - 'name': 'seed_ratio', - 'label': 'Seed ratio', - 'type': 'float', - 'default': 1, - 'description': 'Will not be (re)moved until this seed ratio is met.', - }, - { - 'name': 'seed_time', - 'label': 'Seed time', - 'type': 'int', - 'default': 40, - 'description': 'Will not be (re)moved until this seed time (in hours) is met.', - }, - { - 'name': 'extra_score', - 'advanced': True, - 'label': 'Extra Score', - 'type': 'int', - 'default': 0, - 'description': 'Starting score for each release found via this provider.', - } - ], - }, - ], -}] diff --git a/couchpotato/core/providers/torrent/torrentday/main.py b/couchpotato/core/providers/torrent/torrentday/main.py deleted file mode 100644 index 6d343234..00000000 --- a/couchpotato/core/providers/torrent/torrentday/main.py +++ /dev/null @@ -1,68 +0,0 @@ -from couchpotato.core.helpers.variable import tryInt -from couchpotato.core.logger import CPLog -from couchpotato.core.providers.torrent.base import TorrentProvider - -log = CPLog(__name__) - - -class TorrentDay(TorrentProvider): - - urls = { - 'test': 'http://www.td.af/', - 'login': 'http://www.td.af/torrents/', - 'login_check': 'http://www.torrentday.com/userdetails.php', - 'detail': 'http://www.td.af/details.php?id=%s', - 'search': 'http://www.td.af/V3/API/API.php', - 'download': 'http://www.td.af/download.php/%s/%s', - } - - cat_ids = [ - ([11], ['720p', '1080p']), - ([1, 21, 25], ['cam', 'ts', 'dvdrip', 'tc', 'r5', 'scr', 'brrip']), - ([3], ['dvdr']), - ([5], ['bd50']), - ] - - http_time_between_calls = 1 #seconds - - def _searchOnTitle(self, title, movie, quality, results): - - q = '"%s %s"' % (title, movie['library']['year']) - - data = { - '/browse.php?': None, - 'cata': 'yes', - 'jxt': 8, - 'jxw': 'b', - 'search': q, - } - - data = self.getJsonData(self.urls['search'], data = data) - try: torrents = data.get('Fs', [])[0].get('Cn', {}).get('torrents', []) - except: return - - for torrent in torrents: - results.append({ - 'id': torrent['id'], - 'name': torrent['name'], - 'url': self.urls['download'] % (torrent['id'], torrent['fname']), - 'detail_url': self.urls['detail'] % torrent['id'], - 'size': self.parseSize(torrent.get('size')), - 'seeders': tryInt(torrent.get('seed')), - 'leechers': tryInt(torrent.get('leech')), - }) - - def getLoginParams(self): - return { - 'username': self.conf('username'), - 'password': self.conf('password'), - 'submit.x': 18, - 'submit.y': 11, - 'submit': 'submit', - } - - def loginSuccess(self, output): - return 'Password not correct' not in output - - def loginCheckSuccess(self, output): - return 'logout.php' in output.lower() diff --git a/couchpotato/core/providers/torrent/torrentleech/__init__.py b/couchpotato/core/providers/torrent/torrentleech/__init__.py deleted file mode 100644 index e64d4baa..00000000 --- a/couchpotato/core/providers/torrent/torrentleech/__init__.py +++ /dev/null @@ -1,56 +0,0 @@ -from .main import TorrentLeech - - -def start(): - return TorrentLeech() - -config = [{ - 'name': 'torrentleech', - 
'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'TorrentLeech',
-            'description': 'See TorrentLeech',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': False,
-                },
-                {
-                    'name': 'username',
-                    'default': '',
-                },
-                {
-                    'name': 'password',
-                    'default': '',
-                    'type': 'password',
-                },
-                {
-                    'name': 'seed_ratio',
-                    'label': 'Seed ratio',
-                    'type': 'float',
-                    'default': 1,
-                    'description': 'Will not be (re)moved until this seed ratio is met.',
-                },
-                {
-                    'name': 'seed_time',
-                    'label': 'Seed time',
-                    'type': 'int',
-                    'default': 40,
-                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
-                },
-                {
-                    'name': 'extra_score',
-                    'advanced': True,
-                    'label': 'Extra Score',
-                    'type': 'int',
-                    'default': 20,
-                    'description': 'Starting score for each release found via this provider.',
-                }
-            ],
-        },
-    ],
-}]
diff --git a/couchpotato/core/providers/torrent/torrentleech/main.py b/couchpotato/core/providers/torrent/torrentleech/main.py
deleted file mode 100644
index ea6158df..00000000
--- a/couchpotato/core/providers/torrent/torrentleech/main.py
+++ /dev/null
@@ -1,81 +0,0 @@
-from bs4 import BeautifulSoup
-from couchpotato.core.helpers.encoding import tryUrlencode
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.torrent.base import TorrentProvider
-import traceback
-
-
-log = CPLog(__name__)
-
-
-class TorrentLeech(TorrentProvider):
-
-    urls = {
-        'test': 'http://www.torrentleech.org/',
-        'login': 'http://www.torrentleech.org/user/account/login/',
-        'login_check': 'http://torrentleech.org/user/messages',
-        'detail': 'http://www.torrentleech.org/torrent/%s',
-        'search': 'http://www.torrentleech.org/torrents/browse/index/query/%s/categories/%d',
-        'download': 'http://www.torrentleech.org%s',
-    }
-
-    cat_ids = [
-        ([13], ['720p', '1080p']),
-        ([8], ['cam']),
-        ([9], ['ts', 'tc']),
-        ([10], ['r5', 'scr']),
-        ([11], ['dvdrip']),
-        ([14], ['brrip']),
-        ([12], ['dvdr']),
-    ]
-
-    http_time_between_calls = 1 #seconds
-    cat_backup_id = None
-
-    def _searchOnTitle(self, title, movie, quality, results):
-
-        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), self.getCatId(quality['identifier'])[0])
-        data = self.getHTMLData(url)
-
-        if data:
-            html = BeautifulSoup(data)
-
-            try:
-                result_table = html.find('table', attrs = {'id' : 'torrenttable'})
-                if not result_table:
-                    return
-
-                entries = result_table.find_all('tr')
-
-                for result in entries[1:]:
-
-                    link = result.find('td', attrs = {'class' : 'name'}).find('a')
-                    url = result.find('td', attrs = {'class' : 'quickdownload'}).find('a')
-                    details = result.find('td', attrs = {'class' : 'name'}).find('a')
-
-                    results.append({
-                        'id': link['href'].replace('/torrent/', ''),
-                        'name': link.string,
-                        'url': self.urls['download'] % url['href'],
-                        'detail_url': self.urls['download'] % details['href'],
-                        'size': self.parseSize(result.find_all('td')[4].string),
-                        'seeders': tryInt(result.find('td', attrs = {'class' : 'seeders'}).string),
-                        'leechers': tryInt(result.find('td', attrs = {'class' : 'leechers'}).string),
-                    })
-
-            except:
-                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
-
-    def getLoginParams(self):
-        return {
-            'username': self.conf('username'),
-            'password': self.conf('password'),
-            'remember_me': 'on',
-            'login': 'submit',
-        }
-
-    def loginSuccess(self, output):
-        return '/user/account/logout' in output.lower() or 'welcome back' in output.lower()
-
-    loginCheckSuccess = loginSuccess
diff --git a/couchpotato/core/providers/torrent/torrentpotato/__init__.py b/couchpotato/core/providers/torrent/torrentpotato/__init__.py
deleted file mode 100644
index 03795ffb..00000000
--- a/couchpotato/core/providers/torrent/torrentpotato/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-from .main import TorrentPotato
-
-
-def start():
-    return TorrentPotato()
-
-config = [{
-    'name': 'torrentpotato',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'TorrentPotato',
-            'order': 10,
-            'description': 'CouchPotato torrent provider. Check out the wiki page about this provider for more info.',
-            'wizard': True,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': False,
-                },
-                {
-                    'name': 'use',
-                    'default': ''
-                },
-                {
-                    'name': 'host',
-                    'default': '',
-                    'description': 'The url path of your TorrentPotato provider.',
-                },
-                {
-                    'name': 'extra_score',
-                    'advanced': True,
-                    'label': 'Extra Score',
-                    'default': '0',
-                    'description': 'Starting score for each release found via this provider.',
-                },
-                {
-                    'name': 'name',
-                    'label': 'Username',
-                    'default': '',
-                },
-                {
-                    'name': 'seed_ratio',
-                    'label': 'Seed ratio',
-                    'default': '1',
-                    'description': 'Will not be (re)moved until this seed ratio is met.',
-                },
-                {
-                    'name': 'seed_time',
-                    'label': 'Seed time',
-                    'default': '40',
-                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
-                },
-                {
-                    'name': 'pass_key',
-                    'default': ',',
-                    'label': 'Pass Key',
-                    'description': 'Can be found on your profile page',
-                    'type': 'combined',
-                    'combine': ['use', 'host', 'pass_key', 'name', 'seed_ratio', 'seed_time', 'extra_score'],
-                },
-            ],
-        },
-    ],
-}]
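Most of the private-tracker providers deleted in this changeset share one login contract: getLoginParams() supplies the POST body for urls['login'], and loginSuccess()/loginCheckSuccess() decide from the raw response HTML whether the session is valid. A minimal sketch of that contract, with requests standing in for CouchPotato's own HTTP layer; the class and tracker URL are assumptions for illustration, not the real base class:

import requests

class TrackerSession(object):
    # Sketch of the getLoginParams/loginSuccess pattern used by the providers above.

    login_url = 'https://tracker.example/takelogin.php'  # hypothetical tracker

    def getLoginParams(self):
        return {'username': 'user', 'password': 'pass', 'login': 'submit'}

    def loginSuccess(self, output):
        # Same heuristic the deleted providers use: a logout link means we are logged in.
        return 'logout.php' in output.lower()

    def login(self):
        response = requests.post(self.login_url, data = self.getLoginParams())
        return self.loginSuccess(response.text)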
diff --git a/couchpotato/core/providers/torrent/torrentshack/__init__.py b/couchpotato/core/providers/torrent/torrentshack/__init__.py
deleted file mode 100644
index 058236e4..00000000
--- a/couchpotato/core/providers/torrent/torrentshack/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from .main import TorrentShack
-
-
-def start():
-    return TorrentShack()
-
-config = [{
-    'name': 'torrentshack',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'TorrentShack',
-            'description': 'See TorrentShack',
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': False,
-                },
-                {
-                    'name': 'username',
-                    'default': '',
-                },
-                {
-                    'name': 'password',
-                    'default': '',
-                    'type': 'password',
-                },
-                {
-                    'name': 'seed_ratio',
-                    'label': 'Seed ratio',
-                    'type': 'float',
-                    'default': 1,
-                    'description': 'Will not be (re)moved until this seed ratio is met.',
-                },
-                {
-                    'name': 'seed_time',
-                    'label': 'Seed time',
-                    'type': 'int',
-                    'default': 40,
-                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
-                },
-                {
-                    'name': 'scene_only',
-                    'type': 'bool',
-                    'default': False,
-                    'description': 'Only allow scene releases.'
-                },
-                {
-                    'name': 'extra_score',
-                    'advanced': True,
-                    'label': 'Extra Score',
-                    'type': 'int',
-                    'default': 0,
-                    'description': 'Starting score for each release found via this provider.',
-                }
-            ],
-        },
-    ],
-}]
diff --git a/couchpotato/core/providers/torrent/torrentshack/main.py b/couchpotato/core/providers/torrent/torrentshack/main.py
deleted file mode 100644
index f0cd5997..00000000
--- a/couchpotato/core/providers/torrent/torrentshack/main.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from bs4 import BeautifulSoup
-from couchpotato.core.helpers.encoding import tryUrlencode
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.torrent.base import TorrentProvider
-import traceback
-import six
-
-log = CPLog(__name__)
-
-
-class TorrentShack(TorrentProvider):
-
-    urls = {
-        'test': 'https://torrentshack.net/',
-        'login': 'https://torrentshack.net/login.php',
-        'login_check': 'https://torrentshack.net/inbox.php',
-        'detail': 'https://torrentshack.net/torrent/%s',
-        'search': 'https://torrentshack.net/torrents.php?action=advanced&searchstr=%s&scene=%s&filter_cat[%d]=1',
-        'download': 'https://torrentshack.net/%s',
-    }
-
-    cat_ids = [
-        ([970], ['bd50']),
-        ([300], ['720p', '1080p']),
-        ([350], ['dvdr']),
-        ([400], ['brrip', 'dvdrip']),
-    ]
-
-    http_time_between_calls = 1 #seconds
-    cat_backup_id = 400
-
-    def _searchOnTitle(self, title, movie, quality, results):
-
-        scene_only = '1' if self.conf('scene_only') else ''
-
-        url = self.urls['search'] % (tryUrlencode('%s %s' % (title.replace(':', ''), movie['library']['year'])), scene_only, self.getCatId(quality['identifier'])[0])
-        data = self.getHTMLData(url)
-
-        if data:
-            html = BeautifulSoup(data)
-
-            try:
-                result_table = html.find('table', attrs = {'id' : 'torrent_table'})
-                if not result_table:
-                    return
-
-                entries = result_table.find_all('tr', attrs = {'class' : 'torrent'})
-
-                for result in entries:
-
-                    link = result.find('span', attrs = {'class' : 'torrent_name_link'}).parent
-                    url = result.find('td', attrs = {'class' : 'torrent_td'}).find('a')
-
-                    results.append({
-                        'id': link['href'].replace('torrents.php?torrentid=', ''),
-                        'name': six.text_type(link.span.string).translate({ord(six.u('\xad')): None}),
-                        'url': self.urls['download'] % url['href'],
-                        'detail_url': self.urls['download'] % link['href'],
-                        'size': self.parseSize(result.find_all('td')[4].string),
-                        'seeders': tryInt(result.find_all('td')[6].string),
-                        'leechers': tryInt(result.find_all('td')[7].string),
                    })
-
-            except:
-                log.error('Failed parsing %s: %s', (self.getName(), traceback.format_exc()))
-
-    def getLoginParams(self):
-        return {
-            'username': self.conf('username'),
-            'password': self.conf('password'),
-            'keeplogged': '1',
-            'login': 'Login',
-        }
-
-    def loginSuccess(self, output):
-        return 'logout.php' in output.lower()
-
-    loginCheckSuccess = loginSuccess
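TorrentShack's row parser above strips soft hyphens (U+00AD) that the site injects into long release names; without that, the stored name never matches the real release. A standalone sketch of the same cleanup, assuming a Python 2/3 environment with six installed (the sample title is made up):

import six

def clean_title(raw):
    # Drop invisible soft hyphens (U+00AD) before comparing release names.
    return six.text_type(raw).translate({ord(six.u('\xad')): None})

print(clean_title(six.u('Some\xadMovie.2013.720p')))  # SomeMovie.2013.720p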
diff --git a/couchpotato/core/providers/torrent/yify/__init__.py b/couchpotato/core/providers/torrent/yify/__init__.py
deleted file mode 100644
index 3a359608..00000000
--- a/couchpotato/core/providers/torrent/yify/__init__.py
+++ /dev/null
@@ -1,53 +0,0 @@
-from .main import Yify
-
-
-def start():
-    return Yify()
-
-config = [{
-    'name': 'yify',
-    'groups': [
-        {
-            'tab': 'searcher',
-            'list': 'torrent_providers',
-            'name': 'Yify',
-            'description': 'Free provider, less accurate. Small HD movies, encoded by Yify.',
-            'wizard': False,
-            'options': [
-                {
-                    'name': 'enabled',
-                    'type': 'enabler',
-                    'default': 0
-                },
-                {
-                    'name': 'domain',
-                    'advanced': True,
-                    'label': 'Proxy server',
-                    'description': 'Domain for requests, keep empty to let CouchPotato pick.',
-                },
-                {
-                    'name': 'seed_ratio',
-                    'label': 'Seed ratio',
-                    'type': 'float',
-                    'default': 1,
-                    'description': 'Will not be (re)moved until this seed ratio is met.',
-                },
-                {
-                    'name': 'seed_time',
-                    'label': 'Seed time',
-                    'type': 'int',
-                    'default': 40,
-                    'description': 'Will not be (re)moved until this seed time (in hours) is met.',
-                },
-                {
-                    'name': 'extra_score',
-                    'advanced': True,
-                    'label': 'Extra Score',
-                    'type': 'int',
-                    'default': 0,
-                    'description': 'Starting score for each release found via this provider.',
-                }
-            ],
-        }
-    ]
-}]
diff --git a/couchpotato/core/providers/torrent/yify/main.py b/couchpotato/core/providers/torrent/yify/main.py
deleted file mode 100644
index fe1b8204..00000000
--- a/couchpotato/core/providers/torrent/yify/main.py
+++ /dev/null
@@ -1,66 +0,0 @@
-from couchpotato.core.helpers.variable import tryInt
-from couchpotato.core.logger import CPLog
-from couchpotato.core.providers.torrent.base import TorrentMagnetProvider
-import traceback
-
-log = CPLog(__name__)
-
-
-class Yify(TorrentMagnetProvider):
-
-    urls = {
-        'test': '%s/api',
-        'search': '%s/api/list.json?keywords=%s&quality=%s',
-        'detail': '%s/api/movie.json?id=%s'
-    }
-
-    http_time_between_calls = 1 #seconds
-
-    proxy_list = [
-        'http://yify.unlocktorrent.com',
-        'http://yify-torrents.com.come.in',
-        'http://yts.re',
-        'http://yts.im',
-        'https://yify-torrents.im',
-    ]
-
-    def search(self, movie, quality):
-
-        if not quality.get('hd', False):
-            return []
-
-        return super(Yify, self).search(movie, quality)
-
-    def _search(self, movie, quality, results):
-
-        search_url = self.urls['search'] % (self.getDomain(), movie['library']['identifier'], quality['identifier'])
-
-        data = self.getJsonData(search_url)
-
-        if data and data.get('MovieList'):
-            try:
-                for result in data.get('MovieList'):
-
-                    try:
-                        title = result['TorrentUrl'].split('/')[-1][:-8].replace('_', '.').strip('._')
-                        title = title.replace('.-.', '-')
-                        title = title.replace('..', '.')
-                    except:
-                        continue
-
-                    results.append({
-                        'id': result['MovieID'],
-                        'name': title,
-                        'url': result['TorrentMagnetUrl'],
-                        'detail_url': self.urls['detail'] % (self.getDomain(), result['MovieID']),
-                        'size': self.parseSize(result['Size']),
-                        'seeders': tryInt(result['TorrentSeeds']),
-                        'leechers': tryInt(result['TorrentPeers'])
-                    })
-
-            except:
-                log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
-
-    def correctProxy(self, data):
-        data = data.lower()
-        return 'yify' in data and 'yts' in data
diff --git a/couchpotato/core/providers/trailer/hdtrailers/__init__.py b/couchpotato/core/providers/trailer/hdtrailers/__init__.py
deleted file mode 100644
index 83b93004..00000000
--- a/couchpotato/core/providers/trailer/hdtrailers/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import HDTrailers
-
-
-def start():
-    return HDTrailers()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/allocine/__init__.py b/couchpotato/core/providers/userscript/allocine/__init__.py
deleted file mode 100644
index cb2ba992..00000000
--- a/couchpotato/core/providers/userscript/allocine/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import AlloCine
-
-
-def start():
-    return AlloCine()
-
-config = []
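The userscript providers being removed here are nearly empty by design: each subclass of UserscriptBase only declares the URL globs it handles, and the base class does the page-to-movie matching. A sketch of what such a provider amounts to, mirroring the Criticker and Letterboxd classes in this diff (Example and mysite.example are placeholder names):

from couchpotato.core.providers.userscript.base import UserscriptBase


class Example(UserscriptBase):

    # Pages on which the CouchPotato userscript should offer to add the movie.
    includes = ['*://mysite.example/movie/*']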
diff --git a/couchpotato/core/providers/userscript/appletrailers/__init__.py b/couchpotato/core/providers/userscript/appletrailers/__init__.py
deleted file mode 100644
index 075217a8..00000000
--- a/couchpotato/core/providers/userscript/appletrailers/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import AppleTrailers
-
-
-def start():
-    return AppleTrailers()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/criticker/__init__.py b/couchpotato/core/providers/userscript/criticker/__init__.py
deleted file mode 100644
index ae24aa1e..00000000
--- a/couchpotato/core/providers/userscript/criticker/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Criticker
-
-
-def start():
-    return Criticker()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/criticker/main.py b/couchpotato/core/providers/userscript/criticker/main.py
deleted file mode 100644
index 680d9a36..00000000
--- a/couchpotato/core/providers/userscript/criticker/main.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from couchpotato.core.providers.userscript.base import UserscriptBase
-
-
-class Criticker(UserscriptBase):
-
-    includes = ['http://www.criticker.com/film/*']
diff --git a/couchpotato/core/providers/userscript/filmweb/__init__.py b/couchpotato/core/providers/userscript/filmweb/__init__.py
deleted file mode 100644
index 3098610c..00000000
--- a/couchpotato/core/providers/userscript/filmweb/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Filmweb
-
-
-def start():
-    return Filmweb()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/flickchart/__init__.py b/couchpotato/core/providers/userscript/flickchart/__init__.py
deleted file mode 100644
index 18a88ffe..00000000
--- a/couchpotato/core/providers/userscript/flickchart/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Flickchart
-
-
-def start():
-    return Flickchart()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/imdb/__init__.py b/couchpotato/core/providers/userscript/imdb/__init__.py
deleted file mode 100644
index c25319b7..00000000
--- a/couchpotato/core/providers/userscript/imdb/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import IMDB
-
-
-def start():
-    return IMDB()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/letterboxd/__init__.py b/couchpotato/core/providers/userscript/letterboxd/__init__.py
deleted file mode 100644
index 2fd89000..00000000
--- a/couchpotato/core/providers/userscript/letterboxd/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Letterboxd
-
-
-def start():
-    return Letterboxd()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/letterboxd/main.py b/couchpotato/core/providers/userscript/letterboxd/main.py
deleted file mode 100644
index c0d91d79..00000000
--- a/couchpotato/core/providers/userscript/letterboxd/main.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from couchpotato.core.providers.userscript.base import UserscriptBase
-
-
-class Letterboxd(UserscriptBase):
-
-    includes = ['*://letterboxd.com/film/*']
diff --git a/couchpotato/core/providers/userscript/moviemeter/__init__.py b/couchpotato/core/providers/userscript/moviemeter/__init__.py
deleted file mode 100644
index 7a05a75a..00000000
--- a/couchpotato/core/providers/userscript/moviemeter/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import MovieMeter
-
-
-def start():
-    return MovieMeter()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/moviesio/__init__.py b/couchpotato/core/providers/userscript/moviesio/__init__.py
deleted file mode 100644
index e29e8d08..00000000
--- a/couchpotato/core/providers/userscript/moviesio/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import MoviesIO
-
-
-def start():
-    return MoviesIO()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/moviesio/main.py b/couchpotato/core/providers/userscript/moviesio/main.py
deleted file mode 100644
index 5dab6183..00000000
--- a/couchpotato/core/providers/userscript/moviesio/main.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from couchpotato.core.providers.userscript.base import UserscriptBase
-
-
-class MoviesIO(UserscriptBase):
-
-    includes = ['*://movies.io/m/*']
diff --git a/couchpotato/core/providers/userscript/reddit/__init__.py b/couchpotato/core/providers/userscript/reddit/__init__.py
deleted file mode 100644
index a74bbf0d..00000000
--- a/couchpotato/core/providers/userscript/reddit/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Reddit
-
-
-def start():
-    return Reddit()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/rottentomatoes/__init__.py b/couchpotato/core/providers/userscript/rottentomatoes/__init__.py
deleted file mode 100644
index 363f103e..00000000
--- a/couchpotato/core/providers/userscript/rottentomatoes/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import RottenTomatoes
-
-
-def start():
-    return RottenTomatoes()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/sharethe/__init__.py b/couchpotato/core/providers/userscript/sharethe/__init__.py
deleted file mode 100644
index 3cf393af..00000000
--- a/couchpotato/core/providers/userscript/sharethe/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import ShareThe
-
-
-def start():
-    return ShareThe()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/tmdb/__init__.py b/couchpotato/core/providers/userscript/tmdb/__init__.py
deleted file mode 100644
index c77330c3..00000000
--- a/couchpotato/core/providers/userscript/tmdb/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import TMDB
-
-
-def start():
-    return TMDB()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/trakt/__init__.py b/couchpotato/core/providers/userscript/trakt/__init__.py
deleted file mode 100644
index 39c17c32..00000000
--- a/couchpotato/core/providers/userscript/trakt/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import Trakt
-
-
-def start():
-    return Trakt()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/whiwa/__init__.py b/couchpotato/core/providers/userscript/whiwa/__init__.py
deleted file mode 100644
index c8fd3c9d..00000000
--- a/couchpotato/core/providers/userscript/whiwa/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import WHiWA
-
-
-def start():
-    return WHiWA()
-
-config = []
diff --git a/couchpotato/core/providers/userscript/whiwa/main.py b/couchpotato/core/providers/userscript/whiwa/main.py
deleted file mode 100644
index 40ffa2a9..00000000
--- a/couchpotato/core/providers/userscript/whiwa/main.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from couchpotato.core.providers.userscript.base import UserscriptBase
-
-
-class WHiWA(UserscriptBase):
-
-    includes = ['http://whiwa.net/stats/movie/*']
diff --git a/couchpotato/core/providers/userscript/youteather/__init__.py b/couchpotato/core/providers/userscript/youteather/__init__.py
deleted file mode 100644
index f31e911e..00000000
--- a/couchpotato/core/providers/userscript/youteather/__init__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from .main import YouTheater
-
-
-def start():
-    return YouTheater()
-
-config = []
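Every userscript provider removed above is the same two-file boilerplate: an `__init__.py` with `start()`/`config` and a `main.py` that subclasses `UserscriptBase` and lists the URL globs it matches. A minimal sketch of the pattern, with `Example` as a placeholder name:

```python
from couchpotato.core.providers.userscript.base import UserscriptBase


class Example(UserscriptBase):

    # URL patterns on which the userscript/extension offers the
    # "add to CouchPotato" action; wildcard style matches the
    # includes seen in the providers deleted above.
    includes = ['*://example.org/movie/*']
```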
diff --git a/couchpotato/core/settings/__init__.py b/couchpotato/core/settings.py
similarity index 79%
rename from couchpotato/core/settings/__init__.py
rename to couchpotato/core/settings.py
index 0e65c778..c6de952b 100644
--- a/couchpotato/core/settings/__init__.py
+++ b/couchpotato/core/settings.py
@@ -1,11 +1,12 @@
 from __future__ import with_statement
-import traceback
+import ConfigParser
+from hashlib import md5
+
+from CodernityDB.hash_index import HashIndex
 from couchpotato.api import addApiView
 from couchpotato.core.event import addEvent, fireEvent
 from couchpotato.core.helpers.encoding import toUnicode
 from couchpotato.core.helpers.variable import mergeDicts, tryInt, tryFloat
-from couchpotato.core.settings.model import Properties
-import ConfigParser
 
 
 class Settings(object):
@@ -42,15 +43,22 @@
                 }
             }"""}
         })
+
         addApiView('settings.save', self.saveView, docs = {
             'desc': 'Save setting to config file (settings.conf)',
             'params': {
                 'section': {'desc': 'The section name in settings.conf'},
-                'option': {'desc': 'The option name'},
+                'name': {'desc': 'The option name'},
                 'value': {'desc': 'The value you want to save'},
             }
         })
+
+        addEvent('database.setup', self.databaseSetup)
+
+        self.file = None
+        self.p = None
+        self.log = None
+
     def setFile(self, config_file):
         self.file = config_file
@@ -62,6 +70,17 @@
 
         self.connectEvents()
 
+    def databaseSetup(self):
+        from couchpotato import get_db
+
+        db = get_db()
+
+        try:
+            db.add_index(PropertyIndex(db.path, 'property'))
+        except:
+            self.log.debug('Index for properties already exists')
+            db.edit_index(PropertyIndex(db.path, 'property'))
+
     def parser(self):
         return self.p
@@ -179,7 +198,6 @@
     def getOptions(self):
         return self.options
 
-
     def view(self, **kwargs):
         return {
             'options': self.getOptions(),
@@ -207,35 +225,48 @@
         }
 
     def getProperty(self, identifier):
-        from couchpotato import get_session
+        from couchpotato import get_db
 
-        db = get_session()
+        db = get_db()
         prop = None
         try:
-            propert = db.query(Properties).filter_by(identifier = identifier).first()
-            prop = propert.value
+            propert = db.get('property', identifier, with_doc = True)
+            prop = propert['doc']['value']
         except:
-            pass
+            pass # self.log.debug('Property "%s" doesn\'t exist: %s', (identifier, traceback.format_exc(0)))
 
         return prop
 
     def setProperty(self, identifier, value = ''):
-        from couchpotato import get_session
+        from couchpotato import get_db
+
+        db = get_db()
 
         try:
-            db = get_session()
-
-            p = db.query(Properties).filter_by(identifier = identifier).first()
-            if not p:
-                p = Properties()
-                db.add(p)
-
-            p.identifier = identifier
-            p.value = toUnicode(value)
-
-            db.commit()
+            p = db.get('property', identifier, with_doc = True)
+            p['doc'].update({
+                'identifier': identifier,
+                'value': toUnicode(value),
+            })
+            db.update(p['doc'])
         except:
-            self.log.error('Failed: %s', traceback.format_exc())
-            db.rollback()
-        finally:
-            db.close()
+            db.insert({
+                '_t': 'property',
+                'identifier': identifier,
+                'value': toUnicode(value),
+            })
+
+
+class PropertyIndex(HashIndex):
+    _version = 1
+
+    def __init__(self, *args, **kwargs):
+        kwargs['key_format'] = '32s'
+        super(PropertyIndex, self).__init__(*args, **kwargs)
+
+    def make_key(self, key):
+        return md5(key).hexdigest()
+
+    def make_key_value(self, data):
+        if data.get('_t') == 'property':
+            return md5(data['identifier']).hexdigest(), None
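The settings rewrite is the core of the SQLAlchemy-to-CodernityDB migration: properties become plain dicts tagged with `'_t': 'property'`, and a `HashIndex` keyed on the md5 of the identifier replaces the SQL query. A self-contained usage sketch of the same pattern (the `/tmp` path and `KeyValueIndex` name are illustrative):

```python
from hashlib import md5

from CodernityDB.database import Database
from CodernityDB.hash_index import HashIndex


class KeyValueIndex(HashIndex):
    # Same shape as PropertyIndex above: hash the identifier into a
    # fixed 32-char key; the index itself stores no extra value.
    _version = 1

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '32s'
        super(KeyValueIndex, self).__init__(*args, **kwargs)

    def make_key(self, key):
        return md5(key).hexdigest()

    def make_key_value(self, data):
        if data.get('_t') == 'property':
            return md5(data['identifier']).hexdigest(), None


db = Database('/tmp/example_db')
db.create()
db.add_index(KeyValueIndex(db.path, 'property'))

# Insert, then fetch-and-update by identifier, mirroring setProperty()
db.insert({'_t': 'property', 'identifier': 'version', 'value': '1'})
doc = db.get('property', 'version', with_doc = True)['doc']
doc['value'] = '2'
db.update(doc)
```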
diff --git a/couchpotato/core/settings/model.py b/couchpotato/core/settings/model.py
deleted file mode 100644
index ef6e8a5c..00000000
--- a/couchpotato/core/settings/model.py
+++ /dev/null
@@ -1,321 +0,0 @@
-from couchpotato.core.helpers.encoding import toUnicode
-from elixir.entity import Entity
-from elixir.fields import Field
-from elixir.options import options_defaults, using_options
-from elixir.relationships import ManyToMany, OneToMany, ManyToOne
-from sqlalchemy.ext.mutable import Mutable
-from sqlalchemy.types import Integer, Unicode, UnicodeText, Boolean, String, \
-    TypeDecorator
-import json
-import time
-
-options_defaults["shortnames"] = True
-
-# We would like to be able to create this schema in a specific database at
-# will, so we can test it easily.
-# Make elixir not bind to any session to make this possible.
-#
-# http://elixir.ematia.de/trac/wiki/Recipes/MultipleDatabasesOneMetadata
-__session__ = None
-
-
-class SetEncoder(json.JSONEncoder):
-    def default(self, obj):
-        if isinstance(obj, set):
-            return list(obj)
-        return json.JSONEncoder.default(self, obj)
-
-
-class JsonType(TypeDecorator):
-    impl = UnicodeText
-
-    def process_bind_param(self, value, dialect):
-        try:
-            return toUnicode(json.dumps(value, cls = SetEncoder))
-        except:
-            try:
-                return toUnicode(json.dumps(value, cls = SetEncoder, encoding = 'latin-1'))
-            except:
-                raise
-
-    def process_result_value(self, value, dialect):
-        return json.loads(value if value else '{}')
-
-
-class MutableDict(Mutable, dict):
-
-    @classmethod
-    def coerce(cls, key, value):
-        if not isinstance(value, MutableDict):
-            if isinstance(value, dict):
-                return MutableDict(value)
-            return Mutable.coerce(key, value)
-        else:
-            return value
-
-    def __delitem(self, key):
-        dict.__delitem__(self, key)
-        self.changed()
-
-    def __setitem__(self, key, value):
-        dict.__setitem__(self, key, value)
-        self.changed()
-
-    def __getstate__(self):
-        return dict(self)
-
-    def __setstate__(self, state):
-        self.update(self)
-
-    def update(self, *args, **kwargs):
-        super(MutableDict, self).update(*args, **kwargs)
-        self.changed()
-
-MutableDict.associate_with(JsonType)
-
-
-class Movie(Entity):
-    """Movie Resource a movie could have multiple releases
-    The files belonging to the movie object are global for the whole movie
-    such as trailers, nfo, thumbnails"""
-
-    last_edit = Field(Integer, default = lambda: int(time.time()), index = True)
-    type = 'movie' # Compat tv branch
-
-    library = ManyToOne('Library', cascade = 'delete, delete-orphan', single_parent = True)
-    status = ManyToOne('Status')
-    profile = ManyToOne('Profile')
-    category = ManyToOne('Category')
-    releases = OneToMany('Release', cascade = 'all, delete-orphan')
-    files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True)
-
-Media = Movie # Compat tv branch
-
-
-class Library(Entity):
-    """"""
-
-    year = Field(Integer)
-    identifier = Field(String(20), index = True)
-
-    plot = Field(UnicodeText)
-    tagline = Field(UnicodeText(255))
-    info = Field(JsonType)
-
-    status = ManyToOne('Status')
-    movies = OneToMany('Movie', cascade = 'all, delete-orphan')
-    titles = OneToMany('LibraryTitle', cascade = 'all, delete-orphan')
-    files = ManyToMany('File', cascade = 'all, delete-orphan', single_parent = True)
-
-
-class LibraryTitle(Entity):
-    """"""
-    using_options(order_by = '-default')
-
-    title = Field(Unicode)
-    simple_title = Field(Unicode, index = True)
-    default = Field(Boolean, default = False, index = True)
-
-    language = OneToMany('Language')
-    libraries = ManyToOne('Library')
-
-
-class Language(Entity):
-    """"""
-
-    identifier = Field(String(20), index = True)
-    label = Field(Unicode)
-
-    titles = ManyToOne('LibraryTitle')
-
-
-class Release(Entity):
-    """Logically groups all files that belong to a certain release, such as
-    parts of a movie, subtitles."""
-
-    last_edit = Field(Integer, default = lambda: int(time.time()), index = True)
-    identifier = Field(String(100), index = True)
-
-    movie = ManyToOne('Movie')
-    status = ManyToOne('Status')
-    quality = ManyToOne('Quality')
-    files = ManyToMany('File')
-    info = OneToMany('ReleaseInfo', cascade = 'all, delete-orphan')
-
-    def to_dict(self, deep = None, exclude = None):
-        if not exclude: exclude = []
-        if not deep: deep = {}
-
-        orig_dict = super(Release, self).to_dict(deep = deep, exclude = exclude)
-
-        new_info = {}
-        for info in orig_dict.get('info', []):
-
-            value = info['value']
-            try: value = int(info['value'])
-            except: pass
-
-            new_info[info['identifier']] = value
-
-        orig_dict['info'] = new_info
-
-        return orig_dict
-
-
-class ReleaseInfo(Entity):
-    """Properties that can be bound to a file for off-line usage"""
-
-    identifier = Field(String(50), index = True)
-    value = Field(Unicode(255), nullable = False)
-
-    release = ManyToOne('Release')
-
-
-class Status(Entity):
-    """The status of a release, such as Downloaded, Deleted, Wanted etc"""
-
-    identifier = Field(String(20), unique = True)
-    label = Field(Unicode(20))
-
-    releases = OneToMany('Release')
-    movies = OneToMany('Movie')
-
-
-class Quality(Entity):
-    """Quality name of a release, DVD, 720p, DVD-Rip etc"""
-    using_options(order_by = 'order')
-
-    identifier = Field(String(20), unique = True)
-    label = Field(Unicode(20))
-    order = Field(Integer, default = 0, index = True)
-
-    size_min = Field(Integer)
-    size_max = Field(Integer)
-
-    releases = OneToMany('Release')
-    profile_types = OneToMany('ProfileType')
-
-
-class Profile(Entity):
-    """"""
-    using_options(order_by = 'order')
-
-    label = Field(Unicode(50))
-    order = Field(Integer, default = 0, index = True)
-    core = Field(Boolean, default = False)
-    hide = Field(Boolean, default = False)
-
-    movie = OneToMany('Movie')
-    types = OneToMany('ProfileType', cascade = 'all, delete-orphan')
-
-    def to_dict(self, deep = None, exclude = None):
-        if not exclude: exclude = []
-        if not deep: deep = {}
-
-        orig_dict = super(Profile, self).to_dict(deep = deep, exclude = exclude)
-        orig_dict['core'] = orig_dict.get('core') or False
-        orig_dict['hide'] = orig_dict.get('hide') or False
-
-        return orig_dict
-
-
-class Category(Entity):
-    """"""
-    using_options(order_by = 'order')
-
-    label = Field(Unicode(50))
-    order = Field(Integer, default = 0, index = True)
-    required = Field(Unicode(255))
-    preferred = Field(Unicode(255))
-    ignored = Field(Unicode(255))
-    destination = Field(Unicode(255))
-
-    movie = OneToMany('Movie')
-
-
-class ProfileType(Entity):
-    """"""
-    using_options(order_by = 'order')
-
-    order = Field(Integer, default = 0, index = True)
-    finish = Field(Boolean, default = True)
-    wait_for = Field(Integer, default = 0)
-
-    quality = ManyToOne('Quality')
-    profile = ManyToOne('Profile')
-
-
-class File(Entity):
-    """File that belongs to a release."""
-
-    path = Field(Unicode(255), nullable = False, unique = True)
-    part = Field(Integer, default = 1)
-    available = Field(Boolean, default = True)
-
-    type = ManyToOne('FileType')
-    properties = OneToMany('FileProperty')
-
-    history = OneToMany('RenameHistory')
-    movie = ManyToMany('Movie')
-    release = ManyToMany('Release')
-    library = ManyToMany('Library')
-
-
-class FileType(Entity):
-    """Types could be trailer, subtitle, movie, partial movie etc."""
-
-    identifier = Field(String(20), unique = True)
-    type = Field(Unicode(20))
-    name = Field(Unicode(50), nullable = False)
-
-    files = OneToMany('File')
-
-
-class FileProperty(Entity):
-    """Properties that can be bound to a file for off-line usage"""
-
-    identifier = Field(String(20), index = True)
-    value = Field(Unicode(255), nullable = False)
-
-    file = ManyToOne('File')
-
-
-class RenameHistory(Entity):
-    """Remembers from where to where files have been moved."""
-
-    old = Field(Unicode(255))
-    new = Field(Unicode(255))
-
-    file = ManyToOne('File')
-
-
-class Notification(Entity):
-    using_options(order_by = 'added')
-
-    added = Field(Integer, default = lambda: int(time.time()))
-    read = Field(Boolean, default = False)
-    message = Field(Unicode(255))
-    data = Field(JsonType)
-
-
-class Properties(Entity):
-
-    identifier = Field(String(50), index = True)
-    value = Field(Unicode(255), nullable = False)
-
-
-def setup():
-    """Setup the database and create the tables that don't exists yet"""
-    from elixir import setup_all, create_all
-    from couchpotato.environment import Env
-
-    engine = Env.getEngine()
-
-    setup_all()
-    create_all(engine)
-
-    try:
-        engine.execute("PRAGMA journal_mode = WAL")
-        engine.execute("PRAGMA temp_store = MEMORY")
-    except:
-        pass
diff --git a/couchpotato/environment.py b/couchpotato/environment.py
index 1c5863d1..1000d489 100644
--- a/couchpotato/environment.py
+++ b/couchpotato/environment.py
@@ -1,9 +1,10 @@
+import os
+
+from couchpotato.core.database import Database
 from couchpotato.core.event import fireEvent, addEvent
+from couchpotato.core.helpers.encoding import toUnicode
 from couchpotato.core.loader import Loader
 from couchpotato.core.settings import Settings
-from sqlalchemy.engine import create_engine
-from sqlalchemy.orm.session import sessionmaker
-import os
 
 
 class Env(object):
@@ -16,6 +17,7 @@
     _debug = False
     _dev = False
     _settings = Settings()
+    _database = Database()
     _loader = Loader()
     _cache = None
     _options = None
@@ -23,13 +25,13 @@
     _quiet = False
     _daemonized = False
     _desktop = None
-    _engine = None
+    _http_opener = None
 
     ''' Data paths and directories '''
     _app_dir = ""
     _data_dir = ""
     _cache_dir = ""
-    _db_path = ""
+    _db = ""
     _log_path = ""
 
     @staticmethod
@@ -37,8 +39,11 @@
         return Env._debug
 
     @staticmethod
-    def get(attr):
-        return getattr(Env, '_' + attr)
+    def get(attr, unicode = False):
+        if unicode:
+            return toUnicode(getattr(Env, '_' + attr))
+        else:
+            return getattr(Env, '_' + attr)
 
     @staticmethod
     def all():
@@ -52,22 +57,6 @@
     def set(attr, value):
         return setattr(Env, '_' + attr, value)
 
-    @staticmethod
-    def getSession():
-        session = sessionmaker(bind = Env.getEngine())
-        return session()
-
-    @staticmethod
-    def getEngine():
-        existing_engine = Env.get('engine')
-        if existing_engine:
-            return existing_engine
-
-        engine = create_engine(Env.get('db_path'), echo = False)
-        Env.set('engine', engine)
-
-        return engine
-
     @staticmethod
     def setting(attr, section = 'core', value = None, default = '', type = None):
diff --git a/couchpotato/runner.py b/couchpotato/runner.py
index 5c175201..8a446051 100644
--- a/couchpotato/runner.py
+++ b/couchpotato/runner.py
@@ -1,25 +1,29 @@
+from logging import handlers
+from uuid import uuid4
+import locale
+import logging
+import os.path
+import sys
+import time
+import traceback
+import warnings
+import re
+import tarfile
+
+from CodernityDB.database_super_thread_safe import SuperThreadSafeDatabase
 from argparse import ArgumentParser
 from cache import FileSystemCache
 from couchpotato import KeyHandler, LoginHandler, LogoutHandler
 from couchpotato.api import NonBlockHandler, ApiHandler
 from couchpotato.core.event import fireEventAsync, fireEvent
-from couchpotato.core.helpers.encoding import toUnicode
+from couchpotato.core.helpers.encoding import sp
 from couchpotato.core.helpers.variable import getDataDir, tryInt
-from logging import handlers
+import requests
 from tornado.httpserver import HTTPServer
 from tornado.web import Application, StaticFileHandler, RedirectHandler
-from uuid import uuid4
-import locale
-import logging
-import os.path
-import shutil
-import sys
-import time
-import traceback
-import warnings
 
 
-def getOptions(base_path, args):
+def getOptions(args):
 
     # Options
     parser = ArgumentParser(prog = 'CouchPotato.py')
@@ -82,54 +86,65 @@
     Env.set('encoding', encoding)
 
     # Do db stuff
-    db_path = toUnicode(os.path.join(data_dir, 'couchpotato.db'))
+    db_path = sp(os.path.join(data_dir, 'database'))
 
-    # Backup before start and cleanup old databases
-    new_backup = toUnicode(os.path.join(data_dir, 'db_backup', str(int(time.time()))))
-    if not os.path.isdir(new_backup): os.makedirs(new_backup)
+    # Check if database exists
+    db = SuperThreadSafeDatabase(db_path)
+    db_exists = db.exists()
+    if db_exists:
 
-    # Remove older backups, keep backups 3 days or at least 3
-    backups = []
-    for directory in os.listdir(os.path.dirname(new_backup)):
-        backup = toUnicode(os.path.join(os.path.dirname(new_backup), directory))
-        if os.path.isdir(backup):
-            backups.append(backup)
+        # Backup before start and cleanup old backups
+        backup_path = sp(os.path.join(data_dir, 'db_backup'))
+        backup_count = 5
+        existing_backups = []
+        if not os.path.isdir(backup_path): os.makedirs(backup_path)
 
-    latest_backup = tryInt(os.path.basename(sorted(backups)[-1])) if len(backups) > 0 else 0
-    if latest_backup < time.time() - 3600:
-        # Create path and copy
-        src_files = [options.config_file, db_path, db_path + '-shm', db_path + '-wal']
-        for src_file in src_files:
-            if os.path.isfile(src_file):
-                dst_file = toUnicode(os.path.join(new_backup, os.path.basename(src_file)))
-                shutil.copyfile(src_file, dst_file)
+        for root, dirs, files in os.walk(backup_path):
+            for backup_file in sorted(files):
+                ints = re.findall('\d+', backup_file)
 
-                # Try and copy stats seperately
-                try: shutil.copystat(src_file, dst_file)
-                except: pass
+                # Delete non zip files
+                if len(ints) != 1:
+                    os.remove(os.path.join(backup_path, backup_file))
+                else:
+                    existing_backups.append((int(ints[0]), backup_file))
 
-    total_backups = len(backups)
-    for backup in backups:
-        if total_backups > 3:
-            if tryInt(os.path.basename(backup)) < time.time() - 259200:
-                for the_file in os.listdir(backup):
-                    file_path = os.path.join(backup, the_file)
-                    try:
-                        if os.path.isfile(file_path):
-                            os.remove(file_path)
-                    except:
-                        raise
+        # Remove all but the last 5
+        for eb in existing_backups[:-backup_count]:
+            os.remove(os.path.join(backup_path, eb[1]))
 
-                os.rmdir(backup)
-                total_backups -= 1
+        # Create new backup
+        new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
+        zipf = tarfile.open(new_backup, 'w:gz')
+        for root, dirs, files in os.walk(db_path):
+            for zfilename in files:
+                zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
+        zipf.close()
+
+        # Open last
+        db.open()
+
+    else:
+        db.create()
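The new startup path replaces the old copy-files-to-a-dated-folder backup with one timestamped `tar.gz` per start, pruning everything but the newest five archives. The same idea as a standalone helper, a sketch under the assumption that one archive per call is wanted (`backup_database` and its arguments are illustrative, not CP API):

```python
import os
import tarfile
import time


def backup_database(db_path, backup_path, keep = 5):
    # One timestamped tar.gz per call; oldest archives pruned so at
    # most `keep` remain after the new one is written.
    if not os.path.isdir(backup_path):
        os.makedirs(backup_path)

    archives = sorted(f for f in os.listdir(backup_path) if f.endswith('.tar.gz'))
    for old in archives[:-(keep - 1) or None]:
        os.remove(os.path.join(backup_path, old))

    name = os.path.join(backup_path, '%s.tar.gz' % int(time.time()))
    with tarfile.open(name, 'w:gz') as tar:
        # Recursive add replaces the explicit os.walk() in the hunk above
        tar.add(db_path, arcname = 'database')
    return name
```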
+
+    # Force creation of cachedir
+    log_dir = sp(log_dir)
+    cache_dir = sp(os.path.join(data_dir, 'cache'))
+    python_cache = sp(os.path.join(cache_dir, 'python'))
+
+    if not os.path.exists(cache_dir):
+        os.mkdir(cache_dir)
+    if not os.path.exists(python_cache):
+        os.mkdir(python_cache)
 
     # Register environment settings
-    Env.set('app_dir', toUnicode(base_path))
-    Env.set('data_dir', toUnicode(data_dir))
-    Env.set('log_path', toUnicode(os.path.join(log_dir, 'CouchPotato.log')))
-    Env.set('db_path', toUnicode('sqlite:///' + db_path))
-    Env.set('cache_dir', toUnicode(os.path.join(data_dir, 'cache')))
-    Env.set('cache', FileSystemCache(toUnicode(os.path.join(Env.get('cache_dir'), 'python'))))
+    Env.set('app_dir', sp(base_path))
+    Env.set('data_dir', sp(data_dir))
+    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
+    Env.set('db', db)
+    Env.set('http_opener', requests.Session())
+    Env.set('cache_dir', cache_dir)
+    Env.set('cache', FileSystemCache(python_cache))
     Env.set('console_log', options.console_log)
     Env.set('quiet', options.quiet)
     Env.set('desktop', desktop)
@@ -149,7 +164,7 @@
     for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
         logging.getLogger(logger_name).setLevel(logging.ERROR)
 
-    for logger_name in ['gntp', 'migrate']:
+    for logger_name in ['gntp']:
         logging.getLogger(logger_name).setLevel(logging.WARNING)
 
     # Use reloader
@@ -174,6 +189,7 @@
         logger.addHandler(hdlr2)
 
     # Start logging & enable colors
+    # noinspection PyUnresolvedReferences
     import color_logs
     from couchpotato.core.logger import CPLog
     log = CPLog(__name__)
@@ -183,34 +199,6 @@
         log.warning('%s %s %s line:%s', (category, message, filename, lineno))
     warnings.showwarning = customwarn
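All of those `Env.set` calls go through the static registry shown in the environment.py hunk above; in miniature the pattern is just prefixed-attribute indirection on a class, sketched here with illustrative values:

```python
class Env(object):
    # Each setting lives as a class attribute prefixed with '_';
    # get/set add the prefix so callers use plain names.
    _data_dir = ''
    _cache = None

    @staticmethod
    def get(attr):
        return getattr(Env, '_' + attr)

    @staticmethod
    def set(attr, value):
        return setattr(Env, '_' + attr, value)


Env.set('data_dir', '/tmp/couchpotato')
assert Env.get('data_dir') == '/tmp/couchpotato'
```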
-    # Check if database exists
-    db = Env.get('db_path')
-    db_exists = os.path.isfile(toUnicode(db_path))
-
-    # Load migrations
-    if db_exists:
-
-        from migrate.versioning.api import version_control, db_version, version, upgrade
-        repo = os.path.join(base_path, 'couchpotato', 'core', 'migration')
-
-        latest_db_version = version(repo)
-        try:
-            current_db_version = db_version(db, repo)
-        except:
-            version_control(db, repo, version = latest_db_version)
-            current_db_version = db_version(db, repo)
-
-        if current_db_version < latest_db_version:
-            if development:
-                log.error('There is a database migration ready, but you are running development mode, so it won\'t be used. If you see this, you are stupid. Please disable development mode.')
-            else:
-                log.info('Doing database upgrade. From %d to %d', (current_db_version, latest_db_version))
-                upgrade(db, repo)
-
-    # Configure Database
-    from couchpotato.core.settings.model import setup
-    setup()
-
     # Create app
     from couchpotato import WebHandler
     web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
@@ -236,7 +224,8 @@
     }
 
     # Load the app
-    application = Application([],
+    application = Application(
+        [],
         log_function = lambda x: None,
         debug = config['use_reloader'],
         gzip = True,
@@ -267,35 +256,44 @@
     static_path = '%sstatic/' % web_base
     for dir_name in ['fonts', 'images', 'scripts', 'style']:
         application.add_handlers(".*$", [
-            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': toUnicode(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
+            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'couchpotato', 'static', dir_name))})
        ])
     Env.set('static_path', static_path)
 
     # Load configs & plugins
     loader = Env.get('loader')
-    loader.preload(root = toUnicode(base_path))
+    loader.preload(root = sp(base_path))
     loader.run()
 
     # Fill database with needed stuff
+    fireEvent('database.setup')
     if not db_exists:
         fireEvent('app.initialize', in_order = True)
+    fireEvent('app.migrate')
 
     # Go go go!
     from tornado.ioloop import IOLoop
+    from tornado.autoreload import add_reload_hook
     loop = IOLoop.current()
 
+    # Reload hook
+    def reload_hook():
+        fireEvent('app.shutdown')
+    add_reload_hook(reload_hook)
+
     # Some logging and fire load event
     try: log.info('Starting server on port %(port)s', config)
     except: pass
     fireEventAsync('app.load')
 
+    ssl_options = None
     if config['ssl_cert'] and config['ssl_key']:
-        server = HTTPServer(application, no_keep_alive = True, ssl_options = {
+        ssl_options = {
             'certfile': config['ssl_cert'],
             'keyfile': config['ssl_key'],
-        })
-    else:
-        server = HTTPServer(application, no_keep_alive = True)
+        }
+
+    server = HTTPServer(application, no_keep_alive = True, ssl_options = ssl_options)
 
     try_restart = True
     restart_tries = 5
@@ -304,6 +302,9 @@
     try:
         server.listen(config['port'], config['host'])
         loop.start()
+        server.close_all_connections()
+        server.stop()
+        loop.close(all_fds = True)
     except Exception as e:
         log.error('Failed starting: %s', traceback.format_exc())
         try:
@@ -317,6 +318,8 @@
                 continue
             else:
                 return
+        except ValueError:
+            return
         except:
             pass
diff --git a/couchpotato/core/plugins/profile/static/handle.png b/couchpotato/static/images/handle.png
similarity index 100%
rename from couchpotato/core/plugins/profile/static/handle.png
rename to couchpotato/static/images/handle.png
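Folding the two `HTTPServer` constructions into one by passing `ssl_options = None` when TLS is off is a tidy Tornado idiom; a minimal self-contained sketch (the `/ping` handler and cert paths are illustrative, not part of CP):

```python
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.web import Application, RequestHandler


class Ping(RequestHandler):
    def get(self):
        self.write('pong')


application = Application([('/ping', Ping)])

# None yields a plain HTTP server; a cert/key dict upgrades the same
# code path to HTTPS, exactly as in the runner.py hunk above.
ssl_options = None  # or {'certfile': '/path/server.crt', 'keyfile': '/path/server.key'}

server = HTTPServer(application, ssl_options = ssl_options)
server.listen(5050)
IOLoop.current().start()
```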
diff --git a/couchpotato/static/scripts/api.js b/couchpotato/static/scripts/api.js
index 38d18740..410b4886 100644
--- a/couchpotato/static/scripts/api.js
+++ b/couchpotato/static/scripts/api.js
@@ -7,9 +7,9 @@ var ApiClass = new Class({
    },
 
    request: function(type, options){
-       var self = this;
+       var self = this,
+           r_type = self.options.is_remote ? 'JSONP' : 'JSON';
 
-       var r_type = self.options.is_remote ? 'JSONP' : 'JSON';
        return new Request[r_type](Object.merge({
            'callbackKey': 'callback_func',
            'method': 'get',
diff --git a/couchpotato/static/scripts/couchpotato.js b/couchpotato/static/scripts/couchpotato.js
index 03332281..19b9f455 100644
--- a/couchpotato/static/scripts/couchpotato.js
+++ b/couchpotato/static/scripts/couchpotato.js
@@ -54,7 +54,7 @@
    },
 
    pushState: function(e){
-       if((!e.meta && Browser.Platform.mac) || (!e.control && !Browser.Platform.mac)){
+       if((!e.meta && Browser.platform.mac) || (!e.control && !Browser.platform.mac)){
            (e).preventDefault();
            var url = e.target.get('href');
            if(History.getPath() != url)
@@ -63,7 +63,7 @@
    },
 
    isMac: function(){
-       return Browser.Platform.mac
+       return Browser.platform.mac
    },
 
    createLayout: function(){
@@ -137,17 +137,34 @@
 
    createPages: function(){
        var self = this;
+       var pages = [];
 
        Object.each(Page, function(page_class, class_name){
            var pg = new Page[class_name](self, {});
            self.pages[class_name] = pg;
 
-           $(pg).inject(self.content);
+           pages.include({
+               'order': pg.order,
+               'name': class_name,
+               'class': pg
+           });
        });
 
+       pages.stableSort(self.sortPageByOrder).each(function(page){
+           page['class'].load();
+           self.fireEvent('load'+page.name);
+           $(page['class']).inject(self.content);
+       });
+
+       delete pages;
+
        self.fireEvent('load');
 
    },
 
+   sortPageByOrder: function(a, b){
+       return (a.order || 100) - (b.order || 100)
+   },
+
    openPage: function(url) {
        var self = this;
@@ -255,11 +272,18 @@
 
        (function(){
 
-           Api.request('app.available', {
-               'onFailure': function(){
-                   self.checkAvailable.delay(1000, self, [delay, onAvailable]);
-                   self.fireEvent('unload');
+           var onFailure = function(){
+               self.checkAvailable.delay(1000, self, [delay, onAvailable]);
+               self.fireEvent('unload');
+           }
+
+           var request = Api.request('app.available', {
+               'timeout': 2000,
+               'onTimeout': function(){
+                   request.cancel();
+                   onFailure();
                },
+               'onFailure': onFailure,
                'onSuccess': function(){
                    if(onAvailable)
                        onAvailable();
@@ -305,7 +329,7 @@
 
        var url = 'http://www.dereferer.org/?' + el.get('href');
 
-       if(el.get('target') == '_blank' || (e.meta && Browser.Platform.mac) || (e.control && !Browser.Platform.mac))
+       if(el.get('target') == '_blank' || (e.meta && Browser.platform.mac) || (e.control && !Browser.platform.mac))
            window.open(url);
        else
            window.location = url;
@@ -317,8 +341,8 @@
 
        return new Element('div.group_userscript').adopt(
            new Element('a.userscript.button', {
-               'text': 'Install userscript',
-               'href': Api.createUrl('userscript.get')+randomString()+'/couchpotato.user.js',
+               'text': 'Install extension',
+               'href': 'https://couchpota.to/extension/',
                'target': '_blank'
            }),
            new Element('span.or[text=or]'),
@@ -369,7 +393,7 @@
 
        // Create parallel callback
        var callbacks = [];
-       self.global_events[name].each(function(handle, nr){
+       self.global_events[name].each(function(handle){
 
            callbacks.push(function(callback){
                var results = handle.apply(handle, args || []);
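The `checkAvailable` change above treats a hung `app.available` request the same as a failed one: a 2-second timeout cancels the request and reschedules the poll. A rough Python analogue of that retry loop (not CP code; `wait_until_available` is an illustrative name), using the `requests` session the runner now registers:

```python
import time

import requests


def wait_until_available(url, poll = 1.0, timeout = 2.0):
    # Treat a timeout exactly like a failure, then retry after a
    # short delay, mirroring the JS onTimeout/onFailure pairing.
    while True:
        try:
            if requests.get(url, timeout = timeout).ok:
                return
        except requests.RequestException:
            pass
        time.sleep(poll)
```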
diff --git a/couchpotato/static/scripts/library/mootools.js b/couchpotato/static/scripts/library/mootools.js
index 9917ad32..838edee6 100644
--- a/couchpotato/static/scripts/library/mootools.js
+++ b/couchpotato/static/scripts/library/mootools.js
@@ -20,7 +20,7 @@ description: The heart of MooTools.
 
 license: MIT-style license.
 
-copyright: Copyright (c) 2006-2012 [Valerio Proietti](http://mad4milk.net/).
+copyright: Copyright (c) 2006-2014 [Valerio Proietti](http://mad4milk.net/).
 
 authors: The MooTools production team (http://mootools.net/developers/)
@@ -36,8 +36,8 @@ provides: [Core, MooTools, Type, typeOf, instanceOf, Native]
 (function(){
 
 this.MooTools = {
-   version: '1.4.5',
-   build: 'ab8ea8824dc3b24b6666867a2c4ed58ebb762cf0'
+   version: '1.5.0',
+   build: '0f7b690afee9349b15909f33016a25d2e4d9f4e3'
 };
 
 // typeOf, instanceOf
@@ -50,7 +50,7 @@ var typeOf = this.typeOf = function(item){
        if (item.nodeType == 1) return 'element';
        if (item.nodeType == 3) return (/\S/).test(item.nodeValue) ? 'textnode' : 'whitespace';
    } else if (typeof item.length == 'number'){
-       if (item.callee) return 'arguments';
+       if ('callee' in item) return 'arguments';
        if ('item' in item) return 'collection';
    }
 
@@ -267,7 +267,7 @@ var force = function(name, object, methods){
        if (!methodsEnumerable) for (var i = 0, l = methods.length; i < l; i++){
            fn.call(prototype, prototype[methods[i]], methods[i]);
        }
-       for (var key in prototype) fn.call(prototype, prototype[key], key)
+       for (var key in prototype) fn.call(prototype, prototype[key], key);
    };
 }
 
@@ -275,7 +275,7 @@ var force = function(name, object, methods){
 };
 
 force('String', String, [
-   'charAt', 'charCodeAt', 'concat', 'indexOf', 'lastIndexOf', 'match', 'quote', 'replace', 'search',
+   'charAt', 'charCodeAt', 'concat', 'contains', 'indexOf', 'lastIndexOf', 'match', 'quote', 'replace', 'search',
    'slice', 'split', 'substr', 'substring', 'trim', 'toLowerCase', 'toUpperCase'
 ])('Array', Array, [
    'pop', 'push', 'reverse', 'shift', 'sort', 'splice', 'unshift', 'concat', 'join', 'slice',
@@ -325,11 +325,13 @@ Object.each = Object.forEach;
 
 Array.implement({
 
+   /**/
    forEach: function(fn, bind){
        for (var i = 0, l = this.length; i < l; i++){
            if (i in this) fn.call(bind, this[i], i, this);
        }
    },
+   /**/
 
    each: function(fn, bind){
        Array.forEach(this, fn, bind);
@@ -421,7 +423,7 @@ description: Contains Array Prototypes like each, contains, and erase.
 
 license: MIT-style license.
 
-requires: Type
+requires: [Type]
 
 provides: Array
 
@@ -564,7 +566,7 @@ Array.implement({
        if (this.length != 3) return null;
        var rgb = this.map(function(value){
            if (value.length == 1) value += value;
-           return value.toInt(16);
+           return parseInt(value, 16);
        });
        return (array) ? rgb : 'rgb(' + rgb + ')';
    },
@@ -594,7 +596,7 @@ description: Contains String Prototypes like camelCase, capitalize, test, and to
 
 license: MIT-style license.
 
-requires: Type
+requires: [Type, Array]
 
 provides: String
 
@@ -603,14 +605,16 @@ provides: String
 
 String.implement({
 
+   //
+   contains: function(string, index){
+       return (index ? String(this).slice(index) : String(this)).indexOf(string) > -1;
+   },
+   //
+
    test: function(regex, params){
        return ((typeOf(regex) == 'regexp') ? regex : new RegExp('' + regex, params)).test(this);
    },
 
-   contains: function(string, separator){
-       return (separator) ? (separator + this + separator).indexOf(separator + string + separator) > -1 : String(this).indexOf(string) > -1;
-   },
-
    trim: function(){
        return String(this).replace(/^\s+|\s+$/g, '');
    },
@@ -669,6 +673,8 @@ String.implement({
 
 });
 
+
+
 /*
 ---
 
@@ -1063,37 +1069,47 @@ provides: [Browser, Window, Document]
 
 var document = this.document;
 var window = document.window = this;
 
-var ua = navigator.userAgent.toLowerCase(),
-   platform = navigator.platform.toLowerCase(),
-   UA = ua.match(/(opera|ie|firefox|chrome|version)[\s\/:]([\w\d\.]+)?.*?(safari|version[\s\/:]([\w\d\.]+)|$)/) || [null, 'unknown', 0],
-   mode = UA[1] == 'ie' && document.documentMode;
+var parse = function(ua, platform){
+   ua = ua.toLowerCase();
+   platform = (platform ? platform.toLowerCase() : '');
 
-var Browser = this.Browser = {
+   var UA = ua.match(/(opera|ie|firefox|chrome|trident|crios|version)[\s\/:]([\w\d\.]+)?.*?(safari|(?:rv[\s\/:]|version[\s\/:])([\w\d\.]+)|$)/) || [null, 'unknown', 0];
 
-   extend: Function.prototype.extend,
+   if (UA[1] == 'trident'){
+       UA[1] = 'ie';
+       if (UA[4]) UA[2] = UA[4];
+   } else if (UA[1] == 'crios') {
+       UA[1] = 'chrome';
+   }
 
-   name: (UA[1] == 'version') ? UA[3] : UA[1],
+   var platform = ua.match(/ip(?:ad|od|hone)/) ? 'ios' : (ua.match(/(?:webos|android)/) || platform.match(/mac|win|linux/) || ['other'])[0];
+   if (platform == 'win') platform = 'windows';
 
-   version: mode || parseFloat((UA[1] == 'opera' && UA[4]) ? UA[4] : UA[2]),
+   return {
+       extend: Function.prototype.extend,
+       name: (UA[1] == 'version') ? UA[3] : UA[1],
+       version: parseFloat((UA[1] == 'opera' && UA[4]) ? UA[4] : UA[2]),
+       platform: platform
+   };
+};
 
-   Platform: {
-       name: ua.match(/ip(?:ad|od|hone)/) ? 'ios' : (ua.match(/(?:webos|android)/) || platform.match(/mac|win|linux/) || ['other'])[0]
-   },
+var Browser = this.Browser = parse(navigator.userAgent, navigator.platform);
 
+if (Browser.ie){
+   Browser.version = document.documentMode;
+}
+
+Browser.extend({
    Features: {
        xpath: !!(document.evaluate),
        air: !!(window.runtime),
        query: !!(document.querySelector),
        json: !!(window.JSON)
    },
+   parseUA: parse
+});
 
-   Plugins: {}
-};
-
-Browser[Browser.name] = true;
-Browser[Browser.name + parseInt(Browser.version, 10)] = true;
-Browser.Platform[Browser.Platform.name] = true;
 
 // Request
@@ -1126,18 +1142,7 @@ Browser.Request = (function(){
 
 Browser.Features.xhr = !!(Browser.Request);
 
-// Flash detection
-var version = (Function.attempt(function(){
-   return navigator.plugins['Shockwave Flash'].description;
-}, function(){
-   return new ActiveXObject('ShockwaveFlash.ShockwaveFlash').GetVariable('$version');
-}) || '0 r0').match(/\d+/g);
-
-Browser.Plugins.Flash = {
-   version: Number(version[0] || '0.' + version[1]) || 0,
-   build: Number(version[2]) || 0
-};
 
 // String scripts
 
@@ -1756,7 +1761,7 @@ local.setDocument = function(document){
 
    // native matchesSelector function
 
-   features.nativeMatchesSelector = root.matchesSelector || /*root.msMatchesSelector ||*/ root.mozMatchesSelector || root.webkitMatchesSelector;
+   features.nativeMatchesSelector = root.matches || /*root.msMatchesSelector ||*/ root.mozMatchesSelector || root.webkitMatchesSelector;
    if (features.nativeMatchesSelector) try {
        // if matchesSelector trows errors on incorrect sintaxes we can use it
        features.nativeMatchesSelector.call(root, ':slick');
@@ -2590,12 +2595,12 @@ license: MIT-style license.
 
 requires: [Window, Document, Array, String, Function, Object, Number, Slick.Parser, Slick.Finder]
 
-provides: [Element, Elements, $, $$, Iframe, Selectors]
+provides: [Element, Elements, $, $$, IFrame, Selectors]
 
 ...
*/
 
-var Element = function(tag, props){
+var Element = this.Element = function(tag, props){
    var konstructor = Element.Constructors[tag];
    if (konstructor) return konstructor(props);
    if (typeof tag != 'string') return document.id(tag).set(props);
@@ -2779,7 +2784,7 @@ Array.mirror(Elements);
 /**/
 var createElementAcceptsHTML;
 try {
-   createElementAcceptsHTML = (document.createElement('').name == 'x');
+   createElementAcceptsHTML = (document.createElement('').name == 'x');
 } catch (e){}
 
 var escapeQuotes = function(html){
@@ -3112,7 +3117,28 @@ var pollutesGetAttribute = (function(div){
    return (div.getAttribute('random') == 'attribute');
 })(document.createElement('div'));
 
-/* */
+var hasCloneBug = (function(test){
+   test.innerHTML = '';
+   return test.cloneNode(true).firstChild.childNodes.length != 1;
+})(document.createElement('div'));
+/* */
+
+var hasClassList = !!document.createElement('div').classList;
+
+var classes = function(className){
+   var classNames = (className || '').clean().split(" "), uniques = {};
+   return classNames.filter(function(className){
+       if (className !== "" && !uniques[className]) return uniques[className] = className;
+   });
+};
+
+var addToClassList = function(name){
+   this.classList.add(name);
+};
+
+var removeFromClassList = function(name){
+   this.classList.remove(name);
+};
 
 Element.implement({
 
@@ -3122,7 +3148,8 @@ Element.implement({
            setter(this, value);
        } else {
            /* */
-           if (pollutesGetAttribute) var attributeWhiteList = this.retrieve('$attributeWhiteList', {});
+           var attributeWhiteList;
+           if (pollutesGetAttribute) attributeWhiteList = this.retrieve('$attributeWhiteList', {});
            /* */
 
            if (value == null){
@@ -3194,17 +3221,27 @@ Element.implement({
        return this;
    },
 
-   hasClass: function(className){
+   hasClass: hasClassList ? function(className){
+       return this.classList.contains(className);
+   } : function(className){
        return this.className.clean().contains(className, ' ');
    },
 
-   addClass: function(className){
-       if (!this.hasClass(className)) this.className = (this.className + ' ' + className).clean();
+   addClass: hasClassList ? function(className){
+       classes(className).forEach(addToClassList, this);
+       return this;
+   } : function(className){
+       this.className = classes(className + ' ' + this.className).join(' ');
        return this;
    },
 
-   removeClass: function(className){
-       this.className = this.className.replace(new RegExp('(^|\\s)' + className + '(?:\\s|$)'), '$1');
+   removeClass: hasClassList ? function(className){
+       classes(className).forEach(removeFromClassList, this);
+       return this;
+   } : function(className){
+       var classNames = classes(this.className);
+       classes(className).forEach(classNames.erase, classNames);
+       this.className = classNames.join(' ');
        return this;
    },
 
@@ -3279,6 +3316,37 @@ Element.implement({
 
 });
 
+
+// appendHTML
+
+var appendInserters = {
+   before: 'beforeBegin',
+   after: 'afterEnd',
+   bottom: 'beforeEnd',
+   top: 'afterBegin',
+   inside: 'beforeEnd'
+};
+
+Element.implement('appendHTML', ('insertAdjacentHTML' in document.createElement('div')) ? function(html, where){
+   this.insertAdjacentHTML(appendInserters[where || 'bottom'], html);
+   return this;
+} : function(html, where){
+   var temp = new Element('div', {html: html}),
+       children = temp.childNodes,
+       fragment = temp.firstChild;
+
+   if (!fragment) return this;
+   if (children.length > 1){
+       fragment = document.createDocumentFragment();
+       for (var i = 0, l = children.length; i < l; i++){
+           fragment.appendChild(children[i]);
+       }
+   }
+
+   inserters[where || 'bottom'](fragment, this);
+   return this;
+});
+
 var collected = {}, storage = {};
 
 var get = function(uid){
@@ -3344,7 +3412,7 @@ Element.implement({
    }
 
    /**/
-   if (Browser.ie){
+   if (hasCloneBug){
        var co = clone.getElementsByTagName('object'), to = this.getElementsByTagName('object');
        for (i = co.length; i--;) co[i].outerHTML = to[i].outerHTML;
    }
@@ -3357,13 +3425,7 @@ Element.implement({
 [Element, Window, Document].invoke('implement', {
 
    addListener: function(type, fn){
-       if (type == 'unload'){
-           var old = fn, self = this;
-           fn = function(){
-               self.removeListener('unload', fn);
-               old();
-           };
-       } else {
+       if (window.attachEvent && !window.addEventListener){
            collected[Slick.uidOf(this)] = this;
        }
        if (this.addEventListener) this.addEventListener(type, fn, !!arguments[2]);
@@ -3398,10 +3460,14 @@ Element.implement({
 });
 
 /**/
-if (window.attachEvent && !window.addEventListener) window.addListener('unload', function(){
-   Object.each(collected, clean);
-   if (window.CollectGarbage) CollectGarbage();
-});
+if (window.attachEvent && !window.addEventListener){
+   var gc = function(){
+       Object.each(collected, clean);
+       if (window.CollectGarbage) CollectGarbage();
+       window.removeListener('unload', gc);
+   }
+   window.addListener('unload', gc);
+}
 /**/
 
 Element.Properties = {};
@@ -3446,11 +3512,13 @@ Element.Properties.html = {
 
 };
 
+var supportsHTML5Elements = true, supportsTableInnerHTML = true, supportsTRInnerHTML = true;
+
 /**/
 // technique by jdbarlett - http://jdbartlett.com/innershiv/
 var div = document.createElement('div');
 div.innerHTML = '';
-var supportsHTML5Elements = (div.childNodes.length == 1);
+supportsHTML5Elements = (div.childNodes.length == 1);
 if (!supportsHTML5Elements){
    var tags = 'abbr article aside audio canvas datalist details figcaption figure footer header hgroup mark meter nav output progress section summary time video'.split(' '),
        fragment = document.createDocumentFragment(), l = tags.length;
 div = null;
 /**/
 
 /**/
-var supportsTableInnerHTML = Function.attempt(function(){
+supportsTableInnerHTML = Function.attempt(function(){
    var table = document.createElement('table');
    table.innerHTML = '';
    return true;
 
 /**/
 var tr = document.createElement('tr'), html = '';
 tr.innerHTML = html;
-var supportsTRInnerHTML = (tr.innerHTML == html);
+supportsTRInnerHTML = (tr.innerHTML == html);
 tr = null;
 /**/
 
@@ -3514,11 +3582,12 @@ if (testForm.firstChild.value != 's') Element.Properties.value = {
        var tag = this.get('tag');
        if (tag != 'select') return this.setProperty('value', value);
        var options = this.getElements('option');
+       value = String(value);
        for (var i = 0; i < options.length; i++){
            var option = options[i],
                attr = option.getAttributeNode('value'),
                optionValue = (attr && attr.specified) ? option.value : option.get('text');
-           if (optionValue == value) return option.selected = true;
+           if (optionValue === value) return option.selected = true;
        }
    },
@@ -3572,17 +3641,24 @@ provides: Element.Style
 
 (function(){
 
-var html = document.html;
+var html = document.html, el;
 
 //
 // Check for oldIE, which does not remove styles when they're set to null
-var el = document.createElement('div');
+el = document.createElement('div');
 el.style.color = 'red';
 el.style.color = null;
 var doesNotRemoveStyles = el.style.color == 'red';
+
+// check for oldIE, which returns border* shorthand styles in the wrong order (color-width-style instead of width-style-color)
+var border = '1px solid #123abc';
+el.style.border = border;
+var returnsBordersInWrongOrder = el.style.border != border;
 el = null;
 //
 
+var hasGetComputedStyle = !!window.getComputedStyle;
+
 Element.Properties.styles = {set: function(styles){
    this.setStyles(styles);
 }};
@@ -3596,16 +3672,25 @@ var setVisibility = function(element, opacity){
    element.style.visibility = opacity > 0 || opacity == null ? 'visible' : 'hidden';
 };
 
+//
+var setFilter = function(element, regexp, value){
+   var style = element.style,
+       filter = style.filter || element.getComputedStyle('filter') || '';
+   style.filter = (regexp.test(filter) ? filter.replace(regexp, value) : filter + ' ' + value).trim();
+   if (!style.filter) style.removeAttribute('filter');
+};
+//
+
 var setOpacity = (hasOpacity ? function(element, opacity){
    element.style.opacity = opacity;
 } : (hasFilter ? function(element, opacity){
-   var style = element.style;
-   if (!element.currentStyle || !element.currentStyle.hasLayout) style.zoom = 1;
-   if (opacity == null || opacity == 1) opacity = '';
-   else opacity = 'alpha(opacity=' + (opacity * 100).limit(0, 100).round() + ')';
-   var filter = style.filter || element.getComputedStyle('filter') || '';
-   style.filter = reAlpha.test(filter) ? filter.replace(reAlpha, opacity) : filter + opacity;
-   if (!style.filter) style.removeAttribute('filter');
+   if (!element.currentStyle || !element.currentStyle.hasLayout) element.style.zoom = 1;
+   if (opacity == null || opacity == 1){
+       setFilter(element, reAlpha, '');
+       if (opacity == 1 && getOpacity(element) != 1) setFilter(element, reAlpha, 'alpha(opacity=100)');
+   } else {
+       setFilter(element, reAlpha, 'alpha(opacity=' + (opacity * 100).limit(0, 100).round() + ')');
+   }
 } : setVisibility));
 
 var getOpacity = (hasOpacity ? function(element){
@@ -3622,15 +3707,27 @@ var getOpacity = (hasOpacity ? function(element){
    return opacity;
 }));
 
-var floatName = (html.style.cssFloat == null) ? 'styleFloat' : 'cssFloat';
+var floatName = (html.style.cssFloat == null) ? 'styleFloat' : 'cssFloat',
+   namedPositions = {left: '0%', top: '0%', center: '50%', right: '100%', bottom: '100%'},
+   hasBackgroundPositionXY = (html.style.backgroundPositionX != null);
+
+//
+var removeStyle = function(style, property){
+   if (property == 'backgroundPosition'){
+       style.removeAttribute(property + 'X');
+       property += 'Y';
+   }
+   style.removeAttribute(property);
+};
+//
 
 Element.implement({
 
    getComputedStyle: function(property){
-       if (this.currentStyle) return this.currentStyle[property.camelCase()];
+       if (!hasGetComputedStyle && this.currentStyle) return this.currentStyle[property.camelCase()];
        var defaultView = Element.getDocument(this).defaultView,
            computed = defaultView ? defaultView.getComputedStyle(this, null) : null;
-       return (computed) ? computed.getPropertyValue((property == floatName) ? 'float' : property.hyphenate()) : null;
+       return (computed) ? computed.getPropertyValue((property == floatName) ? 'float' : property.hyphenate()) : '';
    },
 
    setStyle: function(property, value){
@@ -3652,7 +3749,7 @@
        this.style[property] = value;
        //
        if ((value == '' || value == null) && doesNotRemoveStyles && this.style.removeAttribute){
-           this.style.removeAttribute(property);
+           removeStyle(this.style, property);
        }
        //
        return this;
@@ -3663,20 +3760,25 @@
        property = (property == 'float' ? floatName : property).camelCase();
        var result = this.style[property];
        if (!result || property == 'zIndex'){
-           result = [];
-           for (var style in Element.ShortStyles){
-               if (property != style) continue;
-               for (var s in Element.ShortStyles[style]) result.push(this.getStyle(s));
+           if (Element.ShortStyles.hasOwnProperty(property)){
+               result = [];
+               for (var s in Element.ShortStyles[property]) result.push(this.getStyle(s));
                return result.join(' ');
            }
            result = this.getComputedStyle(property);
        }
+       if (hasBackgroundPositionXY && /^backgroundPosition[XY]?$/.test(property)){
+           return result.replace(/(top|right|bottom|left)/g, function(position){
+               return namedPositions[position];
+           }) || '0px';
+       }
+       if (!result && property == 'backgroundPosition') return '0px 0px';
        if (result){
            result = String(result);
            var color = result.match(/rgba?\([\d\s,]+\)/);
            if (color) result = result.replace(color[0], color[0].rgbToHex());
        }
-       if (Browser.opera || Browser.ie){
+       if (!hasGetComputedStyle && !this.style[property]){
            if ((/^(height|width)$/).test(property) && !(/px$/.test(result))){
                var values = (property == 'width') ? ['left', 'right'] : ['top', 'bottom'], size = 0;
                values.each(function(value){
@@ -3684,10 +3786,15 @@
                }, this);
                return this['offset' + property.capitalize()] - size + 'px';
            }
-           if (Browser.ie && (/^border(.+)Width|margin|padding/).test(property) && isNaN(parseFloat(result))){
+           if ((/^border(.+)Width|margin|padding/).test(property) && isNaN(parseFloat(result))){
                return '0px';
            }
        }
+       //
+       if (returnsBordersInWrongOrder && /^border(Top|Right|Bottom|Left)?$/.test(property) && /^#/.test(result)){
+           return result.replace(/^(.+)\s(.+)\s(.+)$/, '$2 $3 $1');
+       }
+       //
        return result;
    },
@@ -3709,7 +3816,7 @@ Element.implement({
 Element.Styles = {
    left: '@px', top: '@px', bottom: '@px', right: '@px',
    width: '@px', height: '@px', maxWidth: '@px', maxHeight: '@px', minWidth: '@px', minHeight: '@px',
-   backgroundColor: 'rgb(@, @, @)', backgroundPosition: '@px @px', color: 'rgb(@, @, @)',
+   backgroundColor: 'rgb(@, @, @)', backgroundSize: '@px', backgroundPosition: '@px @px', color: 'rgb(@, @, @)',
    fontSize: '@px', letterSpacing: '@px', lineHeight: '@px', clip: 'rect(@px @px @px @px)',
    margin: '@px @px @px @px', padding: '@px @px @px @px', border: '@px @ rgb(@, @, @) @px @ rgb(@, @, @) @px @ rgb(@, @, @)',
    borderWidth: '@px @px @px @px', borderStyle: '@ @ @ @', borderColor: 'rgb(@, @, @) rgb(@, @, @) rgb(@, @, @) rgb(@, @, @)',
@@ -3738,6 +3845,7 @@ Element.ShortStyles = {margin: {}, padding: {}, border: {}, borderWidth: {}, bor
    Short.borderColor[bdc] = Short[bd][bdc] = All[bdc] = 'rgb(@, @, @)';
 });
 
+if (hasBackgroundPositionXY) Element.ShortStyles.backgroundPosition = {backgroundPositionX: '@', backgroundPositionY: '@'};
 })();
@@ -3779,7 +3887,7 @@ var DOMEvent = this.DOMEvent = new Type('DOMEvent', function(event, win){
    if (type.indexOf('key') == 0){
        var code = this.code = (event.which || event.keyCode);
        this.key = _keys[code];
-       if (type == 'keydown'){
+       if (type == 'keydown' || type == 'keyup'){
            if (code > 111 && code < 124)
this.key = 'f' + (code - 111);
            else if (code > 95 && code < 106) this.key = code - 96;
        }
@@ -4001,23 +4109,27 @@ Element.NativeEvents = {
    gesturestart: 2, gesturechange: 2, gestureend: 2, // gesture
    focus: 2, blur: 2, change: 2, reset: 2, select: 2, submit: 2, paste: 2, input: 2, //form elements
    load: 2, unload: 1, beforeunload: 2, resize: 1, move: 1, DOMContentLoaded: 1, readystatechange: 1, //window
+   hashchange: 1, popstate: 2, // history
    error: 1, abort: 1, scroll: 1 //misc
 };
 
-Element.Events = {mousewheel: {
-   base: (Browser.firefox) ? 'DOMMouseScroll' : 'mousewheel'
-}};
+Element.Events = {
+   mousewheel: {
+       base: 'onwheel' in document ? 'wheel' : 'onmousewheel' in document ? 'mousewheel' : 'DOMMouseScroll'
+   }
+};
+
+var check = function(event){
+   var related = event.relatedTarget;
+   if (related == null) return true;
+   if (!related) return false;
+   return (related != this && related.prefix != 'xul' && typeOf(this) != 'document' && !this.contains(related));
+};
 
 if ('onmouseenter' in document.documentElement){
    Element.NativeEvents.mouseenter = Element.NativeEvents.mouseleave = 2;
+   Element.MouseenterCheck = check;
 } else {
-   var check = function(event){
-       var related = event.relatedTarget;
-       if (related == null) return true;
-       if (!related) return false;
-       return (related != this && related.prefix != 'xul' && typeOf(this) != 'document' && !this.contains(related));
-   };
-
    Element.Events.mouseenter = {
        base: 'mouseover',
        condition: check
@@ -4035,12 +4147,12 @@ if (!window.addEventListener){
    Element.Events.change = {
        base: function(){
            var type = this.type;
-           return (this.get('tag') == 'input' && (type == 'radio' || type == 'checkbox')) ? 'propertychange' : 'change'
+           return (this.get('tag') == 'input' && (type == 'radio' || type == 'checkbox')) ? 'propertychange' : 'change';
        },
        condition: function(event){
-           return this.type != 'radio' || (event.event.propertyName == 'checked' && this.checked);
+           return event.type != 'propertychange' || event.event.propertyName == 'checked';
        }
-   }
+   };
 }
 /**/
@@ -4080,10 +4192,12 @@ var bubbleUp = function(self, match, fn, event, target){
 
 var map = {
    mouseenter: {
-       base: 'mouseover'
+       base: 'mouseover',
+       condition: Element.MouseenterCheck
    },
    mouseleave: {
-       base: 'mouseout'
+       base: 'mouseout',
+       condition: Element.MouseenterCheck
    },
    focus: {
        base: 'focus' + (eventListenerSupport ?
'' : 'in'),
@@ -4190,8 +4304,8 @@ var delegation = {
        };
 
        var elementEvent = Element.Events[_type];
-       if (elementEvent && elementEvent.condition){
-           var __match = match, condition = elementEvent.condition;
+       if (_map.condition || elementEvent && elementEvent.condition){
+           var __match = match, condition = _map.condition || elementEvent.condition;
            match = function(target, event){
                return __match(target, event) && condition.call(target, event, type);
            };
@@ -4226,7 +4340,7 @@ var delegation = {
            if (_map.remove) _map.remove(this, _uid);
            delete stored[_uid];
            storage[_type] = stored;
-           return removeEvent.call(this, type, delegator);
+           return removeEvent.call(this, type, delegator, _map.capture);
        }
 
        var __uid, s;
@@ -4344,7 +4458,9 @@ Element.implement({
    },
 
    getOffsets: function(){
-       if (this.getBoundingClientRect && !Browser.Platform.ios){
+       var hasGetBoundingClientRect = this.getBoundingClientRect;
+
+       if (hasGetBoundingClientRect){
            var bound = this.getBoundingClientRect(),
                html = document.id(this.getDocument().documentElement),
                htmlScroll = html.getScroll(),
@@ -4364,27 +4480,9 @@
                position.x += element.offsetLeft;
                position.y += element.offsetTop;
 
-               if (Browser.firefox){
-                   if (!borderBox(element)){
-                       position.x += leftBorder(element);
-                       position.y += topBorder(element);
-                   }
-                   var parent = element.parentNode;
-                   if (parent && styleString(parent, 'overflow') != 'visible'){
-                       position.x += leftBorder(parent);
-                       position.y += topBorder(parent);
-                   }
-               } else if (element != this && Browser.safari){
-                   position.x += leftBorder(element);
-                   position.y += topBorder(element);
-               }
-
                element = element.offsetParent;
            }
-           if (Browser.firefox && !borderBox(this)){
-               position.x -= leftBorder(this);
-               position.y -= topBorder(this);
-           }
+
            return position;
        },
@@ -4666,13 +4764,17 @@ var Fx = this.Fx = new Class({
    },
 
    resume: function(){
-       if ((this.frame < this.frames) && !this.isRunning()) pushInstance.call(this, this.options.fps);
+       if (this.isPaused()) pushInstance.call(this, this.options.fps);
        return this;
    },
 
    isRunning: function(){
        var list = instances[this.options.fps];
        return list && list.contains(this);
+   },
+
+   isPaused: function(){
+       return (this.frame < this.frames) && !this.isRunning();
    }
 
 });
@@ -4745,7 +4847,7 @@ Fx.CSS = new Class({
            from = element.getStyle(property);
            var unit = this.options.unit;
            // adapted from: https://github.com/ryanmorr/fx/blob/master/fx.js#L299
-           if (unit && from.slice(-unit.length) != unit && parseFloat(from) != 0){
+           if (unit && from && typeof from == 'string' && from.slice(-unit.length) != unit && parseFloat(from) != 0){
                element.setStyle(property, to + unit);
                var value = element.getComputedStyle(property);
                // IE and Opera support pixelLeft or pixelWidth
@@ -4817,11 +4919,13 @@ Fx.CSS = new Class({
    search: function(selector){
        if (Fx.CSS.Cache[selector]) return Fx.CSS.Cache[selector];
        var to = {}, selectorTest = new RegExp('^' + selector.escapeRegExp() + '$');
-       Array.each(document.styleSheets, function(sheet, j){
-           var href = sheet.href;
-           if (href && href.contains('://') && !href.contains(document.domain)) return;
-           var rules = sheet.rules || sheet.cssRules;
+
+       var searchStyles = function(rules){
            Array.each(rules, function(rule, i){
+               if (rule.media){
+                   searchStyles(rule.rules || rule.cssRules);
+                   return;
+               }
                if (!rule.style) return;
                var selectorText = (rule.selectorText) ? rule.selectorText.replace(/^\w+/, function(m){
                    return m.toLowerCase();
@@ -4833,6 +4937,13 @@ Fx.CSS = new Class({
                    to[style] = ((/^rgb/).test(value)) ?
value.rgbToHex() : value;
                });
            });
+       };
+
+       Array.each(document.styleSheets, function(sheet, j){
+           var href = sheet.href;
+           if (href && href.indexOf('://') > -1 && href.indexOf(document.domain) == -1) return;
+           var rules = sheet.rules || sheet.cssRules;
+           searchStyles(rules);
        });
        return Fx.CSS.Cache[selector] = to;
    }
@@ -5369,10 +5480,10 @@ var Request = this.Request = new Class({
        if (trimPosition > -1 && (trimPosition = url.indexOf('#')) > -1) url = url.substr(0, trimPosition);
 
        if (this.options.noCache)
-           url += (url.contains('?') ? '&' : '?') + String.uniqueID();
+           url += (url.indexOf('?') > -1 ? '&' : '?') + String.uniqueID();
 
-       if (data && method == 'get'){
-           url += (url.contains('?') ? '&' : '?') + data;
+       if (data && (method == 'get' || method == 'delete')){
+           url += (url.indexOf('?') > -1 ? '&' : '?') + data;
            data = null;
        }
@@ -5526,10 +5637,14 @@ JSON.encode = JSON.stringify ? function(obj){
    return null;
 };
 
+JSON.secure = true;
+
+
 JSON.decode = function(string, secure){
    if (!string || typeOf(string) != 'string') return null;
-
-   if (secure || JSON.secure){
+
+   if (secure == null) secure = JSON.secure;
+   if (secure){
        if (JSON.parse) return JSON.parse(string);
        if (!JSON.validate(string)) throw new Error('JSON could not decode the input; security is enabled and the value is not secure.');
    }
diff --git a/couchpotato/static/scripts/library/mootools_more.js b/couchpotato/static/scripts/library/mootools_more.js
index 77b3a7a1..2ff49cb3 100644
--- a/couchpotato/static/scripts/library/mootools_more.js
+++ b/couchpotato/static/scripts/library/mootools_more.js
@@ -1,6 +1,16 @@
-// MooTools: the javascript framework.
-// Load this file's selection again by visiting: http://mootools.net/more/0f75cfbac1aabbedaba7630beef8d10c
-// Or build this file again with packager using: packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical
+/*
+---
+MooTools: the javascript framework
+
+web build:
+ - http://mootools.net/more/0f75cfbac1aabbedaba7630beef8d10c
+
+packager build:
+ - packager build More/Events.Pseudos More/Date More/Date.Extras More/Element.Forms More/Element.Position More/Element.Shortcuts More/Fx.Scroll More/Fx.Slide More/Sortables More/Request.JSONP More/Request.Periodical
+
+...
+*/ + /* --- @@ -31,8 +41,8 @@ provides: [MooTools.More] */ MooTools.More = { - 'version': '1.4.0.1', - 'build': 'a4244edf2aa97ac8a196fc96082dd35af1abab87' + version: '1.5.0', + build: '73db5e24e6e9c5c87b3a27aebef2248053f7db37' }; @@ -48,7 +58,7 @@ license: MIT-style license authors: - Arian Stolwijk -requires: [Core/Class.Extras, Core/Slick.Parser, More/MooTools.More] +requires: [Core/Class.Extras, Core/Slick.Parser, MooTools.More] provides: [Events.Pseudos] @@ -211,7 +221,7 @@ authors: requires: - Core/Object - - /MooTools.More + - MooTools.More provides: [Object.Extras] @@ -280,8 +290,8 @@ authors: requires: - Core/Events - - /Object.Extras - - /MooTools.More + - Object.Extras + - MooTools.More provides: [Locale, Lang] @@ -444,7 +454,7 @@ authors: - Aaron Newton requires: - - /Locale + - Locale provides: [Locale.en-US.Date] @@ -1079,7 +1089,7 @@ authors: - Scott Kyle requires: - - /Date + - Date provides: [Date.Extras] @@ -1235,10 +1245,8 @@ var special = { 'S': /[ŠŞŚ]/g, 't': /[ťţ]/g, 'T': /[ŤŢ]/g, - 'ue': /[ü]/g, - 'UE': /[Ü]/g, - 'u': /[ùúûůµ]/g, - 'U': /[ÙÚÛŮ]/g, + 'u': /[ùúûůüµ]/g, + 'U': /[ÙÚÛŮÜ]/g, 'y': /[ÿý]/g, 'Y': /[ŸÝ]/g, 'z': /[žźż]/g, @@ -1263,7 +1271,16 @@ tidy = { '-': /[\u2013]/g, // '--': /[\u2014]/g, '»': /[\uFFFD]/g -}; +}, + +conversions = { + ms: 1, + s: 1000, + m: 6e4, + h: 36e5 +}, + +findUnits = /(\d*.?\d+)([msh]+)/; var walk = function(string, replacements){ var result = string, key; @@ -1325,6 +1342,13 @@ String.implement({ if (trail) string += trail; } return string; + }, + + ms: function(){ + // "Borrowed" from https://gist.github.com/1503944 + var units = findUnits.exec(this); + if (units == null) return Number(this); + return Number(units[1]) * conversions[units[2]]; } }); @@ -1348,8 +1372,8 @@ authors: requires: - Core/Element - - /String.Extras - - /MooTools.More + - String.Extras + - MooTools.More provides: [Element.Forms] @@ -1493,7 +1517,7 @@ authors: requires: - Core/Element.Style - Core/Element.Dimensions - - /MooTools.More + - MooTools.More provides: [Element.Measure] @@ -1710,13 +1734,15 @@ var local = Element.Position = { }, setOffsetOption: function(element, options){ - var parentOffset = {x: 0, y: 0}, - offsetParent = element.measure(function(){ - return document.id(this.getOffsetParent()); - }), - parentScroll = offsetParent.getScroll(); + var parentOffset = {x: 0, y: 0}; + var parentScroll = {x: 0, y: 0}; + var offsetParent = element.measure(function(){ + return document.id(this.getOffsetParent()); + }); if (!offsetParent || offsetParent == element.getDocument().body) return; + + parentScroll = offsetParent.getScroll(); parentOffset = offsetParent.measure(function(){ var position = this.getPosition(); if (this.getStyle('position') == 'fixed'){ @@ -1896,7 +1922,7 @@ authors: requires: - Core/Element.Style - - /MooTools.More + - MooTools.More provides: [Element.Shortcuts] @@ -1976,7 +2002,7 @@ requires: - Core/Fx - Core/Element.Event - Core/Element.Dimensions - - /MooTools.More + - MooTools.More provides: [Fx.Scroll] @@ -2014,7 +2040,6 @@ Fx.Scroll = new Class({ set: function(){ var now = Array.flatten(arguments); - if (Browser.firefox) now = [Math.round(now[0]), Math.round(now[1])]; // not needed anymore in newer firefox versions this.element.scrollTo(now[0], now[1]); return this; }, @@ -2148,7 +2173,7 @@ authors: requires: - Core/Fx - Core/Element.Style - - /MooTools.More + - MooTools.More provides: [Fx.Slide] @@ -2325,7 +2350,7 @@ requires: - Core/Element.Event - Core/Element.Style - Core/Element.Dimensions - - /MooTools.More + - 
MooTools.More provides: [Drag] ... @@ -2371,10 +2396,10 @@ var Drag = new Class({ this.mouse = {'now': {}, 'pos': {}}; this.value = {'start': {}, 'now': {}}; - this.selection = (Browser.ie) ? 'selectstart' : 'mousedown'; + this.selection = 'selectstart' in document ? 'selectstart' : 'mousedown'; - if (Browser.ie && !Drag.ondragstartFixed){ + if ('ondragstart' in document && !('FileReader' in window) && !Drag.ondragstartFixed){ document.ondragstart = Function.from(false); Drag.ondragstartFixed = true; } @@ -2559,7 +2584,7 @@ authors: requires: - Core/Element.Dimensions - - /Drag + - Drag provides: [Drag.Move] @@ -2586,10 +2611,7 @@ Drag.Move = new Class({ element = this.element; this.droppables = $$(this.options.droppables); - this.container = document.id(this.options.container); - - if (this.container && typeOf(this.container) != 'element') - this.container = document.id(this.container.getDocument().body); + this.setContainer(this.options.container); if (this.options.style){ if (this.options.modifiers.x == 'left' && this.options.modifiers.y == 'top'){ @@ -2606,6 +2628,13 @@ Drag.Move = new Class({ this.addEvent('start', this.checkDroppables, true); this.overed = null; }, + + setContainer: function(container) { + this.container = document.id(container); + if (this.container && typeOf(this.container) != 'element'){ + this.container = document.id(this.container.getDocument().body); + } + }, start: function(event){ if (this.container) this.options.limit = this.calculateLimit(); @@ -2670,7 +2699,9 @@ Drag.Move = new Class({ if (container != offsetParent){ left += containerMargin.left + offsetParentPadding.left; - top += ((Browser.ie6 || Browser.ie7) ? 0 : containerMargin.top) + offsetParentPadding.top; + if (!offsetParentPadding.left && left < 0) left = 0; + top += offsetParent == document.body ? 
0 : containerMargin.top + offsetParentPadding.top; + if (!offsetParentPadding.top && top < 0) top = 0; } } else { left -= elementMargin.left; @@ -2754,7 +2785,7 @@ authors: requires: - Core/Fx.Morph - - /Drag.Move + - Drag.Move provides: [Sortables] @@ -2773,7 +2804,8 @@ var Sortables = new Class({ clone: false, revert: false, handle: false, - dragOptions: {} + dragOptions: {}, + unDraggableTags: ['button', 'input', 'a', 'textarea', 'select', 'option'] }, initialize: function(lists, options){ @@ -2839,6 +2871,24 @@ var Sortables = new Class({ return list; }, this)); }, + + getDroppableCoordinates: function (element){ + var offsetParent = element.getOffsetParent(); + var position = element.getPosition(offsetParent); + var scroll = { + w: window.getScroll(), + offsetParent: offsetParent.getScroll() + }; + position.x += scroll.offsetParent.x; + position.y += scroll.offsetParent.y; + + if (offsetParent.getStyle('position') == 'fixed'){ + position.x -= scroll.w.x; + position.y -= scroll.w.y; + } + + return position; + }, getClone: function(event, element){ if (!this.options.clone) return new Element(element.tagName).inject(document.body); @@ -2859,7 +2909,7 @@ var Sortables = new Class({ }); } - return clone.inject(this.list).setPosition(element.getPosition(element.getOffsetParent())); + return clone.inject(this.list).setPosition(this.getDroppableCoordinates(this.element)); }, getDroppables: function(){ @@ -2884,7 +2934,7 @@ var Sortables = new Class({ if ( !this.idle || event.rightClick || - ['button', 'input', 'a', 'textarea'].contains(event.target.get('tag')) + (!this.options.handle && this.options.unDraggableTags.contains(event.target.get('tag'))) ) return; this.idle = false; @@ -2915,14 +2965,16 @@ var Sortables = new Class({ end: function(){ this.drag.detach(); this.element.setStyle('opacity', this.opacity); + var self = this; if (this.effect){ var dim = this.element.getStyles('width', 'height'), clone = this.clone, - pos = clone.computePosition(this.element.getPosition(this.clone.getOffsetParent())); + pos = clone.computePosition(this.getDroppableCoordinates(clone)); var destroy = function(){ this.removeEvent('cancel', destroy); clone.destroy(); + self.reset(); }; this.effect.element = clone; @@ -2935,8 +2987,9 @@ var Sortables = new Class({ }).addEvent('cancel', destroy).chain(destroy); } else { this.clone.destroy(); + self.reset(); } - this.reset(); + }, reset: function(){ @@ -3125,7 +3178,7 @@ authors: requires: - Core/Request - - /MooTools.More + - MooTools.More provides: [Request.Periodical] diff --git a/couchpotato/static/scripts/library/question.js b/couchpotato/static/scripts/library/question.js index ed53b391..64feadbf 100644 --- a/couchpotato/static/scripts/library/question.js +++ b/couchpotato/static/scripts/library/question.js @@ -1,15 +1,15 @@ var Question = new Class( { initialize : function(question, hint, answers) { - var self = this + var self = this; - self.question = question - self.hint = hint - self.answers = answers + self.question = question; + self.hint = hint; + self.answers = answers; self.createQuestion(); self.answers.each(function(answer) { - self.createAnswer(answer) + self.createAnswer(answer); }) }, @@ -29,14 +29,14 @@ var Question = new Class( { }, createAnswer : function(options) { - var self = this + var self = this; var answer = new Element('a', Object.merge(options, { 'class' : 'answer button '+(options['class'] || '')+(options['cancel'] ? 
' cancel' : '') - })).inject(this.container) + })).inject(this.container); if (options.cancel) { - answer.addEvent('click', self.close.bind(self)) + answer.addEvent('click', self.close.bind(self)); } else if (options.request) { answer.addEvent('click', function(e){ @@ -44,7 +44,7 @@ var Question = new Class( { new Request(Object.merge(options, { 'url': options.href, 'onComplete': function() { - (options.onComplete || function(){})() + (options.onComplete || function(){})(); self.close(); } })).send(); @@ -59,7 +59,7 @@ var Question = new Class( { }, toElement : function() { - return this.container + return this.container; } -}) +}); diff --git a/couchpotato/static/scripts/page.js b/couchpotato/static/scripts/page.js index 58ba5acd..57b8b108 100644 --- a/couchpotato/static/scripts/page.js +++ b/couchpotato/static/scripts/page.js @@ -6,6 +6,7 @@ var PageBase = new Class({ }, + order: 1, has_tab: true, name: '', @@ -16,6 +17,10 @@ var PageBase = new Class({ // Create main page container self.el = new Element('div.page.'+self.name); + }, + + load: function(){ + var self = this; // Create tab for page if(self.has_tab){ @@ -26,6 +31,7 @@ var PageBase = new Class({ 'text': self.name.capitalize() }); } + }, open: function(action, params){ diff --git a/couchpotato/static/scripts/page/about.js b/couchpotato/static/scripts/page/about.js index d0439bdd..f36f7e48 100644 --- a/couchpotato/static/scripts/page/about.js +++ b/couchpotato/static/scripts/page/about.js @@ -6,7 +6,7 @@ var AboutSettingTab = new Class({ initialize: function(){ var self = this; - App.addEvent('load', self.addSettings.bind(self)) + App.addEvent('loadSettings', self.addSettings.bind(self)) }, @@ -124,4 +124,4 @@ var AboutSettingTab = new Class({ window.addEvent('domready', function(){ new AboutSettingTab(); -}); \ No newline at end of file +}); diff --git a/couchpotato/static/scripts/page/home.js b/couchpotato/static/scripts/page/home.js index 818ce0ed..792b4a07 100644 --- a/couchpotato/static/scripts/page/home.js +++ b/couchpotato/static/scripts/page/home.js @@ -21,7 +21,9 @@ Page.Home = new Class({ self.chain.chain( self.createAvailable.bind(self), self.createSoon.bind(self), + self.createSuggestionsChartsMenu.bind(self), self.createSuggestions.bind(self), + self.createCharts.bind(self), self.createLate.bind(self) ); @@ -52,7 +54,8 @@ Page.Home = new Class({ }) ), 'filter': { - 'release_status': 'snatched,seeding,missing,available,downloaded' + 'release_status': 'snatched,missing,available,downloaded,done,seeding', + 'with_tags': 'recent' }, 'limit': null, 'onLoaded': function(){ @@ -62,12 +65,12 @@ Page.Home = new Class({ // Track movie added var after_search = function(data){ - if(notification.data.id != data.data.id) return; + if(notification.data._id != data.data._id) return; // Force update after search self.available_list.update(); App.off('movie.searcher.ended', after_search); - } + }; App.on('movie.searcher.ended', after_search); } @@ -102,14 +105,13 @@ Page.Home = new Class({ // Make all thumbnails the same size self.soon_list.addEvent('loaded', function(){ - var images = $(self.soon_list).getElements('.poster'), + var images = $(self.soon_list).getElements('.poster, .no_thumbnail'), timer, highest = 100; - images.each(function(img_container){ - img_container.getElements('img').addEvent('load', function(){ - var img = this, - height = img.getSize().y; + images.each(function(img){ + img.addEvent('load', function(){ + var height = img.getSize().y; if(!highest || highest < height){ highest = height; if(timer) 
clearTimeout(timer); @@ -124,10 +126,8 @@ Page.Home = new Class({ if(timer) clearTimeout(timer); timer = (function(){ var highest = 100; - images.each(function(img_container){ - var img = img_container.getElement('img'); - if(!img) return - + images.each(function(img){ + img.setStyle('height', null); var height = img.getSize().y; if(!highest || highest < height) highest = height; @@ -154,7 +154,78 @@ Page.Home = new Class({ $(self.suggestion_list).inject(self.el); + }, + createCharts: function(){ + var self = this; + + // Charts + self.charts = new Charts({ + 'onCreated': function(){ + self.chain.callChain(); + } + }); + + $(self.charts).inject(self.el); + + }, + + createSuggestionsChartsMenu: function(){ + var self = this; + + self.el_toggle_menu_suggestions = new Element('a.toggle_suggestions.active', { + 'href': '#', + 'events': { 'click': function(e) { + e.preventDefault(); + self.toggleSuggestionsCharts('suggestions'); + } + } + }).grab( new Element('h2', {'text': 'Suggestions'})); + + self.el_toggle_menu_charts = new Element('a.toggle_charts', { + 'href': '#', + 'events': { 'click': function(e) { + e.preventDefault(); + self.toggleSuggestionsCharts('charts'); + } + } + }).grab( new Element('h2', {'text': 'Charts'})); + + self.el_toggle_menu = new Element('div.toggle_menu').grab( + self.el_toggle_menu_suggestions + ).grab( + self.el_toggle_menu_charts + ); + + var menu_selected = Cookie.read('suggestions_charts_menu_selected'); + if( menu_selected === null ) menu_selected = 'suggestions'; + self.toggleSuggestionsCharts( menu_selected ); + + self.el_toggle_menu.inject(self.el); + + self.chain.callChain(); + + }, + + toggleSuggestionsCharts: function(menu_id){ + var self = this; + + switch(menu_id) { + case 'suggestions': + if($(self.suggestion_list)) $(self.suggestion_list).show(); + self.el_toggle_menu_suggestions.addClass('active'); + if($(self.charts)) $(self.charts).hide(); + self.el_toggle_menu_charts.removeClass('active'); + break; + case 'charts': + if($(self.charts)) $(self.charts).show(); + self.el_toggle_menu_charts.addClass('active'); + if($(self.suggestion_list)) $(self.suggestion_list).hide(); + self.el_toggle_menu_suggestions.removeClass('active'); + break; + } + + Cookie.write('suggestions_charts_menu_selected', menu_id, {'duration': 365}); }, createLate: function(){ diff --git a/couchpotato/static/scripts/page/settings.js b/couchpotato/static/scripts/page/settings.js index 74f3c7f0..b9f72aba 100644 --- a/couchpotato/static/scripts/page/settings.js +++ b/couchpotato/static/scripts/page/settings.js @@ -2,6 +2,7 @@ Page.Settings = new Class({ Extends: PageBase, + order: 50, name: 'settings', title: 'Change settings.', wizard_only: false, @@ -26,8 +27,8 @@ Page.Settings = new Class({ }, openTab: function(action){ - var self = this; - var action = (action == 'index' ? 'about' : action) || self.action; + var self = this, + action = (action == 'index' ? 
'about' : action) || self.action; if(self.current) self.toggleTab(self.current, true); @@ -112,14 +113,20 @@ Page.Settings = new Class({ }, sortByOrder: function(a, b){ - return (a.order || 100) - (b.order || 100) + return (a.order || 100) - (b.order || 100) }, create: function(json){ var self = this; self.tabs_container = new Element('ul.tabs'); - self.containers = new Element('form.uniForm.containers').adopt( + self.containers = new Element('form.uniForm.containers', { + 'events': { + 'click:relay(.enabler.disabled h2)': function(e, el){ + el.getPrevious().getElements('.check').fireEvent('click'); + } + } + }).adopt( new Element('label.advanced_toggle').adopt( new Element('span', { 'text': 'Show advanced settings' @@ -284,14 +291,23 @@ Page.Settings = new Class({ }) } + var icon; + if(group.icon){ + icon = new Element('span.icon').grab(new Element('img', { + 'src': 'data:image/png;base64,' + group.icon + })); + } + + var label = new Element('span.group_label', { + 'text': group.label || (group.name).capitalize() + }) return new Element('fieldset', { 'class': (group.advanced ? 'inlineLabels advanced' : 'inlineLabels') + ' group_' + (group.name || '') + ' subtab_' + (group.subtab || '') }).grab( - new Element('h2', { - 'text': group.label || (group.name).capitalize() - }).grab(hint) - ); + new Element('h2').adopt(icon, label, hint) + ); + }, createList: function(content_container){ @@ -370,7 +386,7 @@ var OptionBase = new Class({ createTooltip(self.options.description[1]).inject(hint, 'top'); } else { - var hint = new Element('p.formHint', { + new Element('p.formHint', { 'html': self.options.description || '' }).inject(self.el) } @@ -1312,8 +1328,9 @@ Option.Combined = new Class({ var head = new Element('div.head').inject(self.combined_list); Object.each(self.inputs, function(input, name){ + var _in = input.getNext(); self.labels[name] = input.getPrevious().get('text'); - self.descriptions[name] = (_in = input.getNext()) ? _in.get('text') : ''; + self.descriptions[name] = _in ? 
_in.get('text') : ''; new Element('abbr', { 'class': name, @@ -1465,4 +1482,4 @@ var createTooltip = function(description){ ); return tip; -} +}; diff --git a/couchpotato/static/style/api.css b/couchpotato/static/style/api.css index 0c9f0f08..3d6952d6 100644 --- a/couchpotato/static/style/api.css +++ b/couchpotato/static/style/api.css @@ -92,4 +92,71 @@ pre { .api .return { float: left; width: 700px; - } \ No newline at end of file + } + +.database { + padding: 20px; + margin: 0; +} + + .database * { + margin: 0; + padding: 0; + } + + .database .nav { + } + .database .nav li { + display: inline-block; + } + .database .nav li a { + padding: 5px; + } + + .database table { + font-size: 11px; + } + + .database table th { + text-align: left; + } + + .database table tr:hover { + position: relative; + z-index: 20; + } + + .database table td { + vertical-align: top; + position: relative; + } + + .database table .id { + width: 100px; + } + + .database table ._rev { + width: 60px; + } + + .database table ._t { + width: 60px; + } + + .database table .form { + width: 600px; + } + + .database table form { + width: 600px; + } + + .database textarea { + font-size: 12px; + width: 100%; + height: 200px; + } + + .database input[type=submit] { + display: block; + } diff --git a/couchpotato/static/style/main.css b/couchpotato/static/style/main.css index 0ade5182..c7850b22 100644 --- a/couchpotato/static/style/main.css +++ b/couchpotato/static/style/main.css @@ -8,6 +8,7 @@ body, html { padding: 0; background: #4e5969; -webkit-font-smoothing: subpixel-antialiased; + -moz-osx-font-smoothing: grayscale; } body { overflow-y: scroll; } body.noscroll { overflow: hidden; } @@ -52,6 +53,30 @@ input:-moz-placeholder { font-style: italic; } +.tiny_scroll { + overflow: hidden; +} + + .tiny_scroll:hover { + overflow-y: auto; + } + + .tiny_scroll::-webkit-scrollbar { + width: 5px; + } + + .tiny_scroll::-webkit-scrollbar-track { + -webkit-box-shadow: inset 0 0 6px rgba(0,0,0,0.4); + -webkit-border-radius: 5px; + border-radius: 5px; + } + + .tiny_scroll::-webkit-scrollbar-thumb { + -webkit-border-radius: 5px; + border-radius: 5px; + background: rgba(255,255,255,0.3); + } + a img { border:none; } @@ -111,7 +136,7 @@ body > .spinner, .mask{ width: 100%; padding: 200px; } - + @media all and (max-width: 480px) { body > .mask { padding: 20px; @@ -166,6 +191,7 @@ body > .spinner, .mask{ text-transform: none; line-height: 1; -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; font-size: 15px; color: #FFF; } @@ -185,9 +211,9 @@ body > .spinner, .mask{ .icon2.completed:before { content: "\e070"; } .icon2.info:before { content: "\e089"; } .icon2.attention:before { content: "\e009"; } -.icon2.readd:before { - display: inline-block; - content: "\e04b"; +.icon2.readd:before { + display: inline-block; + content: "\e04b"; transform: scale(-1, 1); } .icon2.imdb:before { @@ -200,8 +226,8 @@ body > .spinner, .mask{ position: relative; top: -3px; } -.icon2.menu:before { - content: "\e076\00a0 \e076\00a0 \e076\00a0"; +.icon2.menu:before { + content: "\e076\00a0 \e076\00a0 \e076\00a0"; line-height: 6px; transform: scaleX(2); width: 20px; @@ -213,7 +239,7 @@ body > .spinner, .mask{ margin-left: 5px; } @media screen and (-webkit-min-device-pixel-ratio:0) { - .icon2.menu:before { + .icon2.menu:before { margin-top: -7px; } } @@ -229,7 +255,7 @@ body > .spinner, .mask{ box-shadow: 0 0 10px rgba(0,0,0,.1); transition: all .4s ease-in-out; } - + @media all and (max-width: 480px) { .header { height: 44px; @@ -253,7 +279,7 @@ body 
> .spinner, .mask{ left: 0; bottom: 0; } - + .header .foldout { width: 44px; height: 100%; @@ -264,7 +290,7 @@ body > .spinner, .mask{ line-height: 42px; color: #FFF; } - + .header .logo { display: inline-block; font-size: 3em; @@ -274,36 +300,36 @@ body > .spinner, .mask{ color: #FFF; font-weight: normal; vertical-align: top; - font-family: Lobster; + font-family: Lobster, sans-serif; } - + @media all and (max-width: 480px) { .header .foldout { display: inline-block; } - + .header .logo { padding-top: 7px; border: 0; font-size: 1.7em; } } - + @media all and (min-width: 481px) and (max-width: 640px) { - + .header .logo { display: none; } - + } - + .header .navigation ul { display: inline-block; margin: 0; padding: 0; height: 100%; } - + .header .navigation li { color: #fff; display: inline-block; @@ -317,7 +343,7 @@ body > .spinner, .mask{ .header .navigation li:first-child { border: none; } - + .header .navigation li a { display: block; padding: 15px; @@ -327,13 +353,13 @@ body > .spinner, .mask{ border-width: 0 0 4px 0; font-weight: normal; } - + .header .navigation li:hover a { border-color: #047792; } .header .navigation li.active a { border-color: #04bce6; } - + .header .navigation li.disabled { color: #e5e5e5; } .header .navigation li a { color: #fff; } - + .header .navigation .backtotop { opacity: 0; display: block; @@ -349,24 +375,24 @@ body > .spinner, .mask{ font-weight: normal; } .header:hover .navigation .backtotop { color: #fff; } - + @media all and (max-width: 480px) { - + body { position: absolute; width: 100%; transition: all .5s cubic-bezier(0.9,0,0.1,1); left: 0; } - + .menu_shown body { left: 240px; } - + .header .navigation { height: 100%; } - + .menu_shown .header .navigation .overlay { position: fixed; right: 0; @@ -374,7 +400,7 @@ body > .spinner, .mask{ bottom: 0; left: 240px; } - + .header .navigation ul { width: 240px; position: fixed; @@ -382,11 +408,11 @@ body > .spinner, .mask{ background: rgba(0,0,0,.5); transition: all .5s cubic-bezier(0.9,0,0.1,1); } - + .menu_shown .header .navigation ul { left: 0; } - + .header .navigation ul li { display: block; text-align: left; @@ -397,7 +423,7 @@ body > .spinner, .mask{ border-width: 0 4px 0 0; padding: 5px 20px; } - + .header .navigation ul li.separator { background-color: rgba(255,255,255, .07); height: 5px; @@ -410,31 +436,31 @@ body > .spinner, .mask{ height: 100%; border-left: 1px solid rgba(255,255,255,.07); } - + @media all and (max-width: 480px) { .header .more_menu { display: none; } } - + .header .more_menu .button { height: 100%; line-height: 66px; text-align: center; padding: 0; } - + .header .more_menu .wrapper { width: 200px; margin-left: -106px; margin-top: 22px; } - + @media all and (max-width: 480px) { .header .more_menu .button { line-height: 44px; } - + .header .more_menu .wrapper { margin-top: 0; } @@ -454,12 +480,12 @@ body > .spinner, .mask{ top: 0; right: 0; } - + .header .notification_menu { right: 60px; display: block; } - + @media all and (max-width: 480px) { .header .notification_menu { right: 0; @@ -685,15 +711,15 @@ body > .spinner, .mask{ border-bottom: 4px solid transparent; border-radius: 0; } - + .more_menu .button:hover { border-color: #047792; } - + .more_menu.show .button { border-color: #04bce6; } - + .more_menu .wrapper { display: none; top: 0; @@ -706,23 +732,23 @@ body > .spinner, .mask{ color: #444; background: #fff; } - + .more_menu.show .wrapper { display: block; top: 44px; } - + .more_menu ul { padding: 0; margin: 0; list-style: none; } - + .more_menu .wrapper li { width: 
100%; height: auto; } - + .more_menu .wrapper li a { display: block; border-bottom: 1px solid rgba(255,255,255,0.2); @@ -735,21 +761,21 @@ body > .spinner, .mask{ } .more_menu .wrapper li:first-child a { padding-top: 5px; } .more_menu .wrapper li:last-child a { padding-bottom: 5px; } - + .more_menu .wrapper li .separator { border-bottom: 1px solid rgba(0,0,0,.1); display: block; height: 1px; margin: 5px 0; } - + .more_menu .wrapper li:last-child a { border: none; } .more_menu .wrapper li a:hover { background: rgba(0,0,0,0.05); } - + .messages { position: fixed; right: 0; @@ -823,7 +849,7 @@ body > .spinner, .mask{ margin: -200px 0 0 -200px; } @media all and (max-width: 480px) { - + .login form { padding: 0; height: 300px; @@ -833,9 +859,9 @@ body > .spinner, .mask{ top: 10px; margin: 0; } - + } - + .page.login .ctrlHolder { padding: 0; margin: 0 0 20px; @@ -843,31 +869,31 @@ body > .spinner, .mask{ .page.login .ctrlHolder:hover { background: none; } - + .page.login input[type=text], .page.login input[type=password] { width: 100% !important; font-size: 25px; padding: 14px !important; } - + .page.login .remember_me { font-size: 15px; float: left; width: 150px; padding: 20px 0; } - + .page.login .remember_me .check { margin: 5px 5px 0 0; } - + .page.login .button { font-size: 25px; padding: 20px; float: right; } - + /* Fonts */ @font-face { font-family: 'Elusive-Icons'; @@ -936,4 +962,4 @@ body > .spinner, .mask{ url('../fonts/Lobster-webfont.svg#lobster_1.4regular') format('svg'); font-weight: normal; font-style: normal; -} \ No newline at end of file +} diff --git a/couchpotato/static/style/settings.css b/couchpotato/static/style/settings.css index 5bb226f3..50b305eb 100644 --- a/couchpotato/static/style/settings.css +++ b/couchpotato/static/style/settings.css @@ -92,6 +92,22 @@ border-bottom: 1px solid #333; box-shadow: 0 1px 0 rgba(255,255,255, 0.15); } + + .page fieldset h2 .icon { + vertical-align: bottom; + position: absolute; + left: -25px; + top: 3px; + background: #FFF; + border-radius: 2.5px; + line-height: 0; + overflow: hidden; + } + + .page fieldset.enabler:hover h2 .icon { + display: none; + } + .page fieldset h2 .hint { font-size: 12px; margin-left: 10px; @@ -200,17 +216,23 @@ .page .option_list .enabler.disabled { display: inline-block; - margin: 3px 3px 3px 20px; - padding: 4px 0; - width: 173px; + padding: 4px 0 5px; + width: 24%; vertical-align: top; } + .page .option_list .enabler:not(.disabled) .icon { + display: none; + } .page .option_list .enabler.disabled h2 { + cursor: pointer; border: none; box-shadow: none; - padding: 0 10px 0 25px; + padding: 0 10px 0 0; font-size: 16px; + position: relative; + left: 25px; + margin-right: 25px; } .page .option_list .enabler:not(.disabled) h2 { @@ -698,12 +720,18 @@ } .group_userscript { - background: center bottom no-repeat; - min-height: 360px; + background: 5px 75px no-repeat; + min-height: 460px; font-size: 20px; font-weight: normal; } + .settings .group_userscript { + background-position: center 120px; + background-size: auto 70%; + min-height: 360px; + } + .group_userscript h2 .hint { display: block; margin: 0 !important; @@ -741,7 +769,7 @@ .tooltip { position: absolute; - right: 0px; + right: 0; width: 30px; height: 30px; } diff --git a/couchpotato/templates/api.html b/couchpotato/templates/api.html index 1a5c4ce3..11d7ea9d 100644 --- a/couchpotato/templates/api.html +++ b/couchpotato/templates/api.html @@ -10,7 +10,7 @@

CouchPotato API Documentation

You can access the API via
{{ Env.get('api_base') }}
- To see it in action, have a look at the webinterface with Firebug (on firefox) or the development tools included in Chrome. + To see it in action, have a look at the web interface with Firebug (on Firefox) or the development tools included in Chrome. All the data you see there comes from the API.
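For example, with the default address from the README, a request would look like http://localhost:5050/api/APIKEY/app.available/ (APIKEY being a placeholder for your own key).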

@@ -26,7 +26,7 @@ Will return {"api_key": "XXXXXXXXXX", "success": true}. When username or password is empty you don't need to md5 it.
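An illustrative request, assuming the u/p parameter names used by this route (an assumption; verify against the route's handler): http://localhost:5050/getkey/?u=md5(username)&p=md5(password)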
- + {% for route in routes %} {% if api_docs.get(route) %}
@@ -68,4 +68,4 @@
- \ No newline at end of file + diff --git a/couchpotato/templates/database.html b/couchpotato/templates/database.html new file mode 100644 index 00000000..e5125387 --- /dev/null +++ b/couchpotato/templates/database.html @@ -0,0 +1,144 @@ +{% autoescape None %} + + + + + + + + + + Document Manager + + + +

Documents

+
+ + + diff --git a/couchpotato/templates/index.html b/couchpotato/templates/index.html index 52a4491b..0d8acbc1 100644 --- a/couchpotato/templates/index.html +++ b/couchpotato/templates/index.html @@ -69,18 +69,14 @@ 'qualities': {{ json_encode(fireEvent('quality.all', single = True)) }} }); - Status.setup({{ json_encode(fireEvent('status.all', single = True)) }}); - - File.Type.setup({{ json_encode(fireEvent('file.types', single = True)) }}); - CategoryList.setup({{ json_encode(fireEvent('category.all', single = True)) }}); App.setup({ 'base_url': {{ json_encode(Env.get('web_base')) }}, - 'args': {{ json_encode(Env.get('args')) }}, + 'args': {{ json_encode(Env.get('args', unicode = True)) }}, 'options': {{ json_encode(('%s' % Env.get('options'))) }}, - 'app_dir': {{ json_encode(Env.get('app_dir')) }}, - 'data_dir': {{ json_encode(Env.get('data_dir')) }}, + 'app_dir': {{ json_encode(Env.get('app_dir', unicode = True)) }}, + 'data_dir': {{ json_encode(Env.get('data_dir', unicode = True)) }}, 'pid': {{ json_encode(Env.getPid()) }}, 'userscript_version': {{ json_encode(fireEvent('userscript.get_version', single = True)) }} }); @@ -95,4 +91,4 @@ CouchPotato - \ No newline at end of file + diff --git a/init/couchpotato.fedora.service b/init/couchpotato.fedora.service index 7df166bc..d3b52ba5 100644 --- a/init/couchpotato.fedora.service +++ b/init/couchpotato.fedora.service @@ -1,12 +1,13 @@ [Unit] Description=CouchPotato application instance +After=network.target [Service] -ExecStart=/usr/lib/CouchPotatoServer/CouchPotato.py --daemon +ExecStart=/var/lib/CouchPotatoServer/CouchPotato.py --daemon GuessMainPID=no Type=forking User=couchpotato Group=couchpotato [Install] -WantedBy=multi-user.target \ No newline at end of file +WantedBy=multi-user.target diff --git a/libs/CodernityDB/__init__.py b/libs/CodernityDB/__init__.py new file mode 100644 index 00000000..8399a60f --- /dev/null +++ b/libs/CodernityDB/__init__.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +__version__ = '0.4.2' +__license__ = "Apache 2.0" diff --git a/libs/CodernityDB/database.py b/libs/CodernityDB/database.py new file mode 100644 index 00000000..064836f1 --- /dev/null +++ b/libs/CodernityDB/database.py @@ -0,0 +1,1214 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
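A minimal usage sketch for the Database class introduced below (illustrative only; the path and document fields are hypothetical, not part of this file):

    from CodernityDB.database import Database

    db = Database('/tmp/example_db')   # hypothetical location
    db.create()                        # creates the storage plus the unique id index
    rec = db.insert({'title': 'x'})    # returns {'_id': ..., '_rev': ...}
    doc = db.get('id', rec['_id'])     # fetch the record back via the id index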
+ +import os +import io +from inspect import getsource + +# for custom indexes +from CodernityDB.storage import Storage, IU_Storage +from CodernityDB.hash_index import (IU_UniqueHashIndex, + IU_HashIndex, + HashIndex, + UniqueHashIndex) +# normal imports + +from CodernityDB.index import (ElemNotFound, + DocIdNotFound, + IndexException, + Index, + TryReindexException, + ReindexException, + IndexNotFoundException, + IndexConflict) + +from CodernityDB.misc import NONE + +from CodernityDB.env import cdb_environment + +from random import randrange + +import warnings + + +def header_for_indexes(index_name, index_class, db_custom="", ind_custom="", classes_code=""): + return """# %s +# %s + +# inserted automatically +import os +import marshal + +import struct +import shutil + +from hashlib import md5 + +# custom db code start +# db_custom +%s + +# custom index code start +# ind_custom +%s + +# source of classes in index.classes_code +# classes_code +%s + +# index code start + +""" % (index_name, index_class, db_custom, ind_custom, classes_code) + + +class DatabaseException(Exception): + pass + + +class PreconditionsException(DatabaseException): + pass + + +class RecordDeleted(DatabaseException): + pass + + +class RecordNotFound(DatabaseException): + pass + + +class RevConflict(DatabaseException): + pass + + +class DatabaseConflict(DatabaseException): + pass + + +class DatabasePathException(DatabaseException): + pass + + +class DatabaseIsNotOpened(PreconditionsException): + pass + + +class Database(object): + """ + A default single thread database object. + """ + + custom_header = "" # : use it for imports required by your database + + def __init__(self, path): + self.path = path + self.storage = None + self.indexes = [] + self.id_ind = None + self.indexes_names = {} + self.opened = False + + def create_new_rev(self, old_rev=None): + """ + Creates new revision number based on previous one. + Increments it + random bytes. On overflow starts from 0 again. + """ + if old_rev: + try: + rev_num = int(old_rev[:4], 16) + except: + raise RevConflict() + rev_num += 1 + if rev_num > 65025: + # starting the counter from 0 again + rev_num = 0 + rnd = randrange(65536) + return "%04x%04x" % (rev_num, rnd) + else: + # new rev + rnd = randrange(256 ** 2) + return '0001%04x' % rnd + + def __not_opened(self): + if not self.opened: + raise DatabaseIsNotOpened("Database is not opened") + + def set_indexes(self, indexes=[]): + """ + Set indexes using ``indexes`` param + + :param indexes: indexes to set in db + :type indexes: iterable of :py:class:`CodernityDB.index.Index` objects. + + """ + for ind in indexes: + self.add_index(ind, create=False) + + def _add_single_index(self, p, i, index): + """ + Adds single index to a database. + It will use :py:meth:`inspect.getsource` to get class source. + Then it will build real index file, save it in ``_indexes`` directory. 
+ """ + code = getsource(index.__class__) + if not code.startswith('c'): # fix for indented index codes + import textwrap + code = textwrap.dedent(code) + index._order = i + cls_code = getattr(index, 'classes_code', []) + classes_code = "" + for curr in cls_code: + classes_code += getsource(curr) + '\n\n' + with io.FileIO(os.path.join(p, "%.2d%s" % (i, index.name) + '.py'), 'w') as f: + f.write(header_for_indexes(index.name, + index.__class__.__name__, + getattr(self, 'custom_header', ''), + getattr(index, 'custom_header', ''), + classes_code)) + f.write(code) + return True + + def _read_index_single(self, p, ind, ind_kwargs={}): + """ + It will read single index from index file (ie. generated in :py:meth:`._add_single_index`). + Then it will perform ``exec`` on that code + + If error will occur the index file will be saved with ``_broken`` suffix + + :param p: path + :param ind: index name (will be joined with *p*) + :returns: new index object + """ + with io.FileIO(os.path.join(p, ind), 'r') as f: + name = f.readline()[2:].strip() + _class = f.readline()[2:].strip() + code = f.read() + try: + obj = compile(code, '', f.__name__, repr(args[1:]) + res = f(*args, **kwargs) +# if db.opened: +# db.flush() +# print '<=', f.__name__, repr(args[1:]) + return res + return _inner + + def __new__(cls, classname, bases, attr): + new_attr = {} + for base in bases: + for b_attr in dir(base): + a = getattr(base, b_attr, None) + if isinstance(a, MethodType) and not b_attr.startswith('_'): + if b_attr == 'flush' or b_attr == 'flush_indexes': + pass + else: + # setattr(base, b_attr, SuperLock.wrapper(a)) + new_attr[b_attr] = SuperLock.wrapper(a) + for attr_name, attr_value in attr.iteritems(): + if isinstance(attr_value, FunctionType) and not attr_name.startswith('_'): + attr_value = SuperLock.wrapper(attr_value) + new_attr[attr_name] = attr_value + new_attr['super_lock'] = RLock() + return type.__new__(cls, classname, bases, new_attr) + + +class SuperThreadSafeDatabase(Database): + """ + Thread safe version that always allows single thread to use db. + It adds the same lock for all methods, so only one operation can be + performed in given time. 
Completely different implementation + than ThreadSafe version (without super word) + """ + + __metaclass__ = SuperLock + + def __init__(self, *args, **kwargs): + super(SuperThreadSafeDatabase, self).__init__(*args, **kwargs) + + def __patch_index_gens(self, name): + ind = self.indexes_names[name] + for c in ('all', 'get_many'): + m = getattr(ind, c) + if getattr(ind, c + "_orig", None): + return + m_fixed = th_safe_gen.wrapper(m, name, c, self.super_lock) + setattr(ind, c, m_fixed) + setattr(ind, c + '_orig', m) + + def open(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).open(*args, **kwargs) + for name in self.indexes_names.iterkeys(): + self.__patch_index_gens(name) + return res + + def create(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).create(*args, **kwargs) + for name in self.indexes_names.iterkeys(): + self.__patch_index_gens(name) + return res + + def add_index(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).add_index(*args, **kwargs) + self.__patch_index_gens(res) + return res + + def edit_index(self, *args, **kwargs): + res = super(SuperThreadSafeDatabase, self).edit_index(*args, **kwargs) + self.__patch_index_gens(res) + return res diff --git a/libs/CodernityDB/database_thread_safe.py b/libs/CodernityDB/database_thread_safe.py new file mode 100644 index 00000000..5349e09b --- /dev/null +++ b/libs/CodernityDB/database_thread_safe.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from threading import RLock + +from CodernityDB.env import cdb_environment + +cdb_environment['mode'] = "threads" +cdb_environment['rlock_obj'] = RLock + + +from database_safe_shared import SafeDatabase + + +class ThreadSafeDatabase(SafeDatabase): + """ + Thread safe version of CodernityDB that uses several lock objects, + on different methods / different indexes etc. It's completely different + implementation of locking than SuperThreadSafe one. + """ + pass diff --git a/libs/CodernityDB/debug_stuff.py b/libs/CodernityDB/debug_stuff.py new file mode 100644 index 00000000..76cdedf9 --- /dev/null +++ b/libs/CodernityDB/debug_stuff.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from CodernityDB.tree_index import TreeBasedIndex +import struct +import os + +import inspect +from functools import wraps +import json + + +class DebugTreeBasedIndex(TreeBasedIndex): + + def __init__(self, *args, **kwargs): + super(DebugTreeBasedIndex, self).__init__(*args, **kwargs) + + def print_tree(self): + print '-----CURRENT TREE-----' + print self.root_flag + + if self.root_flag == 'l': + print '---ROOT---' + self._print_leaf_data(self.data_start) + return + else: + print '---ROOT---' + self._print_node_data(self.data_start) + nr_of_el, children_flag = self._read_node_nr_of_elements_and_children_flag( + self.data_start) + nodes = [] + for index in range(nr_of_el): + l_pointer, key, r_pointer = self._read_single_node_key( + self.data_start, index) + nodes.append(l_pointer) + nodes.append(r_pointer) + print 'ROOT NODES', nodes + while children_flag == 'n': + self._print_level(nodes, 'n') + new_nodes = [] + for node in nodes: + nr_of_el, children_flag = \ + self._read_node_nr_of_elements_and_children_flag(node) + for index in range(nr_of_el): + l_pointer, key, r_pointer = self._read_single_node_key( + node, index) + new_nodes.append(l_pointer) + new_nodes.append(r_pointer) + nodes = new_nodes + self._print_level(nodes, 'l') + + def _print_level(self, nodes, flag): + print '---NEXT LVL---' + if flag == 'n': + for node in nodes: + self._print_node_data(node) + elif flag == 'l': + for node in nodes: + self._print_leaf_data(node) + + def _print_leaf_data(self, leaf_start_position): + print 'printing data of leaf at', leaf_start_position + nr_of_elements = self._read_leaf_nr_of_elements(leaf_start_position) + self.buckets.seek(leaf_start_position) + data = self.buckets.read(self.leaf_heading_size + + nr_of_elements * self.single_leaf_record_size) + leaf = struct.unpack('<' + self.leaf_heading_format + + nr_of_elements * self.single_leaf_record_format, data) + print leaf + print + + def _print_node_data(self, node_start_position): + print 'printing data of node at', node_start_position + nr_of_elements = self._read_node_nr_of_elements_and_children_flag( + node_start_position)[0] + self.buckets.seek(node_start_position) + data = self.buckets.read(self.node_heading_size + self.pointer_size + + nr_of_elements * (self.key_size + self.pointer_size)) + node = struct.unpack('<' + self.node_heading_format + self.pointer_format + + nr_of_elements * ( + self.key_format + self.pointer_format), + data) + print node + print +# ------------------> + + +def database_step_by_step(db_obj, path=None): + + if not path: + # ugly for multiplatform support.... 
+ p = db_obj.path + p1 = os.path.split(p) + p2 = os.path.split(p1[0]) + p3 = '_'.join([p2[1], 'operation_logger.log']) + path = os.path.join(os.path.split(p2[0])[0], p3) + f_obj = open(path, 'wb') + + __stack = [] # inspect.stack() is not working on pytest etc + + def remove_from_stack(name): + for i in range(len(__stack)): + if __stack[-i] == name: + __stack.pop(-i) + + def __dumper(f): + @wraps(f) + def __inner(*args, **kwargs): + funct_name = f.__name__ + if funct_name == 'count': + name = args[0].__name__ + meth_args = (name,) + args[1:] + elif funct_name in ('reindex_index', 'compact_index'): + name = args[0].name + meth_args = (name,) + args[1:] + else: + meth_args = args + kwargs_copy = kwargs.copy() + res = None + __stack.append(funct_name) + if funct_name == 'insert': + try: + res = f(*args, **kwargs) + except: + packed = json.dumps((funct_name, + meth_args, kwargs_copy, None)) + f_obj.write('%s\n' % packed) + f_obj.flush() + raise + else: + packed = json.dumps((funct_name, + meth_args, kwargs_copy, res)) + f_obj.write('%s\n' % packed) + f_obj.flush() + else: + if funct_name == 'get': + for curr in __stack: + if ('delete' in curr or 'update' in curr) and not curr.startswith('test'): + remove_from_stack(funct_name) + return f(*args, **kwargs) + packed = json.dumps((funct_name, meth_args, kwargs_copy)) + f_obj.write('%s\n' % packed) + f_obj.flush() + res = f(*args, **kwargs) + remove_from_stack(funct_name) + return res + return __inner + + for meth_name, meth_f in inspect.getmembers(db_obj, predicate=inspect.ismethod): + if not meth_name.startswith('_'): + setattr(db_obj, meth_name, __dumper(meth_f)) + + setattr(db_obj, 'operation_logger', f_obj) + + +def database_from_steps(db_obj, path): + # db_obj.insert=lambda data : insert_for_debug(db_obj, data) + with open(path, 'rb') as f_obj: + for current in f_obj: + line = json.loads(current[:-1]) + if line[0] == 'count': + obj = getattr(db_obj, line[1][0]) + line[1] = [obj] + line[1][1:] + name = line[0] + if name == 'insert': + try: + line[1][0].pop('_rev') + except: + pass + elif name in ('delete', 'update'): + el = db_obj.get('id', line[1][0]['_id']) + line[1][0]['_rev'] = el['_rev'] +# print 'FROM STEPS doing', line + meth = getattr(db_obj, line[0], None) + if not meth: + raise Exception("Method = `%s` not found" % line[0]) + + meth(*line[1], **line[2]) + + +# def insert_for_debug(self, data): +# +# _rev = data['_rev'] +# +# if not '_id' in data: +# _id = uuid4().hex +# else: +# _id = data['_id'] +# data['_id'] = _id +# try: +# _id = bytes(_id) +# except: +# raise DatabaseException("`_id` must be valid bytes object") +# self._insert_indexes(_id, _rev, data) +# ret = {'_id': _id, '_rev': _rev} +# data.update(ret) +# return ret diff --git a/libs/CodernityDB/env.py b/libs/CodernityDB/env.py new file mode 100644 index 00000000..69ca8cdd --- /dev/null +++ b/libs/CodernityDB/env.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +It's CodernityDB environment. +Handles internal informations.' +""" + +cdb_environment = { + 'mode': 'normal' +} diff --git a/libs/CodernityDB/hash_index.py b/libs/CodernityDB/hash_index.py new file mode 100644 index 00000000..cd160fd0 --- /dev/null +++ b/libs/CodernityDB/hash_index.py @@ -0,0 +1,880 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from CodernityDB.index import (Index, + IndexException, + DocIdNotFound, + ElemNotFound, + TryReindexException, + IndexPreconditionsException) + +import os +import marshal +import io +import struct +import shutil + +from CodernityDB.storage import IU_Storage, DummyStorage + +from CodernityDB.env import cdb_environment + +if cdb_environment.get('rlock_obj'): + from CodernityDB import patch + patch.patch_cache_rr(cdb_environment['rlock_obj']) + +from CodernityDB.rr_cache import cache1lvl + + +from CodernityDB.misc import random_hex_32 + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +class IU_HashIndex(Index): + """ + That class is for Internal Use only, if you want to use HashIndex just subclass the :py:class:`HashIndex` instead this one. + + That design is because main index logic should be always in database not in custom user indexes. + """ + + def __init__(self, db_path, name, entry_line_format='<32s{key}IIcI', hash_lim=0xfffff, storage_class=None, key_format='c'): + """ + The index is capable to solve conflicts by `Separate chaining` + :param db_path: database path + :type db_path: string + :param name: index name + :type name: ascii string + :param line_format: line format, `key_format` parameter value will replace `{key}` if present. 
+ :type line_format: string (32s{key}IIcI by default) {doc_id}{hash_key}{start}{size}{status}{next} + :param hash_lim: maximum hash functon results (remember about birthday problem) count from 0 + :type hash_lim: integer + :param storage_class: Storage class by default it will open standard :py:class:`CodernityDB.storage.Storage` (if string has to be accesible by globals()[storage_class]) + :type storage_class: class name which will be instance of CodernityDB.storage.Storage instance or None + :param key_format: a index key format + """ + if key_format and '{key}' in entry_line_format: + entry_line_format = entry_line_format.replace('{key}', key_format) + super(IU_HashIndex, self).__init__(db_path, name) + self.hash_lim = hash_lim + if not storage_class: + storage_class = IU_Storage + if storage_class and not isinstance(storage_class, basestring): + storage_class = storage_class.__name__ + self.storage_class = storage_class + self.storage = None + + self.bucket_line_format = "= self.data_start: + self.buckets.seek(pos_prev) + data = self.buckets.read(self.entry_line_size) + if data: + doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data) + self.buckets.seek(pos_prev) + self.buckets.write(self.entry_struct.pack(doc_id, + l_key, + start, + size, + status, + pos_next)) + self.flush() + if pos_next: + self.buckets.seek(pos_next) + data = self.buckets.read(self.entry_line_size) + if data: + doc_id, l_key, start, size, status, _next = self.entry_struct.unpack(data) + self.buckets.seek(pos_next) + self.buckets.write(self.entry_struct.pack(doc_id, + l_key, + start, + size, + status, + _next)) + self.flush() + return + + def delete(self, doc_id, key, start=0, size=0): + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + # case happens when trying to delete element with new index key in data + # after adding new index to database without reindex + raise TryReindexException() + found_at, _doc_id, _key, start, size, status, _next = self._locate_doc_id(doc_id, key, location) + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(doc_id, + key, + start, + size, + 'd', + _next)) + self.flush() + # self._fix_link(_key, _prev, _next) + self._find_key.delete(key) + self._locate_doc_id.delete(doc_id) + return True + + def compact(self, hash_lim=None): + + if not hash_lim: + hash_lim = self.hash_lim + + compact_ind = self.__class__( + self.db_path, self.name + '_compact', hash_lim=hash_lim) + compact_ind.create_index() + + gen = self.all() + while True: + try: + doc_id, key, start, size, status = gen.next() + except StopIteration: + break + self.storage._f.seek(start) + value = self.storage._f.read(size) + start_ = compact_ind.storage._f.tell() + compact_ind.storage._f.write(value) + compact_ind.insert(doc_id, key, start_, size, status) + + compact_ind.close_index() + original_name = self.name + # os.unlink(os.path.join(self.db_path, self.name + "_buck")) + self.close_index() + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_buck"), os.path.join(self.db_path, self.name + "_buck")) + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_stor"), os.path.join(self.db_path, self.name + "_stor")) + # self.name = original_name + self.open_index() # reload... 
+ self.name = original_name + self._save_params(dict(name=original_name)) + self._fix_params() + self._clear_cache() + return True + + def make_key(self, key): + return key + + def make_key_value(self, data): + return '1', data + + def _clear_cache(self): + self._find_key.clear() + self._locate_doc_id.clear() + + def close_index(self): + super(IU_HashIndex, self).close_index() + self._clear_cache() + + +class IU_UniqueHashIndex(IU_HashIndex): + """ + Index for *unique* keys! Designed to be a **id** index. + + That class is for Internal Use only, if you want to use UniqueHashIndex just subclass the :py:class:`UniqueHashIndex` instead this one. + + That design is because main index logic should be always in database not in custom user indexes. + """ + + def __init__(self, db_path, name, entry_line_format="<32s8sIIcI", *args, **kwargs): + if 'key' in kwargs: + raise IndexPreconditionsException( + "UniqueHashIndex doesn't accept key parameter'") + super(IU_UniqueHashIndex, self).__init__(db_path, name, + entry_line_format, *args, **kwargs) + self.create_key = random_hex_32 # : set the function to create random key when no _id given + # self.entry_struct=struct.Struct(entry_line_format) + +# @lfu_cache(100) + def _find_key(self, key): + """ + Find the key position + + :param key: the key to find + """ + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + found_at, l_key, rev, start, size, status, _next = self._locate_key( + key, location) + return l_key, rev, start, size, status + else: + return None, None, 0, 0, 'u' + + def _find_key_many(self, *args, **kwargs): + raise NotImplementedError() + + def _find_place(self, start, key): + """ + Find a place to where put the key. It will iterate using `next` field in record, until + empty `next` found + + :param start: position to start from + """ + location = start + while True: + self.buckets.seek(location) + data = self.buckets.read(self.entry_line_size) + # todo, maybe partial read there... + l_key, rev, start, size, status, _next = self.entry_struct.unpack( + data) + if l_key == key: + raise IndexException("The '%s' key already exists" % key) + if not _next or status == 'd': + return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next + else: + location = _next # go to next record + + # @lfu_cache(100) + def _locate_key(self, key, start): + """ + Locate position of the key, it will iterate using `next` field in record + until required key will be find. + + :param key: the key to locate + :param start: position to start from + """ + location = start + while True: + self.buckets.seek(location) + data = self.buckets.read(self.entry_line_size) + # todo, maybe partial read there... 
+ try: + l_key, rev, start, size, status, _next = self.entry_struct.unpack(data) + except struct.error: + raise ElemNotFound("Location '%s' not found" % key) + if l_key == key: + break + else: + if not _next: + # not found + raise ElemNotFound("Location '%s' not found" % key) + else: + location = _next # go to next record + return self.buckets.tell() - self.entry_line_size, l_key, rev, start, size, status, _next + + def update(self, key, rev, u_start=0, u_size=0, u_status='o'): + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + # test if it's unique or not really unique hash + + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + raise ElemNotFound("Location '%s' not found" % key) + found_at, _key, _rev, start, size, status, _next = self._locate_key( + key, location) + if u_start == 0: + u_start = start + if u_size == 0: + u_size = size + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(key, + rev, + u_start, + u_size, + u_status, + _next)) + self.flush() + self._find_key.delete(key) + return True + + def insert(self, key, rev, start, size, status='o'): + start_position = self._calculate_position(key) + self.buckets.seek(start_position) + curr_data = self.buckets.read(self.bucket_line_size) + + # conflict occurs? + if curr_data: + location = self.bucket_struct.unpack(curr_data)[0] + else: + location = 0 + if location: + # last key with that hash + found_at, _key, _rev, _start, _size, _status, _next = self._find_place( + location, key) + self.buckets.seek(0, 2) + wrote_at = self.buckets.tell() + + # check if position is bigger than all hash entries... + if wrote_at < self.data_start: + self.buckets.seek(self.data_start) + wrote_at = self.buckets.tell() + + self.buckets.write(self.entry_struct.pack(key, + rev, + start, + size, + status, + _next)) + +# self.flush() + self.buckets.seek(found_at) + self.buckets.write(self.entry_struct.pack(_key, + _rev, + _start, + _size, + _status, + wrote_at)) + self.flush() + self._find_key.delete(_key) + # self._locate_key.delete(_key) + return True + # raise NotImplementedError + else: + self.buckets.seek(0, 2) + wrote_at = self.buckets.tell() + + # check if position is bigger than all hash entries... 
+ if wrote_at < self.data_start: + self.buckets.seek(self.data_start) + wrote_at = self.buckets.tell() + + self.buckets.write(self.entry_struct.pack(key, + rev, + start, + size, + status, + 0)) +# self.flush() + self.buckets.seek(start_position) + self.buckets.write(self.bucket_struct.pack(wrote_at)) + self.flush() + self._find_key.delete(key) + return True + + def all(self, limit=-1, offset=0): + self.buckets.seek(self.data_start) + while offset: + curr_data = self.buckets.read(self.entry_line_size) + if not curr_data: + break + try: + doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data) + except IndexException: + break + else: + if status != 'd': + offset -= 1 + + while limit: + curr_data = self.buckets.read(self.entry_line_size) + if not curr_data: + break + try: + doc_id, rev, start, size, status, next = self.entry_struct.unpack(curr_data) + except IndexException: + break + else: + if status != 'd': + yield doc_id, rev, start, size, status + limit -= 1 + + def get_many(self, *args, **kwargs): + raise NotImplementedError() + + def delete(self, key, start=0, size=0): + self.update(key, '00000000', start, size, 'd') + + def make_key_value(self, data): + _id = data['_id'] + try: + _id = bytes(data['_id']) + except: + raise IndexPreconditionsException( + "_id must be valid string/bytes object") + if len(_id) != 32: + raise IndexPreconditionsException("Invalid _id lenght") + del data['_id'] + del data['_rev'] + return _id, data + + def destroy(self): + Index.destroy(self) + self._clear_cache() + + def _clear_cache(self): + self._find_key.clear() + + def insert_with_storage(self, _id, _rev, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.insert(_id, _rev, start, size) + + def update_with_storage(self, _id, _rev, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.update(_id, _rev, start, size) + + +class DummyHashIndex(IU_HashIndex): + def __init__(self, db_path, name, entry_line_format="<32s4sIIcI", *args, **kwargs): + super(DummyHashIndex, self).__init__(db_path, name, + entry_line_format, *args, **kwargs) + self.create_key = random_hex_32 # : set the function to create random key when no _id given + # self.entry_struct=struct.Struct(entry_line_format) + + def update(self, *args, **kwargs): + return True + + def insert(self, *args, **kwargs): + return True + + def all(self, *args, **kwargs): + raise StopIteration + + def get(self, *args, **kwargs): + raise ElemNotFound + + def get_many(self, *args, **kwargs): + raise StopIteration + + def delete(self, *args, **kwargs): + pass + + def make_key_value(self, data): + return '1', {'_': 1} + + def destroy(self): + pass + + def _clear_cache(self): + pass + + def _open_storage(self): + if not self.storage: + self.storage = DummyStorage() + self.storage.open() + + def _create_storage(self): + if not self.storage: + self.storage = DummyStorage() + self.storage.create() + + +class IU_MultiHashIndex(IU_HashIndex): + """ + Class that allows to index more than one key per database record. + + It operates very well on GET/INSERT. 
It's not optimized for + UPDATE operations (will always readd everything) + """ + + def __init__(self, *args, **kwargs): + super(IU_MultiHashIndex, self).__init__(*args, **kwargs) + + def insert(self, doc_id, key, start, size, status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + ins = super(IU_MultiHashIndex, self).insert + for curr_key in key: + ins(doc_id, curr_key, start, size, status) + return True + + def update(self, doc_id, key, u_start, u_size, u_status='o'): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + upd = super(IU_MultiHashIndex, self).update + for curr_key in key: + upd(doc_id, curr_key, u_start, u_size, u_status) + + def delete(self, doc_id, key, start=0, size=0): + if isinstance(key, (list, tuple)): + key = set(key) + elif not isinstance(key, set): + key = set([key]) + delete = super(IU_MultiHashIndex, self).delete + for curr_key in key: + delete(doc_id, curr_key, start, size) + + def get(self, key): + return super(IU_MultiHashIndex, self).get(key) + + def make_key_value(self, data): + raise NotImplementedError() + + +# classes for public use, done in this way because of +# generation static files with indexes (_index directory) + + +class HashIndex(IU_HashIndex): + """ + That class is designed to be used in custom indexes. + """ + pass + + +class UniqueHashIndex(IU_UniqueHashIndex): + """ + That class is designed to be used in custom indexes. It's designed to be **id** index. + """ + pass + + +class MultiHashIndex(IU_MultiHashIndex): + """ + That class is designed to be used in custom indexes. + """ diff --git a/libs/CodernityDB/index.py b/libs/CodernityDB/index.py new file mode 100644 index 00000000..48db2a4a --- /dev/null +++ b/libs/CodernityDB/index.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
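The three public classes above (`HashIndex`, `UniqueHashIndex`, `MultiHashIndex`) are the intended extension points. A minimal sketch of a custom index built on `HashIndex`, following the pattern their docstrings describe; the field name, index name, and database path are illustrative, and it assumes (as elsewhere in this library) that returning `None` from `make_key_value` skips indexing for that document:

```python
from CodernityDB.database import Database
from CodernityDB.hash_index import HashIndex


class TitleIndex(HashIndex):
    """Hash index over a hypothetical 'title' field."""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '16s'     # fixed-width string keys
        super(TitleIndex, self).__init__(*args, **kwargs)

    def make_key_value(self, data):
        title = data.get('title')
        if title is None:
            return None                  # assumed: None means "don't index"
        return self.make_key(title), None

    def make_key(self, key):
        return key.rjust(16, '_')[:16]   # pad/truncate to the key_format width


db = Database('/tmp/cp_hash_demo')       # illustrative path
db.create()
db.add_index(TitleIndex(db.path, 'title'))
db.insert(dict(title='Movie'))
print db.get('title', 'Movie', with_doc=True)
```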
+ + +import os +import marshal + +import struct +import shutil + +from CodernityDB.storage import IU_Storage, DummyStorage + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +import io + + +class IndexException(Exception): + pass + + +class IndexNotFoundException(IndexException): + pass + + +class ReindexException(IndexException): + pass + + +class TryReindexException(ReindexException): + pass + + +class ElemNotFound(IndexException): + pass + + +class DocIdNotFound(ElemNotFound): + pass + + +class IndexConflict(IndexException): + pass + + +class IndexPreconditionsException(IndexException): + pass + + +class Index(object): + + __version__ = __version__ + + custom_header = "" # : use it for imports required by your index + + def __init__(self, + db_path, + name): + self.name = name + self._start_ind = 500 + self.db_path = db_path + + def open_index(self): + if not os.path.isfile(os.path.join(self.db_path, self.name + '_buck')): + raise IndexException("Doesn't exists") + self.buckets = io.open( + os.path.join(self.db_path, self.name + "_buck"), 'r+b', buffering=0) + self._fix_params() + self._open_storage() + + def _close(self): + self.buckets.close() + self.storage.close() + + def close_index(self): + self.flush() + self.fsync() + self._close() + + def create_index(self): + raise NotImplementedError() + + def _fix_params(self): + self.buckets.seek(0) + props = marshal.loads(self.buckets.read(self._start_ind)) + for k, v in props.iteritems(): + self.__dict__[k] = v + self.buckets.seek(0, 2) + + def _save_params(self, in_params={}): + self.buckets.seek(0) + props = marshal.loads(self.buckets.read(self._start_ind)) + props.update(in_params) + self.buckets.seek(0) + data = marshal.dumps(props) + if len(data) > self._start_ind: + raise IndexException("To big props") + self.buckets.write(data) + self.flush() + self.buckets.seek(0, 2) + self.__dict__.update(props) + + def _open_storage(self, *args, **kwargs): + pass + + def _create_storage(self, *args, **kwargs): + pass + + def _destroy_storage(self, *args, **kwargs): + self.storage.destroy() + + def _find_key(self, key): + raise NotImplementedError() + + def update(self, doc_id, key, start, size): + raise NotImplementedError() + + def insert(self, doc_id, key, start, size): + raise NotImplementedError() + + def get(self, key): + raise NotImplementedError() + + def get_many(self, key, start_from=None, limit=0): + raise NotImplementedError() + + def all(self, start_pos): + raise NotImplementedError() + + def delete(self, key, start, size): + raise NotImplementedError() + + def make_key_value(self, data): + raise NotImplementedError() + + def make_key(self, data): + raise NotImplementedError() + + def compact(self, *args, **kwargs): + raise NotImplementedError() + + def destroy(self, *args, **kwargs): + self._close() + bucket_file = os.path.join(self.db_path, self.name + '_buck') + os.unlink(bucket_file) + self._destroy_storage() + self._find_key.clear() + + def flush(self): + try: + self.buckets.flush() + self.storage.flush() + except: + pass + + def fsync(self): + try: + os.fsync(self.buckets.fileno()) + self.storage.fsync() + except: + pass + + def update_with_storage(self, doc_id, key, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.update(doc_id, key, start, size) + + def insert_with_storage(self, doc_id, key, value): + if value: + start, size = self.storage.insert(value) + else: + start = 1 + size = 0 + return self.insert(doc_id, 
key, start, size) diff --git a/libs/CodernityDB/indexcreator.py b/libs/CodernityDB/indexcreator.py new file mode 100644 index 00000000..1e09a22b --- /dev/null +++ b/libs/CodernityDB/indexcreator.py @@ -0,0 +1,645 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import re +import tokenize +import token +import uuid + + +class IndexCreatorException(Exception): + def __init__(self, ex, line=None): + self.ex = ex + self.line = line + + def __str__(self): + if self.line: + return repr(self.ex + "(in line: %d)" % self.line) + return repr(self.ex) + + +class IndexCreatorFunctionException(IndexCreatorException): + pass + + +class IndexCreatorValueException(IndexCreatorException): + pass + + +class Parser(object): + def __init__(self): + pass + + def parse(self, data, name=None): + if not name: + self.name = "_" + uuid.uuid4().hex + else: + self.name = name + + self.ind = 0 + self.stage = 0 + self.logic = ['and', 'or', 'in'] + self.logic2 = ['&', '|'] + self.allowed_props = {'TreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'], + 'HashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'], + 'MultiHashIndex': ['type', 'name', 'key_format', 'hash_lim', 'entry_line_format'], + 'MultiTreeBasedIndex': ['type', 'name', 'key_format', 'node_capacity', 'pointer_format', 'meta_format'] + } + self.funcs = {'md5': (['md5'], ['.digest()']), + 'len': (['len'], []), + 'str': (['str'], []), + 'fix_r': (['self.fix_r'], []), + 'prefix': (['self.prefix'], []), + 'infix': (['self.infix'], []), + 'suffix': (['self.suffix'], []) + } + self.handle_int_imports = {'infix': "from itertools import izip\n"} + + self.funcs_with_body = {'fix_r': + (""" def fix_r(self,s,l): + e = len(s) + if e == l: + return s + elif e > l: + return s[:l] + else: + return s.rjust(l,'_')\n""", False), + 'prefix': + (""" def prefix(self,s,m,l,f): + t = len(s) + if m < 1: + m = 1 + o = set() + if t > l: + s = s[:l] + t = l + while m <= t: + o.add(s.rjust(f,'_')) + s = s[:-1] + t -= 1 + return o\n""", False), + 'suffix': + (""" def suffix(self,s,m,l,f): + t = len(s) + if m < 1: + m = 1 + o = set() + if t > l: + s = s[t-l:] + t = len(s) + while m <= t: + o.add(s.rjust(f,'_')) + s = s[1:] + t -= 1 + return o\n""", False), + 'infix': + (""" def infix(self,s,m,l,f): + t = len(s) + o = set() + for x in xrange(m - 1, l): + t = (s, ) + for y in xrange(0, x): + t += (s[y + 1:],) + o.update(set(''.join(x).rjust(f, '_').lower() for x in izip(*t))) + return o\n""", False)} + self.none = ['None', 'none', 'null'] + self.props_assign = ['=', ':'] + self.all_adj_num_comp = {token.NUMBER: ( + token.NUMBER, token.NAME, '-', '('), + token.NAME: (token.NUMBER, token.NAME, '-', '('), + ')': (token.NUMBER, token.NAME, '-', '(') + } + + self.all_adj_num_op = {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, '('), + ')': (token.NUMBER, token.NAME, '(') + } + 
self.allowed_adjacent = { + "<=": self.all_adj_num_comp, + ">=": self.all_adj_num_comp, + ">": self.all_adj_num_comp, + "<": self.all_adj_num_comp, + + "==": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '('), + token.STRING: (token.NAME, token.STRING, '('), + ')': (token.NUMBER, token.NAME, token.STRING, '('), + ']': (token.NUMBER, token.NAME, token.STRING, '(') + }, + + "+": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '('), + token.STRING: (token.NAME, token.STRING, '('), + ')': (token.NUMBER, token.NAME, token.STRING, '('), + ']': (token.NUMBER, token.NAME, token.STRING, '(') + }, + + "-": {token.NUMBER: (token.NUMBER, token.NAME, '('), + token.NAME: (token.NUMBER, token.NAME, '('), + ')': (token.NUMBER, token.NAME, '('), + '<': (token.NUMBER, token.NAME, '('), + '>': (token.NUMBER, token.NAME, '('), + '<=': (token.NUMBER, token.NAME, '('), + '>=': (token.NUMBER, token.NAME, '('), + '==': (token.NUMBER, token.NAME, '('), + ']': (token.NUMBER, token.NAME, '(') + }, + "*": self.all_adj_num_op, + "/": self.all_adj_num_op, + "%": self.all_adj_num_op, + ",": {token.NUMBER: (token.NUMBER, token.NAME, token.STRING, '{', '[', '('), + token.NAME: (token.NUMBER, token.NAME, token.STRING, '(', '{', '['), + token.STRING: (token.NAME, token.STRING, token.NUMBER, '(', '{', '['), + ')': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['), + ']': (token.NUMBER, token.NAME, token.STRING, '(', '{', '['), + '}': (token.NUMBER, token.NAME, token.STRING, '(', '{', '[') + } + } + + def is_num(s): + m = re.search('[^0-9*()+\-\s/]+', s) + return not m + + def is_string(s): + m = re.search('\s*(?P[\'\"]+).*?(?P=a)\s*', s) + return m + data = re.split('make_key_value\:', data) + + if len(data) < 2: + raise IndexCreatorFunctionException( + "Couldn't find a definition of make_key_value function!\n") + + spl1 = re.split('make_key\:', data[0]) + spl2 = re.split('make_key\:', data[1]) + + self.funcs_rev = False + + if len(spl1) > 1: + data = [spl1[0]] + [data[1]] + [spl1[1]] + self.funcs_rev = True + elif len(spl2) > 1: + data = [data[0]] + spl2 + else: + data.append("key") + + if data[1] == re.search('\s*', data[1], re.S | re.M).group(0): + raise IndexCreatorFunctionException("Empty function body ", + len(re.split('\n', data[0])) + (len(re.split('\n', data[2])) if self.funcs_rev else 1) - 1) + if data[2] == re.search('\s*', data[2], re.S | re.M).group(0): + raise IndexCreatorFunctionException("Empty function body ", + len(re.split('\n', data[0])) + (1 if self.funcs_rev else len(re.split('\n', data[1]))) - 1) + if data[0] == re.search('\s*', data[0], re.S | re.M).group(0): + raise IndexCreatorValueException("You didn't set any properity or you set them not at the begining of the code\n") + + data = [re.split( + '\n', data[0]), re.split('\n', data[1]), re.split('\n', data[2])] + self.cnt_lines = (len(data[0]), len(data[1]), len(data[2])) + ind = 0 + self.predata = data + self.data = [[], [], []] + for i, v in enumerate(self.predata[0]): + for k, w in enumerate(self.predata[0][i]): + if self.predata[0][i][k] in self.props_assign: + if not is_num(self.predata[0][i][k + 1:]) and self.predata[0][i].strip()[:4] != 'type' and self.predata[0][i].strip()[:4] != 'name': + s = self.predata[0][i][k + 1:] + self.predata[0][i] = self.predata[0][i][:k + 1] + + m = re.search('\s+', s.strip()) + if not is_string(s) and not m: + s = "'" + s.strip() + "'" + self.predata[0][i] += s + break + + for n, i in 
enumerate(self.predata): + for k in i: + k = k.strip() + if k: + self.data[ind].append(k) + self.check_enclosures(k, n) + ind += 1 + + return self.parse_ex() + + def readline(self, stage): + def foo(): + if len(self.data[stage]) <= self.ind: + self.ind = 0 + return "" + else: + self.ind += 1 + return self.data[stage][self.ind - 1] + return foo + + def add(self, l, i): + def add_aux(*args): + # print args,self.ind + if len(l[i]) < self.ind: + l[i].append([]) + l[i][self.ind - 1].append(args) + return add_aux + + def parse_ex(self): + self.index_name = "" + self.index_type = "" + self.curLine = -1 + self.con = -1 + self.brackets = -1 + self.curFunc = None + self.colons = 0 + self.line_cons = ([], [], []) + self.pre_tokens = ([], [], []) + self.known_dicts_in_mkv = [] + self.prop_name = True + self.prop_assign = False + self.is_one_arg_enough = False + self.funcs_stack = [] + self.last_line = [-1, -1, -1] + self.props_set = [] + self.custom_header = set() + + self.tokens = [] + self.tokens_head = ['# %s\n' % self.name, 'class %s(' % self.name, '):\n', ' def __init__(self, *args, **kwargs): '] + + for i in xrange(3): + tokenize.tokenize(self.readline(i), self.add(self.pre_tokens, i)) + # tokenize treats some keyword not in the right way, thats why we + # have to change some of them + for nk, k in enumerate(self.pre_tokens[i]): + for na, a in enumerate(k): + if a[0] == token.NAME and a[1] in self.logic: + self.pre_tokens[i][nk][ + na] = (token.OP, a[1], a[2], a[3], a[4]) + + for i in self.pre_tokens[1]: + self.line_cons[1].append(self.check_colons(i, 1)) + self.check_adjacents(i, 1) + if self.check_for_2nd_arg(i) == -1 and not self.is_one_arg_enough: + raise IndexCreatorValueException("No 2nd value to return (did u forget about ',None'?", self.cnt_line_nr(i[0][4], 1)) + self.is_one_arg_enough = False + + for i in self.pre_tokens[2]: + self.line_cons[2].append(self.check_colons(i, 2)) + self.check_adjacents(i, 2) + + for i in self.pre_tokens[0]: + self.handle_prop_line(i) + + self.cur_brackets = 0 + self.tokens += ['\n super(%s, self).__init__(*args, **kwargs)\n def make_key_value(self, data): ' % self.name] + + for i in self.pre_tokens[1]: + for k in i: + self.handle_make_value(*k) + + self.curLine = -1 + self.con = -1 + self.cur_brackets = 0 + self.tokens += ['\n def make_key(self, key):'] + + for i in self.pre_tokens[2]: + for k in i: + self.handle_make_key(*k) + + if self.index_type == "": + raise IndexCreatorValueException("Missing index type definition\n") + if self.index_name == "": + raise IndexCreatorValueException("Missing index name\n") + + self.tokens_head[0] = "# " + self.index_name + "\n" + \ + self.tokens_head[0] + + for i in self.funcs_with_body: + if self.funcs_with_body[i][1]: + self.tokens_head.insert(4, self.funcs_with_body[i][0]) + + if None in self.custom_header: + self.custom_header.remove(None) + if self.custom_header: + s = ' custom_header = """' + for i in self.custom_header: + s += i + s += '"""\n' + self.tokens_head.insert(4, s) + + if self.index_type in self.allowed_props: + for i in self.props_set: + if i not in self.allowed_props[self.index_type]: + raise IndexCreatorValueException("Properity %s is not allowed for index type: %s" % (i, self.index_type)) + + # print "".join(self.tokens_head) + # print "----------" + # print (" ".join(self.tokens)) + return "".join(self.custom_header), "".join(self.tokens_head) + (" ".join(self.tokens)) + + # has to be run BEFORE tokenize + def check_enclosures(self, d, st): + encs = [] + contr = {'(': ')', '{': '}', '[': ']', "'": 
"'", '"': '"'} + ends = [')', '}', ']', "'", '"'] + for i in d: + if len(encs) > 0 and encs[-1] in ['"', "'"]: + if encs[-1] == i: + del encs[-1] + elif i in contr: + encs += [i] + elif i in ends: + if len(encs) < 1 or contr[encs[-1]] != i: + raise IndexCreatorValueException("Missing opening enclosure for \'%s\'" % i, self.cnt_line_nr(d, st)) + del encs[-1] + + if len(encs) > 0: + raise IndexCreatorValueException("Missing closing enclosure for \'%s\'" % encs[0], self.cnt_line_nr(d, st)) + + def check_adjacents(self, d, st): + def std_check(d, n): + if n == 0: + prev = -1 + else: + prev = d[n - 1][1] if d[n - 1][0] == token.OP else d[n - 1][0] + + cur = d[n][1] if d[n][0] == token.OP else d[n][0] + + # there always is an endmarker at the end, but this is a precaution + if n + 2 > len(d): + nex = -1 + else: + nex = d[n + 1][1] if d[n + 1][0] == token.OP else d[n + 1][0] + + if prev not in self.allowed_adjacent[cur]: + raise IndexCreatorValueException("Wrong left value of the %s" % cur, self.cnt_line_nr(line, st)) + + # there is an assumption that whole data always ends with 0 marker, the idea prolly needs a rewritting to allow more whitespaces + # between tokens, so it will be handled anyway + elif nex not in self.allowed_adjacent[cur][prev]: + raise IndexCreatorValueException("Wrong right value of the %s" % cur, self.cnt_line_nr(line, st)) + + for n, (t, i, _, _, line) in enumerate(d): + if t == token.NAME or t == token.STRING: + if n + 1 < len(d) and d[n + 1][0] in [token.NAME, token.STRING]: + raise IndexCreatorValueException("Did you forget about an operator in between?", self.cnt_line_nr(line, st)) + elif i in self.allowed_adjacent: + std_check(d, n) + + def check_colons(self, d, st): + cnt = 0 + br = 0 + + def check_ret_args_nr(a, s): + c_b_cnt = 0 + s_b_cnt = 0 + n_b_cnt = 0 + comas_cnt = 0 + for _, i, _, _, line in a: + + if c_b_cnt == n_b_cnt == s_b_cnt == 0: + if i == ',': + comas_cnt += 1 + if (s == 1 and comas_cnt > 1) or (s == 2 and comas_cnt > 0): + raise IndexCreatorFunctionException("Too much arguments to return", self.cnt_line_nr(line, st)) + if s == 0 and comas_cnt > 0: + raise IndexCreatorValueException("A coma here doesn't make any sense", self.cnt_line_nr(line, st)) + + elif i == ':': + if s == 0: + raise IndexCreatorValueException("A colon here doesn't make any sense", self.cnt_line_nr(line, st)) + raise IndexCreatorFunctionException("Two colons don't make any sense", self.cnt_line_nr(line, st)) + + if i == '{': + c_b_cnt += 1 + elif i == '}': + c_b_cnt -= 1 + elif i == '(': + n_b_cnt += 1 + elif i == ')': + n_b_cnt -= 1 + elif i == '[': + s_b_cnt += 1 + elif i == ']': + s_b_cnt -= 1 + + def check_if_empty(a): + for i in a: + if i not in [token.NEWLINE, token.INDENT, token.ENDMARKER]: + return False + return True + if st == 0: + check_ret_args_nr(d, st) + return + + for n, i in enumerate(d): + if i[1] == ':': + if br == 0: + if len(d) < n or check_if_empty(d[n + 1:]): + raise IndexCreatorValueException( + "Empty return value", self.cnt_line_nr(i[4], st)) + elif len(d) >= n: + check_ret_args_nr(d[n + 1:], st) + return cnt + else: + cnt += 1 + elif i[1] == '{': + br += 1 + elif i[1] == '}': + br -= 1 + check_ret_args_nr(d, st) + return -1 + + def check_for_2nd_arg(self, d): + c_b_cnt = 0 # curly brackets counter '{}' + s_b_cnt = 0 # square brackets counter '[]' + n_b_cnt = 0 # normal brackets counter '()' + + def check_2nd_arg(d, ind): + d = d[ind[0]:] + for t, i, (n, r), _, line in d: + if i == '{' or i is None: + return 0 + elif t == token.NAME: + 
self.known_dicts_in_mkv.append((i, (n, r))) + return 0 + elif t == token.STRING or t == token.NUMBER: + raise IndexCreatorValueException("Second return value of make_key_value function has to be a dictionary!", self.cnt_line_nr(line, 1)) + + for ind in enumerate(d): + t, i, _, _, _ = ind[1] + if s_b_cnt == n_b_cnt == c_b_cnt == 0: + if i == ',': + return check_2nd_arg(d, ind) + elif (t == token.NAME and i not in self.funcs) or i == '{': + self.is_one_arg_enough = True + + if i == '{': + c_b_cnt += 1 + self.is_one_arg_enough = True + elif i == '}': + c_b_cnt -= 1 + elif i == '(': + n_b_cnt += 1 + elif i == ')': + n_b_cnt -= 1 + elif i == '[': + s_b_cnt += 1 + elif i == ']': + s_b_cnt -= 1 + return -1 + + def cnt_line_nr(self, l, stage): + nr = -1 + for n, i in enumerate(self.predata[stage]): + # print i,"|||",i.strip(),"|||",l + if l == i.strip(): + nr = n + if nr == -1: + return -1 + + if stage == 0: + return nr + 1 + elif stage == 1: + return nr + self.cnt_lines[0] + (self.cnt_lines[2] - 1 if self.funcs_rev else 0) + elif stage == 2: + return nr + self.cnt_lines[0] + (self.cnt_lines[1] - 1 if not self.funcs_rev else 0) + + return -1 + + def handle_prop_line(self, d): + d_len = len(d) + if d[d_len - 1][0] == token.ENDMARKER: + d_len -= 1 + + if d_len < 3: + raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0)) + + if not d[1][1] in self.props_assign: + raise IndexCreatorValueException( + "Did you forget : or =?", self.cnt_line_nr(d[0][4], 0)) + + if d[0][0] == token.NAME or d[0][0] == token.STRING: + if d[0][1] in self.props_set: + raise IndexCreatorValueException("Properity %s is set more than once" % d[0][1], self.cnt_line_nr(d[0][4], 0)) + self.props_set += [d[0][1]] + if d[0][1] == "type" or d[0][1] == "name": + t, tk, _, _, line = d[2] + + if d_len > 3: + raise IndexCreatorValueException( + "Wrong value to assign", self.cnt_line_nr(line, 0)) + + if t == token.STRING: + m = re.search('\s*(?P[\'\"]+)(.*?)(?P=a)\s*', tk) + if m: + tk = m.groups()[1] + elif t != token.NAME: + raise IndexCreatorValueException( + "Wrong value to assign", self.cnt_line_nr(line, 0)) + + if d[0][1] == "type": + if d[2][1] == "TreeBasedIndex": + self.custom_header.add("from CodernityDB.tree_index import TreeBasedIndex\n") + elif d[2][1] == "MultiTreeBasedIndex": + self.custom_header.add("from CodernityDB.tree_index import MultiTreeBasedIndex\n") + elif d[2][1] == "MultiHashIndex": + self.custom_header.add("from CodernityDB.hash_index import MultiHashIndex\n") + self.tokens_head.insert(2, tk) + self.index_type = tk + else: + self.index_name = tk + return + else: + self.tokens += ['\n kwargs["' + d[0][1] + '"]'] + else: + raise IndexCreatorValueException("Can't handle properity assingment ", self.cnt_line_nr(d[0][4], 0)) + + self.tokens += ['='] + + self.check_adjacents(d[2:], 0) + self.check_colons(d[2:], 0) + + for i in d[2:]: + self.tokens += [i[1]] + + def generate_func(self, t, tk, pos_start, pos_end, line, hdata, stage): + if self.last_line[stage] != -1 and pos_start[0] > self.last_line[stage] and line != '': + raise IndexCreatorFunctionException("This line will never be executed!", self.cnt_line_nr(line, stage)) + if t == 0: + return + + if pos_start[1] == 0: + if self.line_cons[stage][pos_start[0] - 1] == -1: + self.tokens += ['\n return'] + self.last_line[stage] = pos_start[0] + else: + self.tokens += ['\n if'] + elif tk == ':' and self.line_cons[stage][pos_start[0] - 1] > -1: + if self.line_cons[stage][pos_start[0] - 1] == 0: + self.tokens += [':\n return'] 
+ return + self.line_cons[stage][pos_start[0] - 1] -= 1 + + if tk in self.logic2: + # print tk + if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] != tk: + self.tokens += [tk] + if line[pos_start[1] - 1] != tk and line[pos_start[1] + 1] == tk: + if tk == '&': + self.tokens += ['and'] + else: + self.tokens += ['or'] + return + + if self.brackets != 0: + def search_through_known_dicts(a): + for i, (n, r) in self.known_dicts_in_mkv: + if i == tk and r > pos_start[1] and n == pos_start[0] and hdata == 'data': + return True + return False + + if t == token.NAME and len(self.funcs_stack) > 0 and self.funcs_stack[-1][0] == 'md5' and search_through_known_dicts(tk): + raise IndexCreatorValueException("Second value returned by make_key_value for sure isn't a dictionary ", self.cnt_line_nr(line, 1)) + + if tk == ')': + self.cur_brackets -= 1 + if len(self.funcs_stack) > 0 and self.cur_brackets == self.funcs_stack[-1][1]: + self.tokens += [tk] + self.tokens += self.funcs[self.funcs_stack[-1][0]][1] + del self.funcs_stack[-1] + return + if tk == '(': + self.cur_brackets += 1 + + if tk in self.none: + self.tokens += ['None'] + return + + if t == token.NAME and tk not in self.logic and tk != hdata: + if tk not in self.funcs: + self.tokens += [hdata + '["' + tk + '"]'] + else: + self.tokens += self.funcs[tk][0] + if tk in self.funcs_with_body: + self.funcs_with_body[tk] = ( + self.funcs_with_body[tk][0], True) + self.custom_header.add(self.handle_int_imports.get(tk)) + self.funcs_stack += [(tk, self.cur_brackets)] + else: + self.tokens += [tk] + + def handle_make_value(self, t, tk, pos_start, pos_end, line): + self.generate_func(t, tk, pos_start, pos_end, line, 'data', 1) + + def handle_make_key(self, t, tk, pos_start, pos_end, line): + self.generate_func(t, tk, pos_start, pos_end, line, 'key', 2) diff --git a/libs/CodernityDB/lfu_cache.py b/libs/CodernityDB/lfu_cache.py new file mode 100644 index 00000000..e11ffc95 --- /dev/null +++ b/libs/CodernityDB/lfu_cache.py @@ -0,0 +1,150 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
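`Parser` above compiles a small plain-text DSL (property assignments, then a `make_key_value:` section and an optional `make_key:` section) into an index class. A hedged sketch of what that input looks like when passed to `Database.add_index` as a string; the index name and the field `a` are illustrative, and it assumes the generated index file receives the standard header imports (e.g. hashlib's `md5`, which the `funcs` table above relies on):

```python
from CodernityDB.database import Database

db = Database('/tmp/cp_dsl_demo')        # illustrative path
db.create()
db.add_index("""
name = by_a
type = HashIndex
key_format = 16s

make_key_value:
md5(a), None

make_key:
md5(key)
""")
db.insert(dict(a='some value'))
print db.get('by_a', 'some value')       # key is run through make_key
```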
+ + +import functools +from heapq import nsmallest +from operator import itemgetter +from collections import defaultdict + +try: + from collections import Counter +except ImportError: + class Counter(dict): + 'Mapping where default values are zero' + def __missing__(self, key): + return 0 + + +def cache1lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = Counter() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + if len(cache) == maxsize: + for k, _ in nsmallest(maxsize // 10 or 1, + use_count.iteritems(), + key=itemgetter(1)): + del cache[k], use_count[k] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + # result = user_function(obj, key, *args, **kwargs) + finally: + use_count[key] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key): + try: + del cache[key] + del use_count[key] + except KeyError: + return False + else: + return True + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + + +def twolvl_iterator(dict): + for k, v in dict.iteritems(): + for kk, vv in v.iteritems(): + yield k, kk, vv + + +def cache2lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = defaultdict(Counter) + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): +# return user_function(*args, **kwargs) + try: + result = cache[args[0]][args[1]] + except KeyError: + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for k1, k2, v in nsmallest(to_delete, + twolvl_iterator(use_count), + key=itemgetter(2)): + del cache[k1][k2], use_count[k1][k2] + if not cache[k1]: + del cache[k1] + del use_count[k1] + wrapper.cache_size -= to_delete + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + finally: + use_count[args[0]][args[1]] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key, inner_key=None): + if inner_key is not None: + try: + del cache[key][inner_key] + del use_count[key][inner_key] + if not cache[key]: + del cache[key] + del use_count[key] + wrapper.cache_size -= 1 + except KeyError: + return False + else: + return True + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + del use_count[key] + except KeyError: + return False + else: + return True + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function diff --git a/libs/CodernityDB/lfu_cache_with_lock.py b/libs/CodernityDB/lfu_cache_with_lock.py new file mode 100644 index 00000000..39f43cc6 --- /dev/null +++ b/libs/CodernityDB/lfu_cache_with_lock.py @@ -0,0 +1,158 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import functools +from heapq import nsmallest +from operator import itemgetter +from collections import defaultdict + + +try: + from collections import Counter +except ImportError: + class Counter(dict): + 'Mapping where default values are zero' + def __missing__(self, key): + return 0 + + +def twolvl_iterator(dict): + for k, v in dict.iteritems(): + for kk, vv in v.iteritems(): + yield k, kk, vv + + +def create_cache1lvl(lock_obj): + def cache1lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = Counter() + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + with lock: + if len(cache) == maxsize: + for k, _ in nsmallest(maxsize // 10 or 1, + use_count.iteritems(), + key=itemgetter(1)): + del cache[k], use_count[k] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + use_count[key] += 1 + else: + with lock: + use_count[key] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key): + try: + del cache[key] + del use_count[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + return cache1lvl + + +def create_cache2lvl(lock_obj): + def cache2lvl(maxsize=100): + """ + modified version of http://code.activestate.com/recipes/498245/ + """ + def decorating_function(user_function): + cache = {} + use_count = defaultdict(Counter) + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): + try: + result = cache[args[0]][args[1]] + except KeyError: + with lock: + if wrapper.cache_size == maxsize: + to_delete = maxsize / 10 or 1 + for k1, k2, v in nsmallest(to_delete, + twolvl_iterator( + use_count), + key=itemgetter(2)): + del cache[k1][k2], use_count[k1][k2] + if not cache[k1]: + del cache[k1] + del use_count[k1] + wrapper.cache_size -= to_delete + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + use_count[args[0]][args[1]] += 1 + wrapper.cache_size += 1 + else: + use_count[args[0]][args[1]] += 1 + return result + + def clear(): + cache.clear() + use_count.clear() + + def delete(key, *args): + if args: + try: + del cache[key][args[0]] + del use_count[key][args[0]] + if not cache[key]: + del cache[key] + del use_count[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + del use_count[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function + return cache2lvl diff --git a/libs/CodernityDB/migrate.py b/libs/CodernityDB/migrate.py new file mode 100644 index 00000000..4d0b4005 --- /dev/null +++ b/libs/CodernityDB/migrate.py @@ -0,0 +1,45 @@ +#!/usr/bin/env 
python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from CodernityDB.database import Database +import shutil +import os + + +def migrate(source, destination): + """ + Very basic for now + """ + dbs = Database(source) + dbt = Database(destination) + dbs.open() + dbt.create() + dbt.close() + for curr in os.listdir(os.path.join(dbs.path, '_indexes')): + if curr != '00id.py': + shutil.copyfile(os.path.join(dbs.path, '_indexes', curr), + os.path.join(dbt.path, '_indexes', curr)) + dbt.open() + for c in dbs.all('id'): + del c['_rev'] + dbt.insert(c) + return True + + +if __name__ == '__main__': + import sys + migrate(sys.argv[1], sys.argv[2]) diff --git a/libs/CodernityDB/misc.py b/libs/CodernityDB/misc.py new file mode 100644 index 00000000..54c94812 --- /dev/null +++ b/libs/CodernityDB/misc.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from random import getrandbits, randrange +import uuid + + +class NONE: + """ + It's inteded to be None but different, + for internal use only! + """ + pass + + +def random_hex_32(): + return uuid.UUID(int=getrandbits(128), version=4).hex + + +def random_hex_4(*args, **kwargs): + return '%04x' % randrange(256 ** 2) diff --git a/libs/CodernityDB/patch.py b/libs/CodernityDB/patch.py new file mode 100644 index 00000000..4c074f43 --- /dev/null +++ b/libs/CodernityDB/patch.py @@ -0,0 +1,99 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
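A quick illustration of the two helpers in `misc.py`: `random_hex_32` is what the unique id index uses to mint 32-character document ids, and `NONE` is a sentinel distinct from `None` (used by `patch.py` below to detect missing attributes):

```python
from CodernityDB.misc import NONE, random_hex_32

_id = random_hex_32()
assert len(_id) == 32                    # random 32-char hex document id

sentinel = NONE()                        # "missing" marker distinct from None
value = getattr(object(), 'no_such_attr', sentinel)
assert value is sentinel                 # absent, even if None were a real value
```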
+
+
+from CodernityDB.misc import NONE
+
+
+def __patch(obj, name, new):
+    n = NONE()
+    orig = getattr(obj, name, n)
+    if orig is not n:
+        if orig == new:
+            raise Exception("Shouldn't happen, new and orig are the same")
+        setattr(obj, name, new)
+    return
+
+
+def patch_cache_lfu(lock_obj):
+    """
+    Patches the cache mechanism to be thread safe (gevent locks work too).
+
+    .. note::
+
+        It's an internal CodernityDB mechanism; it will be called when needed.
+
+    """
+    import lfu_cache
+    import lfu_cache_with_lock
+    lfu_lock1lvl = lfu_cache_with_lock.create_cache1lvl(lock_obj)
+    lfu_lock2lvl = lfu_cache_with_lock.create_cache2lvl(lock_obj)
+    __patch(lfu_cache, 'cache1lvl', lfu_lock1lvl)
+    __patch(lfu_cache, 'cache2lvl', lfu_lock2lvl)
+
+
+def patch_cache_rr(lock_obj):
+    """
+    Patches the cache mechanism to be thread safe (gevent locks work too).
+
+    .. note::
+
+        It's an internal CodernityDB mechanism; it will be called when needed.
+
+    """
+    import rr_cache
+    import rr_cache_with_lock
+    rr_lock1lvl = rr_cache_with_lock.create_cache1lvl(lock_obj)
+    rr_lock2lvl = rr_cache_with_lock.create_cache2lvl(lock_obj)
+    __patch(rr_cache, 'cache1lvl', rr_lock1lvl)
+    __patch(rr_cache, 'cache2lvl', rr_lock2lvl)
+
+
+def patch_flush_fsync(db_obj):
+    """
+    Will always execute index.fsync after index.flush.
+
+    .. note::
+
+        It's for advanced users; use it when you understand the difference
+        between `flush` and `fsync`, and when you definitely need it.
+
+    It's important to call it **AFTER** the database has all its indexes
+    (after db.create or db.open).
+
+    Example usage::
+
+        ...
+        db = Database('/tmp/patch_demo')
+        db.create()
+        patch_flush_fsync(db)
+        ...
+
+    """
+
+    def always_fsync(ind_obj):
+        def _inner():
+            ind_obj.orig_flush()
+            ind_obj.fsync()
+        return _inner
+
+    for index in db_obj.indexes:
+        setattr(index, 'orig_flush', index.flush)
+        setattr(index, 'flush', always_fsync(index))
+
+    setattr(db_obj, 'orig_flush', db_obj.flush)
+    setattr(db_obj, 'flush', always_fsync(db_obj))
+
+    return
diff --git a/libs/CodernityDB/rr_cache.py b/libs/CodernityDB/rr_cache.py
new file mode 100644
index 00000000..5801b7cc
--- /dev/null
+++ b/libs/CodernityDB/rr_cache.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011-2013 Codernity (http://codernity.com)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
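A sketch of how the patch functions above are meant to be driven. The thread-safe database wrappers normally do this through `cdb_environment` (see `tree_index.py` further down, which patches before importing the cache decorators); `threading.RLock` here is just an example lock class, and the calls must happen before the index modules bind those names at import time:

```python
import threading

from CodernityDB import patch

# Swap the plain cache decorators for lock-wrapped ones; any class whose
# instances support the context-manager protocol can serve as lock_obj.
patch.patch_cache_lfu(threading.RLock)
patch.patch_cache_rr(threading.RLock)
```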
+ +import functools +from random import choice + + +def cache1lvl(maxsize=100): + def decorating_function(user_function): + cache1lvl = {} + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache1lvl[key] + except KeyError: + if len(cache1lvl) == maxsize: + for i in xrange(maxsize // 10 or 1): + del cache1lvl[choice(cache1lvl.keys())] + cache1lvl[key] = user_function(key, *args, **kwargs) + result = cache1lvl[key] +# result = user_function(obj, key, *args, **kwargs) + return result + + def clear(): + cache1lvl.clear() + + def delete(key): + try: + del cache1lvl[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache1lvl + wrapper.delete = delete + return wrapper + return decorating_function + + +def cache2lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): +# return user_function(*args, **kwargs) + try: + result = cache[args[0]][args[1]] + except KeyError: +# print wrapper.cache_size + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for i in xrange(to_delete): + key1 = choice(cache.keys()) + key2 = choice(cache[key1].keys()) + del cache[key1][key2] + if not cache[key1]: + del cache[key1] + wrapper.cache_size -= to_delete +# print wrapper.cache_size + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + return result + + def clear(): + cache.clear() + wrapper.cache_size = 0 + + def delete(key, inner_key=None): + if inner_key: + try: + del cache[key][inner_key] + if not cache[key]: + del cache[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function diff --git a/libs/CodernityDB/rr_cache_with_lock.py b/libs/CodernityDB/rr_cache_with_lock.py new file mode 100644 index 00000000..66298c59 --- /dev/null +++ b/libs/CodernityDB/rr_cache_with_lock.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
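The random-replacement decorators above evict an arbitrary entry once `maxsize` is reached, which avoids LFU's per-hit bookkeeping. A minimal usage sketch (the function is a stand-in for a disk read; `delete` is the explicit invalidation the index code performs after writes):

```python
from CodernityDB.rr_cache import cache1lvl


@cache1lvl(maxsize=100)
def expensive_lookup(key):
    # stand-in for a disk read; name and body are illustrative
    return key.upper()

expensive_lookup('abc')            # miss: computed and cached
expensive_lookup('abc')            # hit: served from the cache
expensive_lookup.delete('abc')     # explicit invalidation, as the indexes do
```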
+ + +import functools +from random import choice + + +def create_cache1lvl(lock_obj): + def cache1lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(key, *args, **kwargs): + try: + result = cache[key] + except KeyError: + with lock: + if len(cache) == maxsize: + for i in xrange(maxsize // 10 or 1): + del cache[choice(cache.keys())] + cache[key] = user_function(key, *args, **kwargs) + result = cache[key] + return result + + def clear(): + cache.clear() + + def delete(key): + try: + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + return wrapper + return decorating_function + return cache1lvl + + +def create_cache2lvl(lock_obj): + def cache2lvl(maxsize=100): + def decorating_function(user_function): + cache = {} + lock = lock_obj() + + @functools.wraps(user_function) + def wrapper(*args, **kwargs): + try: + result = cache[args[0]][args[1]] + except KeyError: + with lock: + if wrapper.cache_size == maxsize: + to_delete = maxsize // 10 or 1 + for i in xrange(to_delete): + key1 = choice(cache.keys()) + key2 = choice(cache[key1].keys()) + del cache[key1][key2] + if not cache[key1]: + del cache[key1] + wrapper.cache_size -= to_delete + result = user_function(*args, **kwargs) + try: + cache[args[0]][args[1]] = result + except KeyError: + cache[args[0]] = {args[1]: result} + wrapper.cache_size += 1 + return result + + def clear(): + cache.clear() + wrapper.cache_size = 0 + + def delete(key, *args): + if args: + try: + del cache[key][args[0]] + if not cache[key]: + del cache[key] + wrapper.cache_size -= 1 + return True + except KeyError: + return False + else: + try: + wrapper.cache_size -= len(cache[key]) + del cache[key] + return True + except KeyError: + return False + + wrapper.clear = clear + wrapper.cache = cache + wrapper.delete = delete + wrapper.cache_size = 0 + return wrapper + return decorating_function + return cache2lvl diff --git a/libs/CodernityDB/sharded_hash.py b/libs/CodernityDB/sharded_hash.py new file mode 100644 index 00000000..08a8c2f0 --- /dev/null +++ b/libs/CodernityDB/sharded_hash.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
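Same caches as in `rr_cache.py`, but produced by factories that close over a lock class so concurrent misses don't race while repopulating. A short sketch, assuming a plain `threading.RLock`:

```python
import threading

from CodernityDB.rr_cache_with_lock import create_cache1lvl

locked_cache1lvl = create_cache1lvl(threading.RLock)


@locked_cache1lvl(maxsize=100)
def expensive_lookup(key):
    return key.upper()    # misses now populate the cache under the lock
```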
+ + +from CodernityDB.hash_index import UniqueHashIndex, HashIndex +from CodernityDB.sharded_index import ShardedIndex +from CodernityDB.index import IndexPreconditionsException + +from random import getrandbits +import uuid + + +class IU_ShardedUniqueHashIndex(ShardedIndex): + + custom_header = """import uuid +from random import getrandbits +from CodernityDB.sharded_index import ShardedIndex +""" + + def __init__(self, db_path, name, *args, **kwargs): + if kwargs.get('sh_nums', 0) > 255: + raise IndexPreconditionsException("Too many shards") + kwargs['ind_class'] = UniqueHashIndex + super(IU_ShardedUniqueHashIndex, self).__init__(db_path, + name, *args, **kwargs) + self.patchers.append(self.wrap_insert_id_index) + + @staticmethod + def wrap_insert_id_index(db_obj, clean=False): + def _insert_id_index(_rev, data): + """ + Performs insert on **id** index. + """ + _id, value = db_obj.id_ind.make_key_value(data) # may be improved + trg_shard = _id[:2] + storage = db_obj.id_ind.shards_r[trg_shard].storage + start, size = storage.insert(value) + db_obj.id_ind.insert(_id, _rev, start, size) + return _id + if not clean: + if hasattr(db_obj, '_insert_id_index_orig'): + raise IndexPreconditionsException( + "Already patched, something went wrong") + setattr(db_obj, "_insert_id_index_orig", db_obj._insert_id_index) + setattr(db_obj, "_insert_id_index", _insert_id_index) + else: + setattr(db_obj, "_insert_id_index", db_obj._insert_id_index_orig) + delattr(db_obj, "_insert_id_index_orig") + + def create_key(self): + h = uuid.UUID(int=getrandbits(128), version=4).hex + trg = self.last_used + 1 + if trg >= self.sh_nums: + trg = 0 + self.last_used = trg + h = '%02x%30s' % (trg, h[2:]) + return h + + def delete(self, key, *args, **kwargs): + trg_shard = key[:2] + op = self.shards_r[trg_shard] + return op.delete(key, *args, **kwargs) + + def update(self, key, *args, **kwargs): + trg_shard = key[:2] + self.last_used = int(trg_shard, 16) + op = self.shards_r[trg_shard] + return op.update(key, *args, **kwargs) + + def insert(self, key, *args, **kwargs): + trg_shard = key[:2] # in most cases it's in create_key BUT not always + self.last_used = int(key[:2], 16) + op = self.shards_r[trg_shard] + return op.insert(key, *args, **kwargs) + + def get(self, key, *args, **kwargs): + trg_shard = key[:2] + self.last_used = int(trg_shard, 16) + op = self.shards_r[trg_shard] + return op.get(key, *args, **kwargs) + + +class ShardedUniqueHashIndex(IU_ShardedUniqueHashIndex): + + # allow unique hash to be used directly + custom_header = 'from CodernityDB.sharded_hash import IU_ShardedUniqueHashIndex' + + pass + + +class IU_ShardedHashIndex(ShardedIndex): + + custom_header = """from CodernityDB.sharded_index import ShardedIndex""" + + def __init__(self, db_path, name, *args, **kwargs): + kwargs['ind_class'] = HashIndex + super(IU_ShardedHashIndex, self).__init__(db_path, name, * + args, **kwargs) + + def calculate_shard(self, key): + """ + Must be implemented. 
It has to return the shard to be used for the given key.
+
+        :param key: key
+        :returns: target shard
+        :rtype: int
+        """
+        raise NotImplementedError()
+
+    def delete(self, doc_id, key, *args, **kwargs):
+        trg_shard = self.calculate_shard(key)
+        op = self.shards_r[trg_shard]
+        return op.delete(doc_id, key, *args, **kwargs)
+
+    def insert(self, doc_id, key, *args, **kwargs):
+        trg_shard = self.calculate_shard(key)
+        op = self.shards_r[trg_shard]
+        return op.insert(doc_id, key, *args, **kwargs)
+
+    def update(self, doc_id, key, *args, **kwargs):
+        trg_shard = self.calculate_shard(key)
+        op = self.shards_r[trg_shard]
+        return op.update(doc_id, key, *args, **kwargs)
+
+    def get(self, key, *args, **kwargs):
+        trg_shard = self.calculate_shard(key)
+        op = self.shards_r[trg_shard]
+        return op.get(key, *args, **kwargs)
+
+
+class ShardedHashIndex(IU_ShardedHashIndex):
+    pass
diff --git a/libs/CodernityDB/sharded_index.py b/libs/CodernityDB/sharded_index.py
new file mode 100644
index 00000000..2bdf9d75
--- /dev/null
+++ b/libs/CodernityDB/sharded_index.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2011-2013 Codernity (http://codernity.com)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+from CodernityDB.index import Index
+# from CodernityDB.env import cdb_environment
+# import warnings
+
+
+class ShardedIndex(Index):
+
+    def __init__(self, db_path, name, *args, **kwargs):
+        """
+        There are 3 additional parameters. You have to hardcode them in your
+        custom class; **NEVER** pass them directly.
+
+        :param int sh_nums: how many shards there should be
+        :param class ind_class: the Index class to use (HashIndex or your custom one)
+        :param bool use_make_keys: if False (the default), `make_key` and `make_key_value` are taken from the first shard; if True, the ones defined on your class are kept
+
+        The remaining parameters are passed straight to the `ind_class` shards.
+ + """ + super(ShardedIndex, self).__init__(db_path, name) + try: + self.sh_nums = kwargs.pop('sh_nums') + except KeyError: + self.sh_nums = 5 + try: + ind_class = kwargs.pop('ind_class') + except KeyError: + raise Exception("ind_class must be given") + else: + # if not isinstance(ind_class, basestring): + # ind_class = ind_class.__name__ + self.ind_class = ind_class + if 'use_make_keys' in kwargs: + self.use_make_keys = kwargs.pop('use_make_keys') + else: + self.use_make_keys = False + self._set_shard_datas(*args, **kwargs) + self.patchers = [] # database object patchers + + def _set_shard_datas(self, *args, **kwargs): + self.shards = {} + self.shards_r = {} +# ind_class = globals()[self.ind_class] + ind_class = self.ind_class + i = 0 + for sh_name in [self.name + str(x) for x in xrange(self.sh_nums)]: + # dict is better than list in that case + self.shards[i] = ind_class(self.db_path, sh_name, *args, **kwargs) + self.shards_r['%02x' % i] = self.shards[i] + self.shards_r[i] = self.shards[i] + i += 1 + + if not self.use_make_keys: + self.make_key = self.shards[0].make_key + self.make_key_value = self.shards[0].make_key_value + + self.last_used = 0 + + @property + def storage(self): + st = self.shards[self.last_used].storage + return st + + def __getattr__(self, name): + return getattr(self.shards[self.last_used], name) + + def open_index(self): + for curr in self.shards.itervalues(): + curr.open_index() + + def create_index(self): + for curr in self.shards.itervalues(): + curr.create_index() + + def destroy(self): + for curr in self.shards.itervalues(): + curr.destroy() + + def compact(self): + for curr in self.shards.itervalues(): + curr.compact() + + def reindex(self): + for curr in self.shards.itervalues(): + curr.reindex() + + def all(self, *args, **kwargs): + for curr in self.shards.itervalues(): + for now in curr.all(*args, **kwargs): + yield now + + def get_many(self, *args, **kwargs): + for curr in self.shards.itervalues(): + for now in curr.get_many(*args, **kwargs): + yield now diff --git a/libs/CodernityDB/storage.py b/libs/CodernityDB/storage.py new file mode 100644 index 00000000..30be1f3b --- /dev/null +++ b/libs/CodernityDB/storage.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
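A hedged sketch tying the two modules above together: `IU_ShardedHashIndex` leaves `calculate_shard` to the subclass, and `use_make_keys=True` keeps the `make_key`/`make_key_value` defined below instead of borrowing the first shard's. Names are illustrative, and passing `key_format` through to the `HashIndex` shards follows the allowed properties listed in `indexcreator.py`:

```python
from CodernityDB.sharded_hash import ShardedHashIndex


class NameShardedIndex(ShardedHashIndex):
    custom_header = 'from CodernityDB.sharded_hash import ShardedHashIndex'

    def __init__(self, *args, **kwargs):
        kwargs['sh_nums'] = 4            # spread keys over 4 shard files
        kwargs['key_format'] = '16s'
        kwargs['use_make_keys'] = True   # keep the methods defined below
        super(NameShardedIndex, self).__init__(*args, **kwargs)

    def calculate_shard(self, key):
        return ord(key[0]) % self.sh_nums    # route by first byte of the key

    def make_key(self, key):
        return key.rjust(16, '_')[:16]

    def make_key_value(self, data):
        name = data.get('name')
        if name is None:
            return None                  # assumed: None means "don't index"
        return self.make_key(name), None
```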
+ +import os +import struct +import shutil +import marshal +import io + + +try: + from CodernityDB import __version__ +except ImportError: + from __init__ import __version__ + + +class StorageException(Exception): + pass + + +class DummyStorage(object): + """ + Storage mostly used to fake real storage + """ + + def create(self, *args, **kwargs): + pass + + def open(self, *args, **kwargs): + pass + + def close(self, *args, **kwargs): + pass + + def data_from(self, *args, **kwargs): + pass + + def data_to(self, *args, **kwargs): + pass + + def save(self, *args, **kwargs): + return 0, 0 + + def insert(self, *args, **kwargs): + return self.save(*args, **kwargs) + + def update(self, *args, **kwargs): + return 0, 0 + + def get(self, *args, **kwargs): + return None + + # def compact(self, *args, **kwargs): + # pass + + def fsync(self, *args, **kwargs): + pass + + def flush(self, *args, **kwargs): + pass + + +class IU_Storage(object): + + __version__ = __version__ + + def __init__(self, db_path, name='main'): + self.db_path = db_path + self.name = name + self._header_size = 100 + + def create(self): + if os.path.exists(os.path.join(self.db_path, self.name + "_stor")): + raise IOError("Storage already exists!") + with io.open(os.path.join(self.db_path, self.name + "_stor"), 'wb') as f: + f.write(struct.pack("10s90s", self.__version__, '|||||')) + f.close() + self._f = io.open(os.path.join( + self.db_path, self.name + "_stor"), 'r+b', buffering=0) + self.flush() + self._f.seek(0, 2) + + def open(self): + if not os.path.exists(os.path.join(self.db_path, self.name + "_stor")): + raise IOError("Storage doesn't exists!") + self._f = io.open(os.path.join( + self.db_path, self.name + "_stor"), 'r+b', buffering=0) + self.flush() + self._f.seek(0, 2) + + def destroy(self): + os.unlink(os.path.join(self.db_path, self.name + '_stor')) + + def close(self): + self._f.close() + # self.flush() + # self.fsync() + + def data_from(self, data): + return marshal.loads(data) + + def data_to(self, data): + return marshal.dumps(data) + + def save(self, data): + s_data = self.data_to(data) + self._f.seek(0, 2) + start = self._f.tell() + size = len(s_data) + self._f.write(s_data) + self.flush() + return start, size + + def insert(self, data): + return self.save(data) + + def update(self, data): + return self.save(data) + + def get(self, start, size, status='c'): + if status == 'd': + return None + else: + self._f.seek(start) + return self.data_from(self._f.read(size)) + + def flush(self): + self._f.flush() + + def fsync(self): + os.fsync(self._f.fileno()) + + +# classes for public use, done in this way because of +# generation static files with indexes (_index directory) + + +class Storage(IU_Storage): + pass diff --git a/libs/CodernityDB/tree_index.py b/libs/CodernityDB/tree_index.py new file mode 100644 index 00000000..b79805db --- /dev/null +++ b/libs/CodernityDB/tree_index.py @@ -0,0 +1,2048 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2011-2013 Codernity (http://codernity.com) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + + +from index import Index, IndexException, DocIdNotFound, ElemNotFound +import struct +import marshal +import os +import io +import shutil +from storage import IU_Storage +# from ipdb import set_trace + +from CodernityDB.env import cdb_environment +from CodernityDB.index import TryReindexException + +if cdb_environment.get('rlock_obj'): + from CodernityDB import patch + patch.patch_cache_rr(cdb_environment['rlock_obj']) + +from CodernityDB.rr_cache import cache1lvl, cache2lvl + +tree_buffer_size = io.DEFAULT_BUFFER_SIZE + +cdb_environment['tree_buffer_size'] = tree_buffer_size + + +MODE_FIRST = 0 +MODE_LAST = 1 + +MOVE_BUFFER_PREV = 0 +MOVE_BUFFER_NEXT = 1 + + +class NodeCapacityException(IndexException): + pass + + +class IU_TreeBasedIndex(Index): + + custom_header = 'from CodernityDB.tree_index import TreeBasedIndex' + + def __init__(self, db_path, name, key_format='32s', pointer_format='I', + meta_format='32sIIc', node_capacity=10, storage_class=None): + if node_capacity < 3: + raise NodeCapacityException + super(IU_TreeBasedIndex, self).__init__(db_path, name) + self.data_start = self._start_ind + 1 + self.node_capacity = node_capacity + self.flag_format = 'c' + self.elements_counter_format = 'h' + self.pointer_format = pointer_format + self.key_format = key_format + self.meta_format = meta_format + self._count_props() + if not storage_class: + storage_class = IU_Storage + if storage_class and not isinstance(storage_class, basestring): + storage_class = storage_class.__name__ + self.storage_class = storage_class + self.storage = None + cache = cache1lvl(100) + twolvl_cache = cache2lvl(150) + self._find_key = cache(self._find_key) + self._match_doc_id = cache(self._match_doc_id) +# self._read_single_leaf_record = +# twolvl_cache(self._read_single_leaf_record) + self._find_key_in_leaf = twolvl_cache(self._find_key_in_leaf) + self._read_single_node_key = twolvl_cache(self._read_single_node_key) + self._find_first_key_occurence_in_node = twolvl_cache( + self._find_first_key_occurence_in_node) + self._find_last_key_occurence_in_node = twolvl_cache( + self._find_last_key_occurence_in_node) + self._read_leaf_nr_of_elements = cache(self._read_leaf_nr_of_elements) + self._read_leaf_neighbours = cache(self._read_leaf_neighbours) + self._read_leaf_nr_of_elements_and_neighbours = cache( + self._read_leaf_nr_of_elements_and_neighbours) + self._read_node_nr_of_elements_and_children_flag = cache( + self._read_node_nr_of_elements_and_children_flag) + + def _count_props(self): + """ + Counts dynamic properties for tree, such as all complex formats + """ + self.single_leaf_record_format = self.key_format + self.meta_format + self.single_node_record_format = self.pointer_format + \ + self.key_format + self.pointer_format + self.node_format = self.elements_counter_format + self.flag_format\ + + self.pointer_format + (self.key_format + + self.pointer_format) * self.node_capacity + self.leaf_format = self.elements_counter_format + self.pointer_format * 2\ + + (self.single_leaf_record_format) * self.node_capacity + self.leaf_heading_format = self.elements_counter_format + \ + self.pointer_format * 2 + self.node_heading_format = self.elements_counter_format + \ + self.flag_format + self.key_size = struct.calcsize('<' + self.key_format) + self.meta_size = struct.calcsize('<' + self.meta_format) + self.single_leaf_record_size = struct.calcsize('<' + self. 
+ single_leaf_record_format) + self.single_node_record_size = struct.calcsize('<' + self. + single_node_record_format) + self.node_size = struct.calcsize('<' + self.node_format) + self.leaf_size = struct.calcsize('<' + self.leaf_format) + self.flag_size = struct.calcsize('<' + self.flag_format) + self.elements_counter_size = struct.calcsize('<' + self. + elements_counter_format) + self.pointer_size = struct.calcsize('<' + self.pointer_format) + self.leaf_heading_size = struct.calcsize( + '<' + self.leaf_heading_format) + self.node_heading_size = struct.calcsize( + '<' + self.node_heading_format) + + def create_index(self): + if os.path.isfile(os.path.join(self.db_path, self.name + '_buck')): + raise IndexException('Already exists') + with io.open(os.path.join(self.db_path, self.name + "_buck"), 'w+b') as f: + props = dict(name=self.name, + flag_format=self.flag_format, + pointer_format=self.pointer_format, + elements_counter_format=self.elements_counter_format, + node_capacity=self.node_capacity, + key_format=self.key_format, + meta_format=self.meta_format, + version=self.__version__, + storage_class=self.storage_class) + f.write(marshal.dumps(props)) + self.buckets = io.open(os.path.join(self.db_path, self.name + + "_buck"), 'r+b', buffering=0) + self._create_storage() + self.buckets.seek(self._start_ind) + self.buckets.write(struct.pack(' candidate_start: + move_buffer = MOVE_BUFFER_PREV + elif buffer_end < candidate_start + self.single_leaf_record_size: + move_buffer = MOVE_BUFFER_NEXT + else: + move_buffer = None + return self._calculate_key_position(leaf_start, (imin + imax) / 2, 'l'), (imin + imax) / 2, move_buffer + + def _choose_next_candidate_index_in_node(self, node_start, candidate_start, buffer_start, buffer_end, imin, imax): + if buffer_start > candidate_start: + move_buffer = MOVE_BUFFER_PREV + elif buffer_end < candidate_start + self.single_node_record_size: + (self.pointer_size + self.key_size) - 1 + move_buffer = MOVE_BUFFER_NEXT + else: + move_buffer = None + return self._calculate_key_position(node_start, (imin + imax) / 2, 'n'), (imin + imax) / 2, move_buffer + + def _find_key_in_leaf(self, leaf_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start)[-5:] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements)[-5:] + + def _find_key_in_leaf_for_update(self, key, doc_id, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, doc_id=doc_id) + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, doc_id=doc_id) + + def _find_index_of_first_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST, return_closest=True)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST, return_closest=True)[:2] + + def _find_index_of_last_key_equal_or_smaller_key(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_LAST, return_closest=True)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_LAST, return_closest=True)[:2] + + def _find_index_of_first_key_equal(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return 
self._find_key_in_leaf_with_one_element(key, leaf_start, mode=MODE_FIRST)[:2] + else: + return self._find_key_in_leaf_using_binary_search(key, leaf_start, nr_of_elements, mode=MODE_FIRST)[:2] + + def _find_key_in_leaf_with_one_element(self, key, leaf_start, doc_id=None, mode=None, return_closest=False): + curr_key, curr_doc_id, curr_start, curr_size,\ + curr_status = self._read_single_leaf_record(leaf_start, 0) + if key != curr_key: + if return_closest and curr_status != 'd': + return leaf_start, 0 + else: + raise ElemNotFound + else: + if curr_status == 'd': + raise ElemNotFound + elif doc_id is not None and doc_id != curr_doc_id: +# should't happen, crashes earlier on id index + raise DocIdNotFound + else: + return leaf_start, 0, curr_doc_id, curr_key, curr_start, curr_size, curr_status + + def _find_key_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements, doc_id=None, mode=None, return_closest=False): + """ + Binary search implementation used in all get functions + """ + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + self._calculate_key_position(leaf_start, + (imin + imax) / 2, + 'l'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + candidate_index) + candidate_start = self._calculate_key_position( + leaf_start, candidate_index, 'l') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # if next chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + elif key == curr_key: + if mode == MODE_LAST: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if curr_status == 'o': + break + else: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imax = candidate_index + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + + if imax > imin: + chosen_key_position = candidate_index + else: + chosen_key_position = imax + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position) + if key != curr_key: + if return_closest: # useful for find all bigger/smaller methods + return leaf_start, chosen_key_position + else: + raise ElemNotFound + if doc_id and doc_id == curr_doc_id and curr_status == 'o': 
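+ # key and doc_id match a live ('o') record: return its full location tuple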
+ return leaf_start, chosen_key_position, curr_doc_id, curr_key, curr_start, curr_size, curr_status + else: + if mode == MODE_FIRST and imin < chosen_key_position: # check if there isn't any element with equal key before chosen one + matching_record_index = self._leaf_linear_key_search(key, + self._calculate_key_position(leaf_start, + imin, + 'l'), + imin, + chosen_key_position) + else: + matching_record_index = chosen_key_position + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + if curr_status == 'd' and not return_closest: + leaf_start, nr_of_elements, matching_record_index = self._find_existing(key, + matching_record_index, + leaf_start, + nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + if doc_id is not None and doc_id != curr_doc_id: + leaf_start, nr_of_elements, matching_record_index = self._match_doc_id(doc_id, + key, + matching_record_index, + leaf_start, + nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + matching_record_index) + return leaf_start, matching_record_index, curr_doc_id, curr_key, curr_start, curr_size, curr_status + + def _find_place_in_leaf(self, key, leaf_start, nr_of_elements): + if nr_of_elements == 1: + return self._find_place_in_leaf_with_one_element(key, leaf_start) + else: + return self._find_place_in_leaf_using_binary_search(key, leaf_start, nr_of_elements) + + def _find_place_in_leaf_with_one_element(self, key, leaf_start): + curr_key, curr_doc_id, curr_start, curr_size,\ + curr_status = self._read_single_leaf_record(leaf_start, 0) + if curr_status == 'd': + return leaf_start, 0, 0, False, True # leaf start, index of new key position, nr of rec to rewrite, full_leaf flag, on_deleted flag + else: + if key < curr_key: + return leaf_start, 0, 1, False, False + else: + return leaf_start, 1, 0, False, False + + def _find_place_in_leaf_using_binary_search(self, key, leaf_start, nr_of_elements): + """ + Binary search implementation used in insert function + """ + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + self._calculate_key_position(leaf_start, + (imin + imax) / 2, + 'l'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + candidate_index) + candidate_start = self._calculate_key_position( + leaf_start, candidate_index, 'l') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # if next chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_leaf(leaf_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + if imax < imin and imin < nr_of_elements: + 
chosen_key_position = imin + else: + chosen_key_position = imax + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position) + if curr_status == 'd': + return leaf_start, chosen_key_position, 0, False, True + elif key < curr_key: + if chosen_key_position > 0: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position - 1) + if curr_status == 'd': + return leaf_start, chosen_key_position - 1, 0, False, True + else: + return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False + else: + return leaf_start, chosen_key_position, nr_of_elements - chosen_key_position, (nr_of_elements == self.node_capacity), False + else: + if chosen_key_position < nr_of_elements - 1: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_start, + chosen_key_position + 1) + if curr_status == 'd': + return leaf_start, chosen_key_position + 1, 0, False, True + else: + return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, (nr_of_elements == self.node_capacity), False + else: + return leaf_start, chosen_key_position + 1, nr_of_elements - chosen_key_position - 1, (nr_of_elements == self.node_capacity), False + + def _set_buffer_limits(self): + pos = self.buckets.tell() + buffer_start = pos - (pos % tree_buffer_size) + return buffer_start, (buffer_start + tree_buffer_size) + + def _find_first_key_occurence_in_node(self, node_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_FIRST) + else: + return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_FIRST) + + def _find_last_key_occurence_in_node(self, node_start, key, nr_of_elements): + if nr_of_elements == 1: + return self._find_key_in_node_with_one_element(key, node_start, mode=MODE_LAST) + else: + return self._find_key_in_node_using_binary_search(key, node_start, nr_of_elements, mode=MODE_LAST) + + def _find_key_in_node_with_one_element(self, key, node_start, mode=None): + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, 0) + if key < curr_key: + return 0, l_pointer + elif key > curr_key: + return 0, r_pointer + else: + if mode == MODE_FIRST: + return 0, l_pointer + elif mode == MODE_LAST: + return 0, r_pointer + else: + raise Exception('Invalid mode declared: set first/last') + + def _find_key_in_node_using_binary_search(self, key, node_start, nr_of_elements, mode=None): + imin, imax = 0, nr_of_elements - 1 + buffer_start, buffer_end = self._set_buffer_limits() + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + self._calculate_key_position(node_start, + (imin + imax) / 2, + 'n'), + buffer_start, + buffer_end, + imin, imax) + while imax != imin and imax > imin: + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, candidate_index) + candidate_start = self._calculate_key_position( + node_start, candidate_index, 'n') + if key < curr_key: + if move_buffer == MOVE_BUFFER_PREV: + buffer_start, buffer_end = self._prev_buffer( + buffer_start, buffer_end) + else: # if next chosen element is in current buffer, abort moving to other + move_buffer is None + imax = candidate_index - 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, +
candidate_start, + buffer_start, + buffer_end, + imin, imax) + elif key == curr_key: + if mode == MODE_LAST: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + else: + break + else: + if move_buffer == MOVE_BUFFER_NEXT: + buffer_start, buffer_end = self._next_buffer( + buffer_start, buffer_end) + else: + move_buffer is None + imin = candidate_index + 1 + candidate_start, candidate_index, move_buffer = self._choose_next_candidate_index_in_node(node_start, + candidate_start, + buffer_start, + buffer_end, + imin, imax) + + if imax > imin: + chosen_key_position = candidate_index + elif imax < imin and imin < nr_of_elements: + chosen_key_position = imin + else: + chosen_key_position = imax + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, chosen_key_position) + if mode == MODE_FIRST and imin < chosen_key_position: # check if there is no elements with equal key before chosen one + matching_record_index = self._node_linear_key_search(key, + self._calculate_key_position(node_start, + imin, + 'n'), + imin, + chosen_key_position) + else: + matching_record_index = chosen_key_position + l_pointer, curr_key, r_pointer = self._read_single_node_key( + node_start, matching_record_index) + if key < curr_key: + return matching_record_index, l_pointer + elif key > curr_key: + return matching_record_index, r_pointer + else: + if mode == MODE_FIRST: + return matching_record_index, l_pointer + elif mode == MODE_LAST: + return matching_record_index, r_pointer + else: + raise Exception('Invalid mode declared: first/last') + + def _update_leaf_ready_data(self, leaf_start, start_index, new_nr_of_elements, records_to_rewrite): + self.buckets.seek(leaf_start) + self.buckets.write(struct.pack(' new_leaf_size - 1: + key_moved_to_parent_node = leaf_data[(old_leaf_size - 1) * 5] + elif nr_of_records_to_rewrite == new_leaf_size - 1: + key_moved_to_parent_node = new_data[0] + else: + key_moved_to_parent_node = leaf_data[old_leaf_size * 5] + data_to_write = self._prepare_new_root_data(key_moved_to_parent_node, + left_leaf_start_position, + right_leaf_start_position, + 'l') + if nr_of_records_to_rewrite > half_size: + # key goes to first half + # prepare left leaf data + left_leaf_data = struct.pack('<' + self.leaf_heading_format + self.single_leaf_record_format + * (self.node_capacity - nr_of_records_to_rewrite), + old_leaf_size, + 0, + right_leaf_start_position, + *leaf_data[:-nr_of_records_to_rewrite * 5]) + left_leaf_data += struct.pack( + '<' + self.single_leaf_record_format * ( + nr_of_records_to_rewrite - new_leaf_size + 1), + new_data[0], + new_data[1], + new_data[2], + new_data[3], + new_data[4], + *leaf_data[-nr_of_records_to_rewrite * 5:(old_leaf_size - 1) * 5]) + # prepare right leaf_data + right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * + new_leaf_size, + new_leaf_size, + left_leaf_start_position, + 0, + *leaf_data[-new_leaf_size * 5:]) + else: + # key goes to second half + if nr_of_records_to_rewrite: + records_before = leaf_data[old_leaf_size * + 5:-nr_of_records_to_rewrite * 5] + records_after = leaf_data[-nr_of_records_to_rewrite * 5:] + else: + records_before = leaf_data[old_leaf_size * 5:] + records_after = [] + + 
left_leaf_data = struct.pack( + '<' + self.leaf_heading_format + + self.single_leaf_record_format * old_leaf_size, + old_leaf_size, + 0, + right_leaf_start_position, + *leaf_data[:old_leaf_size * 5]) + # prepare right leaf_data + right_leaf_data = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * (new_leaf_size - + nr_of_records_to_rewrite - 1), + new_leaf_size, + left_leaf_start_position, + 0, + *records_before) + right_leaf_data += struct.pack( + '<' + self.single_leaf_record_format * ( + nr_of_records_to_rewrite + 1), + new_data[0], + new_data[1], + new_data[2], + new_data[3], + new_data[4], + *records_after) + left_leaf_data += (self.node_capacity - + old_leaf_size) * self.single_leaf_record_size * '\x00' + right_leaf_data += blanks + data_to_write += left_leaf_data + data_to_write += right_leaf_data + self.buckets.seek(self._start_ind) + self.buckets.write(struct.pack(' half_size: # insert key into first half of leaf + self.buckets.seek(self._calculate_key_position(leaf_start, + self.node_capacity - nr_of_records_to_rewrite, + 'l')) + # read all records with key>new_key + data = self.buckets.read( + nr_of_records_to_rewrite * self.single_leaf_record_size) + records_to_rewrite = struct.unpack( + '<' + nr_of_records_to_rewrite * self.single_leaf_record_format, data) + # remove deleted records; if that succeeded, abort splitting + if self._update_if_has_deleted(leaf_start, + records_to_rewrite, + self.node_capacity - + nr_of_records_to_rewrite, + [new_key, new_doc_id, new_start, new_size, new_status]): + return None + key_moved_to_parent_node = records_to_rewrite[ + -new_leaf_size * 5] + # write new leaf at end of file + self.buckets.seek(0, 2) # end of file + new_leaf_start = self.buckets.tell() + # prepare new leaf_data + new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * + new_leaf_size, + new_leaf_size, + leaf_start, + next_l, + *records_to_rewrite[-new_leaf_size * 5:]) + new_leaf += blanks + # write new leaf + self.buckets.write(new_leaf) + # update old leaf heading + self._update_leaf_size_and_pointers(leaf_start, + old_leaf_size, + prev_l, + new_leaf_start) + # seek position of new key in first half + self.buckets.seek(self._calculate_key_position(leaf_start, + self.node_capacity - nr_of_records_to_rewrite, + 'l')) + # write new key and keys after + self.buckets.write( + struct.pack( + '<' + self.single_leaf_record_format * + (nr_of_records_to_rewrite - new_leaf_size + 1), + new_key, + new_doc_id, + new_start, + new_size, + 'o', + *records_to_rewrite[:-new_leaf_size * 5])) + + if next_l: # when next_l is 0 there is no next leaf to update, avoids writing data at 0 position of file + self._update_leaf_prev_pointer( + next_l, new_leaf_start) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + + return new_leaf_start, key_moved_to_parent_node + else: # key goes into second half of leaf + # seek half of the leaf + self.buckets.seek(self._calculate_key_position( + leaf_start, old_leaf_size, 'l')) + data = self.buckets.read( + self.single_leaf_record_size * (new_leaf_size - 1)) + records_to_rewrite = struct.unpack('<' + (new_leaf_size - 1) * + self.single_leaf_record_format, data) + # remove deleted records; if that succeeded, abort splitting + if self._update_if_has_deleted(leaf_start, + records_to_rewrite, + old_leaf_size, + [new_key, new_doc_id, new_start, new_size, new_status]): + return None + key_moved_to_parent_node = records_to_rewrite[ + -(new_leaf_size - 1) * 5] + if key_moved_to_parent_node > new_key: + key_moved_to_parent_node = new_key + self.buckets.seek(0, 2) # end of file + new_leaf_start = self.buckets.tell() + # prepare new leaf data + index_of_records_split = nr_of_records_to_rewrite * 5 + if index_of_records_split: + records_before = records_to_rewrite[ + :-index_of_records_split] + records_after = records_to_rewrite[ + -index_of_records_split:] + else: + records_before = records_to_rewrite + records_after = [] + new_leaf = struct.pack('<' + self.elements_counter_format + 2 * self.pointer_format + + self.single_leaf_record_format * (new_leaf_size - + nr_of_records_to_rewrite - 1), + new_leaf_size, + leaf_start, + next_l, + *records_before) + new_leaf += struct.pack( + '<' + self.single_leaf_record_format * + (nr_of_records_to_rewrite + 1), + new_key, + new_doc_id, + new_start, + new_size, + 'o', + *records_after) + new_leaf += blanks + self.buckets.write(new_leaf) + self._update_leaf_size_and_pointers(leaf_start, + old_leaf_size, + prev_l, + new_leaf_start) + if next_l: # when next_l is 0 there is no next leaf to update, avoids writing data at 0 position of file + self._update_leaf_prev_pointer( + next_l, new_leaf_start) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + + return new_leaf_start, key_moved_to_parent_node + + def _update_if_has_deleted(self, leaf_start, records_to_rewrite, start_position, new_record_data): + """ + Checks if there are any deleted elements in the data to rewrite and prevents writing them back. + """ + curr_index = 0 + nr_of_elements = self.node_capacity + records_to_rewrite = list(records_to_rewrite) + for status in records_to_rewrite[4::5]: # remove deleted from list + if status != 'o': + del records_to_rewrite[curr_index * 5:curr_index * 5 + 5] + nr_of_elements -= 1 + else: + curr_index += 1 + # if any were deleted we don't have to split, just update the leaf + if nr_of_elements < self.node_capacity: + data_split_index = 0 + for key in records_to_rewrite[0::5]: + if key > new_record_data[0]: + break + else: + data_split_index += 1 + records_to_rewrite = records_to_rewrite[:data_split_index * 5]\ + + new_record_data\ + + records_to_rewrite[data_split_index * 5:] + self._update_leaf_ready_data(leaf_start, + start_position, + nr_of_elements + 1, + records_to_rewrite) + return True + else: # did not find any deleted records in leaf + return False + + def _prepare_new_root_data(self, root_key, left_pointer, right_pointer, children_flag='n'): + new_root = struct.pack( + '<' + self.node_heading_format + self.single_node_record_format, + 1, + children_flag, + left_pointer, + root_key, + right_pointer) + new_root += (self.key_size + self.pointer_size) * (self. + node_capacity - 1) * '\x00' + return new_root + + def _create_new_root_from_node(self, node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer): + # reading second half of node + self.buckets.seek(self.data_start + self.node_heading_size) + # read all keys with key>new_key + data = self.buckets.read(self.pointer_size + self.
+ node_capacity * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + self.pointer_format + self.node_capacity * + (self.key_format + self.pointer_format), data) + self.buckets.seek(0, 2) # end of file + new_node_start = self.buckets.tell() + if nr_of_keys_to_rewrite == new_node_size: + key_moved_to_root = new_key + # prepare new nodes data + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + old_node_size * (self. + key_format + self.pointer_format), + old_node_size, + children_flag, + *old_node_data[:old_node_size * 2 + 1]) + + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + new_node_size * (self. + key_format + self.pointer_format), + new_node_size, + children_flag, + new_pointer, + *old_node_data[old_node_size * 2 + 1:]) + elif nr_of_keys_to_rewrite > new_node_size: + key_moved_to_root = old_node_data[old_node_size * 2 - 1] + # prepare new nodes data + if nr_of_keys_to_rewrite == self.node_capacity: + keys_before = old_node_data[:1] + keys_after = old_node_data[1:old_node_size * 2 - 1] + else: + keys_before = old_node_data[:-nr_of_keys_to_rewrite * 2] + keys_after = old_node_data[-( + nr_of_keys_to_rewrite) * 2:old_node_size * 2 - 1] + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.node_capacity - nr_of_keys_to_rewrite) * (self. + key_format + self.pointer_format), + old_node_size, + children_flag, + *keys_before) + left_node += struct.pack( + '<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite - new_node_size), + new_key, + new_pointer, + *keys_after) + + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + new_node_size * (self. + key_format + self.pointer_format), + new_node_size, + children_flag, + *old_node_data[old_node_size * 2:]) + else: +# 'inserting key into second half of node and creating new root' + key_moved_to_root = old_node_data[old_node_size * 2 + 1] + # prepare new nodes data + left_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + old_node_size * (self. + key_format + self.pointer_format), + old_node_size, + children_flag, + *old_node_data[:old_node_size * 2 + 1]) + if nr_of_keys_to_rewrite: + keys_before = old_node_data[(old_node_size + + 1) * 2:-nr_of_keys_to_rewrite * 2] + keys_after = old_node_data[-nr_of_keys_to_rewrite * 2:] + else: + keys_before = old_node_data[(old_node_size + 1) * 2:] + keys_after = [] + right_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (new_node_size - nr_of_keys_to_rewrite - 1) * (self. 
+ key_format + self.pointer_format), + new_node_size, + children_flag, + *keys_before) + right_node += struct.pack( + '<' + (nr_of_keys_to_rewrite + 1) * + (self.key_format + self.pointer_format), + new_key, + new_pointer, + *keys_after) + new_root = self._prepare_new_root_data(key_moved_to_root, + new_node_start, + new_node_start + self.node_size) + left_node += (self.node_capacity - old_node_size) * \ + (self.key_size + self.pointer_size) * '\x00' + # adding blanks after new node + right_node += (self.node_capacity - new_node_size) * \ + (self.key_size + self.pointer_size) * '\x00' + self.buckets.seek(0, 2) + self.buckets.write(left_node + right_node) + self.buckets.seek(self.data_start) + self.buckets.write(new_root) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete(node_start) + return None + + def _split_node(self, node_start, nr_of_keys_to_rewrite, new_key, new_pointer, children_flag, create_new_root=False): + """ + Splits full node in two separate ones, first half of records stays on old position, + second half is written as new leaf at the end of file. + """ + half_size = self.node_capacity / 2 + if self.node_capacity % 2 == 0: + old_node_size = new_node_size = half_size + else: + old_node_size = half_size + new_node_size = half_size + 1 + if create_new_root: + self._create_new_root_from_node(node_start, children_flag, nr_of_keys_to_rewrite, new_node_size, old_node_size, new_key, new_pointer) + else: + blanks = (self.node_capacity - new_node_size) * ( + self.key_size + self.pointer_size) * '\x00' + if nr_of_keys_to_rewrite == new_node_size: # insert key into first half of node + # reading second half of node + self.buckets.seek(self._calculate_key_position(node_start, + old_node_size, + 'n') + self.pointer_size) + # read all keys with key>new_key + data = self.buckets.read(nr_of_keys_to_rewrite * + (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + nr_of_keys_to_rewrite * + (self.key_format + self.pointer_format), data) + # write new node at end of file + self.buckets.seek(0, 2) + new_node_start = self.buckets.tell() + # prepare new node_data + new_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.key_format + + self.pointer_format) * new_node_size, + new_node_size, + children_flag, + new_pointer, + *old_node_data) + new_node += blanks + # write new node + self.buckets.write(new_node) + # update old node data + self._update_size( + node_start, old_node_size) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, new_key + elif nr_of_keys_to_rewrite > half_size: # insert key into first half of node + # seek for first key to rewrite + self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n') + + self.pointer_size) + # read all keys with key>new_key + data = self.buckets.read( + nr_of_keys_to_rewrite * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack( + '<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data) + key_moved_to_parent_node = old_node_data[-( + new_node_size + 1) * 2] + self.buckets.seek(0, 2) + new_node_start = self.buckets.tell() + # prepare new node_data + new_node = struct.pack('<' + self.node_heading_format + + self.pointer_format + (self.key_format + + self.pointer_format) * new_node_size, + new_node_size, + children_flag, + old_node_data[-new_node_size * 2 - 1], + 
*old_node_data[-new_node_size * 2:]) + new_node += blanks + # write new node + self.buckets.write(new_node) + self._update_size( + node_start, old_node_size) + # seek position of new key in first half + self.buckets.seek(self._calculate_key_position(node_start, self.node_capacity - nr_of_keys_to_rewrite, 'n') + + self.pointer_size) + # write new key and keys after + self.buckets.write( + struct.pack( + '<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite - new_node_size), + new_key, + new_pointer, + *old_node_data[:-(new_node_size + 1) * 2])) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, key_moved_to_parent_node + else: # key goes into second half + # reading second half of node + self.buckets.seek(self._calculate_key_position(node_start, + old_node_size, + 'n') + + self.pointer_size) + data = self.buckets.read( + new_node_size * (self.key_size + self.pointer_size)) + old_node_data = struct.unpack('<' + new_node_size * + (self.key_format + self.pointer_format), data) + # find key which goes to parent node + key_moved_to_parent_node = old_node_data[0] + self.buckets.seek(0, 2) # end of file + new_node_start = self.buckets.tell() + index_of_records_split = nr_of_keys_to_rewrite * 2 + # prepare new node_data + first_leaf_pointer = old_node_data[1] + old_node_data = old_node_data[2:] + if index_of_records_split: + keys_before = old_node_data[:-index_of_records_split] + keys_after = old_node_data[-index_of_records_split:] + else: + keys_before = old_node_data + keys_after = [] + new_node = struct.pack('<' + self.node_heading_format + self.pointer_format + + (self.key_format + self.pointer_format) * + (new_node_size - + nr_of_keys_to_rewrite - 1), + new_node_size, + children_flag, + first_leaf_pointer, + *keys_before) + new_node += struct.pack('<' + (self.key_format + self.pointer_format) * + (nr_of_keys_to_rewrite + 1), + new_key, + new_pointer, + *keys_after) + new_node += blanks + # write new node + self.buckets.write(new_node) + self._update_size(node_start, old_node_size) + + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete( + node_start) + + return new_node_start, key_moved_to_parent_node + + def insert_first_record_into_leaf(self, leaf_start, key, doc_id, start, size, status): + self.buckets.seek(leaf_start) + self.buckets.write(struct.pack('<' + self.elements_counter_format, + 1)) + self.buckets.seek(leaf_start + self.leaf_heading_size) + self.buckets.write(struct.pack('<' + self.single_leaf_record_format, + key, + doc_id, + start, + size, + status)) + +# self._read_single_leaf_record.delete(leaf_start) + self._find_key_in_leaf.delete(leaf_start) + self._read_leaf_nr_of_elements.delete(leaf_start) + self._read_leaf_nr_of_elements_and_neighbours.delete(leaf_start) + + def _insert_new_record_into_leaf(self, leaf_start, key, doc_id, start, size, status, nodes_stack, indexes): + nr_of_elements = self._read_leaf_nr_of_elements(leaf_start) + if nr_of_elements == 0: + self.insert_first_record_into_leaf( + leaf_start, key, doc_id, start, size, status) + return + leaf_start, new_record_position, nr_of_records_to_rewrite, full_leaf, on_deleted\ + = self._find_place_in_leaf(key, leaf_start, nr_of_elements) + if full_leaf: + try: # check if leaf has parent node + leaf_parent_pointer = nodes_stack.pop() + except IndexError: # leaf is a root + leaf_parent_pointer = 0 + split_data = self._split_leaf(leaf_start, + 
nr_of_records_to_rewrite, + key, + doc_id, + start, + size, + status, + create_new_root=(False if leaf_parent_pointer else True)) + if split_data is not None: # split_data is None when the split created a new root or update_if_has_deleted already placed the record + new_leaf_start_position, key_moved_to_parent_node = split_data + self._insert_new_key_into_node(leaf_parent_pointer, + key_moved_to_parent_node, + leaf_start, + new_leaf_start_position, + nodes_stack, + indexes) + else: # there is a place for record in leaf + self.buckets.seek(leaf_start) + self._update_leaf( + leaf_start, new_record_position, nr_of_elements, nr_of_records_to_rewrite, + on_deleted, key, doc_id, start, size, status) + + def _update_node(self, new_key_position, nr_of_keys_to_rewrite, new_key, new_pointer): + if nr_of_keys_to_rewrite == 0: + self.buckets.seek(new_key_position) + self.buckets.write( + struct.pack('<' + self.key_format + self.pointer_format, + new_key, + new_pointer)) + self.flush() + else: + self.buckets.seek(new_key_position) + data = self.buckets.read(nr_of_keys_to_rewrite * ( + self.key_size + self.pointer_size)) + keys_to_rewrite = struct.unpack( + '<' + nr_of_keys_to_rewrite * (self.key_format + self.pointer_format), data) + self.buckets.seek(new_key_position) + self.buckets.write( + struct.pack( + '<' + (nr_of_keys_to_rewrite + 1) * + (self.key_format + self.pointer_format), + new_key, + new_pointer, + *keys_to_rewrite)) + self.flush() + + def _insert_new_key_into_node(self, node_start, new_key, old_half_start, new_half_start, nodes_stack, indexes): + parent_key_index = indexes.pop() + nr_of_elements, children_flag = self._read_node_nr_of_elements_and_children_flag(node_start) + parent_prev_pointer = self._read_single_node_key( + node_start, parent_key_index)[0] + if parent_prev_pointer == old_half_start: # split child was on the left side of its parent key, must write new key before it + new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index, 'n') + nr_of_keys_to_rewrite = nr_of_elements - parent_key_index + else: # split child was on the right side of its parent key, must write new key after it + new_key_position = self.pointer_size + self._calculate_key_position(node_start, parent_key_index + 1, 'n') + nr_of_keys_to_rewrite = nr_of_elements - (parent_key_index + 1) + if nr_of_elements == self.node_capacity: + try: # check if node has parent + node_parent_pointer = nodes_stack.pop() + except IndexError: # node is a root + node_parent_pointer = 0 + new_data = self._split_node(node_start, + nr_of_keys_to_rewrite, + new_key, + new_half_start, + children_flag, + create_new_root=(False if node_parent_pointer else True)) + if new_data: # if not new_data, new root has been created + new_node_start_position, key_moved_to_parent_node = new_data + self._insert_new_key_into_node(node_parent_pointer, + key_moved_to_parent_node, + node_start, + new_node_start_position, + nodes_stack, + indexes) + + self._find_first_key_occurence_in_node.delete(node_start) + self._find_last_key_occurence_in_node.delete(node_start) + else: # there is an empty slot for the new key in the node + self._update_size(node_start, nr_of_elements + 1) + self._update_node(new_key_position, + nr_of_keys_to_rewrite, + new_key, + new_half_start) + + self._find_first_key_occurence_in_node.delete(node_start) + self._find_last_key_occurence_in_node.delete(node_start) + self._read_single_node_key.delete(node_start) + self._read_node_nr_of_elements_and_children_flag.delete(node_start) + + def _find_leaf_to_insert(self, key): + """ + Traverses the tree in search of the leaf to insert into, remembering parent nodes in the path, + looks for the last occurrence of key if it is already in the tree. + """ + nodes_stack = [self.data_start] + if self.root_flag == 'l': + return nodes_stack, [] + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_index, curr_pointer = self._find_last_key_occurence_in_node( + self.data_start, key, nr_of_elements) + nodes_stack.append(curr_pointer) + indexes = [curr_index] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_pointer) + curr_index, curr_pointer = self._find_last_key_occurence_in_node(curr_pointer, key, nr_of_elements) + nodes_stack.append(curr_pointer) + indexes.append(curr_index) + return nodes_stack, indexes + # nodes_stack contains start addresses of nodes directly above the leaf with key, indexes match the keys adjacent to nodes_stack values (as pointers) + # required when inserting new keys in upper tree levels + + def _find_leaf_with_last_key_occurence(self, key): + if self.root_flag == 'l': + return self.data_start + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_position = self._find_last_key_occurence_in_node( + self.data_start, key, nr_of_elements)[1] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position) + curr_position = self._find_last_key_occurence_in_node( + curr_position, key, nr_of_elements)[1] + return curr_position + + def _find_leaf_with_first_key_occurence(self, key): + if self.root_flag == 'l': + return self.data_start + else: + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(self.data_start) + curr_position = self._find_first_key_occurence_in_node( + self.data_start, key, nr_of_elements)[1] + while(curr_child_flag == 'n'): + nr_of_elements, curr_child_flag = self._read_node_nr_of_elements_and_children_flag(curr_position) + curr_position = self._find_first_key_occurence_in_node( + curr_position, key, nr_of_elements)[1] + return curr_position + + def _find_key(self, key): + containing_leaf_start = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start) + try: + doc_id, l_key, start, size, status = self._find_key_in_leaf( + containing_leaf_start, key, nr_of_elements) + except ElemNotFound: + if next_leaf: + nr_of_elements = self._read_leaf_nr_of_elements(next_leaf) + else: + raise ElemNotFound + doc_id, l_key, start, size, status = self._find_key_in_leaf( + next_leaf, key, nr_of_elements) + return doc_id, l_key, start, size, status + + def _find_key_to_update(self, key, doc_id): + """ + Search tree for a key that matches not only the given key but also doc_id. + Raises TryReindexException when no matching record can be found in the containing leaf or its next neighbour. + """ + containing_leaf_start = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(containing_leaf_start) + try: + leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key, + doc_id, + containing_leaf_start, + nr_of_elements) + except ElemNotFound: + if next_leaf: + nr_of_elements = self._read_leaf_nr_of_elements(next_leaf) + else: + raise TryReindexException() + try: + leaf_start, record_index, doc_id, l_key, start, size, status = self._find_key_in_leaf_for_update(key, + doc_id, + next_leaf, + nr_of_elements) + except ElemNotFound: + raise TryReindexException() + return leaf_start, record_index, doc_id, l_key, start, size, status + + def update(self, doc_id, key, u_start=0, u_size=0, u_status='o'): + containing_leaf_start, element_index, old_doc_id, old_key, old_start, old_size, old_status = self._find_key_to_update(key, doc_id) + new_data = [old_doc_id, old_start, old_size, old_status] # mutable copy; overwrite only the fields the caller supplied + if u_start: + new_data[1] = u_start + if u_size: + new_data[2] = u_size + if u_status: + new_data[3] = u_status + self._update_element(containing_leaf_start, element_index, tuple(new_data)) + + self._find_key.delete(key) + self._match_doc_id.delete(doc_id) + self._find_key_in_leaf.delete(containing_leaf_start, key) + return True + + def delete(self, doc_id, key, start=0, size=0): + containing_leaf_start, element_index = self._find_key_to_update( + key, doc_id)[:2] + self._delete_element(containing_leaf_start, element_index) + + self._find_key.delete(key) + self._match_doc_id.delete(doc_id) + self._find_key_in_leaf.delete(containing_leaf_start, key) + return True + + def _find_key_many(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_first_key_equal( + key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + leaf_with_key = next_leaf + key_index = 0 + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if key == curr_key: + if status != 'd': + offset -= 1 + key_index += 1 + else: + return + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if key == curr_key: + if status != 'd': + yield doc_id, start, size, status + limit -= 1 + key_index += 1 + else: + return + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_smaller(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf,
next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key >= key: + key_index -= 1 + while offset: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + while limit: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, key, start, size, status + limit -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + + def _find_key_equal_and_smaller(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_last_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + leaf_with_key = prev_leaf + key_index = self._read_leaf_nr_of_elements_and_neighbours( + leaf_with_key)[0] + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key > key: + key_index -= 1 + while offset: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + while limit: + if key_index >= 0: + key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, key, start, size, status + limit -= 1 + key_index -= 1 + else: + if prev_leaf: + leaf_with_key = prev_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(prev_leaf) + key_index = nr_of_elements - 1 + else: + return + + def _find_key_bigger(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_last_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + try: + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + except ElemNotFound: + key_index = 0 + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key <= key: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, 
size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_equal_and_bigger(self, key, limit=1, offset=0): + leaf_with_key = self._find_leaf_with_first_key_occurence(key) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(key, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record(leaf_with_key, key_index)[0] + if curr_key < key: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_with_key, key_index) + if status != 'd': + yield doc_id, curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def _find_key_between(self, start, end, limit, offset, inclusive_start, inclusive_end): + """ + Returns a generator containing all keys within the given interval.
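+ Bound handling follows the inclusive_start and inclusive_end flags passed in from get_between. + """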
+ """ + if inclusive_start: + leaf_with_key = self._find_leaf_with_first_key_occurence(start) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_first_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + curr_key = self._read_single_leaf_record( + leaf_with_key, key_index)[0] + if curr_key < start: + key_index += 1 + else: + leaf_with_key = self._find_leaf_with_last_key_occurence(start) + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_with_key) + leaf_with_key, key_index = self._find_index_of_last_key_equal_or_smaller_key(start, leaf_with_key, nr_of_elements) + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_key <= start: + key_index += 1 + while offset: + if key_index < nr_of_elements: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, curr_doc_id, curr_start, curr_size, curr_status = self._read_single_leaf_record(leaf_with_key, key_index) + if curr_key > end or (curr_key == end and not inclusive_end): + return + elif curr_status != 'd': + yield curr_doc_id, curr_key, curr_start, curr_size, curr_status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_with_key = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def get(self, key): + return self._find_key(self.make_key(key)) + + def get_many(self, key, limit=1, offset=0): + return self._find_key_many(self.make_key(key), limit, offset) + + def get_between(self, start, end, limit=1, offset=0, inclusive_start=True, inclusive_end=True): + if start is None: + end = self.make_key(end) + if inclusive_end: + return self._find_key_equal_and_smaller(end, limit, offset) + else: + return self._find_key_smaller(end, limit, offset) + elif end is None: + start = self.make_key(start) + if inclusive_start: + return self._find_key_equal_and_bigger(start, limit, offset) + else: + return self._find_key_bigger(start, limit, offset) + else: + start = self.make_key(start) + end = self.make_key(end) + return self._find_key_between(start, end, limit, offset, inclusive_start, inclusive_end) + + def all(self, limit=-1, offset=0): + """ + Traverses linked list of all tree leaves and returns generator containing all elements stored in index. 
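+ Records whose status is 'd' (deleted) are skipped; with the default limit=-1 the counter never reaches zero, so iteration continues to the end of the leaf chain.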
+ """ + if self.root_flag == 'n': + leaf_start = self.data_start + self.node_size + else: + leaf_start = self.data_start + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(leaf_start) + key_index = 0 + while offset: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_start, key_index) + if status != 'd': + offset -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_start = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + while limit: + if key_index < nr_of_elements: + curr_key, doc_id, start, size, status = self._read_single_leaf_record( + leaf_start, key_index) + if status != 'd': + yield doc_id, curr_key, start, size, status + limit -= 1 + key_index += 1 + else: + key_index = 0 + if next_leaf: + leaf_start = next_leaf + nr_of_elements, prev_leaf, next_leaf = self._read_leaf_nr_of_elements_and_neighbours(next_leaf) + else: + return + + def make_key(self, key): + raise NotImplementedError() + + def make_key_value(self, data): + raise NotImplementedError() + + def _open_storage(self): + s = globals()[self.storage_class] + if not self.storage: + self.storage = s(self.db_path, self.name) + self.storage.open() + + def _create_storage(self): + s = globals()[self.storage_class] + if not self.storage: + self.storage = s(self.db_path, self.name) + self.storage.create() + + def compact(self, node_capacity=0): + if not node_capacity: + node_capacity = self.node_capacity + + compact_ind = self.__class__( + self.db_path, self.name + '_compact', node_capacity=node_capacity) + compact_ind.create_index() + + gen = self.all() + while True: + try: + doc_id, key, start, size, status = gen.next() + except StopIteration: + break + self.storage._f.seek(start) + value = self.storage._f.read(size) + start_ = compact_ind.storage._f.tell() + compact_ind.storage._f.write(value) + compact_ind.insert(doc_id, key, start_, size, status) + + compact_ind.close_index() + original_name = self.name + # os.unlink(os.path.join(self.db_path, self.name + "_buck")) + self.close_index() + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_buck"), os.path.join(self.db_path, self.name + "_buck")) + shutil.move(os.path.join(compact_ind.db_path, compact_ind. + name + "_stor"), os.path.join(self.db_path, self.name + "_stor")) + # self.name = original_name + self.open_index() # reload... + self.name = original_name + self._save_params(dict(name=original_name)) + self._fix_params() + self._clear_cache() + return True + + def _fix_params(self): + super(IU_TreeBasedIndex, self)._fix_params() + self._count_props() + + def _clear_cache(self): + self._find_key.clear() + self._match_doc_id.clear() +# self._read_single_leaf_record.clear() + self._find_key_in_leaf.clear() + self._read_single_node_key.clear() + self._find_first_key_occurence_in_node.clear() + self._find_last_key_occurence_in_node.clear() + self._read_leaf_nr_of_elements.clear() + self._read_leaf_neighbours.clear() + self._read_leaf_nr_of_elements_and_neighbours.clear() + self._read_node_nr_of_elements_and_children_flag.clear() + + def close_index(self): + super(IU_TreeBasedIndex, self).close_index() + self._clear_cache() + + +class IU_MultiTreeBasedIndex(IU_TreeBasedIndex): + """ + Class that allows to index more than one key per database record. + + It operates very well on GET/INSERT. 
It's not optimized for
+    UPDATE operations (will always re-add everything)
+    """
+
+    def __init__(self, *args, **kwargs):
+        super(IU_MultiTreeBasedIndex, self).__init__(*args, **kwargs)
+
+    def insert(self, doc_id, key, start, size, status='o'):
+        if isinstance(key, (list, tuple)):
+            key = set(key)
+        elif not isinstance(key, set):
+            key = set([key])
+        ins = super(IU_MultiTreeBasedIndex, self).insert
+        for curr_key in key:
+            ins(doc_id, curr_key, start, size, status)
+        return True
+
+    def update(self, doc_id, key, u_start, u_size, u_status='o'):
+        if isinstance(key, (list, tuple)):
+            key = set(key)
+        elif not isinstance(key, set):
+            key = set([key])
+        upd = super(IU_MultiTreeBasedIndex, self).update
+        for curr_key in key:
+            upd(doc_id, curr_key, u_start, u_size, u_status)
+
+    def delete(self, doc_id, key, start=0, size=0):
+        if isinstance(key, (list, tuple)):
+            key = set(key)
+        elif not isinstance(key, set):
+            key = set([key])
+        delete = super(IU_MultiTreeBasedIndex, self).delete
+        for curr_key in key:
+            delete(doc_id, curr_key, start, size)
+
+    def get(self, key):
+        return super(IU_MultiTreeBasedIndex, self).get(key)
+
+    def make_key_value(self, data):
+        raise NotImplementedError()
+
+
+# classes for public use, done in this way because of
+# the generation of static files with indexes (_index directory)
+
+
+class TreeBasedIndex(IU_TreeBasedIndex):
+    pass
+
+
+class MultiTreeBasedIndex(IU_MultiTreeBasedIndex):
+    """
+    It allows indexing more than one key per record (i.e. prefix/infix/suffix search mechanisms).
+    This class is designed to be used in custom indexes.
+    """
+    pass
diff --git a/libs/axl/axel.py b/libs/axl/axel.py
index d0f069ab..64d29779 100644
--- a/libs/axl/axel.py
+++ b/libs/axl/axel.py
@@ -1,6 +1,7 @@
 # axel.py
 #
 # Copyright (C) 2010 Adrian Cristea adrian dot cristea at gmail dotcom
+# Edits by Ruud Burger
 #
 # Based on an idea by Peter Thatcher, found on
 # http://www.valuedlessons.com/2008/04/events-in-python.html
@@ -11,12 +12,14 @@
 # Source: http://pypi.python.org/pypi/axel
 # Docs: http://packages.python.org/axel
 
-from couchpotato.core.helpers.variable import natsortKey
-import Queue
+from Queue import Empty, Queue
 import hashlib
 import sys
 import threading
 
+from couchpotato.core.helpers.variable import natsortKey
+
+
 class Event(object):
     """
     Event object inspired by C# events. Handlers can be registered and
@@ -140,7 +143,7 @@ class Event(object):
 
     def fire(self, *args, **kwargs):
         """ Stores all registered handlers in a queue for processing """
-        self.queue = Queue.Queue()
+        self.queue = Queue()
 
         result = {}
         if self.handlers:
@@ -239,9 +242,9 @@ class Event(object):
                     order_lock.release()
 
                 if self.queue.empty():
-                    raise Queue.Empty
+                    raise Empty
 
-            except Queue.Empty:
+            except Empty:
                 break
 
     def _extract(self, queue_item):
diff --git a/libs/caper/__init__.py b/libs/caper/__init__.py
new file mode 100644
index 00000000..95fb6d73
--- /dev/null
+++ b/libs/caper/__init__.py
@@ -0,0 +1,195 @@
+# Copyright 2013 Dean Gardiner
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
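As an aside, here is a minimal sketch of how the multi-key tree index above is intended to be subclassed. The import path, the `name` field, the fixed 16-byte `key_format`, and the suffix scheme are illustrative assumptions, not part of this diff:

```python
from CodernityDB.tree_index import MultiTreeBasedIndex  # assumed vendored path


class SuffixIndex(MultiTreeBasedIndex):
    """Hypothetical index: every suffix of `name` becomes a key,
    so get('potato') can also find a record named 'couchpotato'."""

    def __init__(self, *args, **kwargs):
        kwargs['key_format'] = '16s'  # tree index keys are fixed-width
        super(SuffixIndex, self).__init__(*args, **kwargs)

    def make_key_value(self, data):
        name = data.get('name')
        if not name:
            return None
        # Returning a set of keys makes insert() above store the
        # record once per key.
        keys = set(name[i:i + 16].rjust(16, '_') for i in xrange(len(name)))
        return keys, None

    def make_key(self, key):
        return key[:16].rjust(16, '_')
```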
+ +from logr import Logr +from caper.matcher import FragmentMatcher +from caper.objects import CaperFragment, CaperClosure +from caper.parsers.anime import AnimeParser +from caper.parsers.scene import SceneParser +from caper.parsers.usenet import UsenetParser + + +__version_info__ = ('0', '3', '1') +__version_branch__ = 'master' + +__version__ = "%s%s" % ( + '.'.join(__version_info__), + '-' + __version_branch__ if __version_branch__ else '' +) + + +CL_START_CHARS = ['(', '[', '<', '>'] +CL_END_CHARS = [')', ']', '<', '>'] +CL_END_STRINGS = [' - '] + +STRIP_START_CHARS = ''.join(CL_START_CHARS) +STRIP_END_CHARS = ''.join(CL_END_CHARS) +STRIP_CHARS = ''.join(['_', ' ', '.']) + +FRAGMENT_SEPARATORS = ['.', '-', '_', ' '] + + +CL_START = 0 +CL_END = 1 + + +class Caper(object): + def __init__(self, debug=False): + self.debug = debug + + self.parsers = { + 'anime': AnimeParser, + 'scene': SceneParser, + 'usenet': UsenetParser + } + + def _closure_split(self, name): + """ + :type name: str + + :rtype: list of CaperClosure + """ + + closures = [] + + def end_closure(closures, buf): + buf = buf.strip(STRIP_CHARS) + if len(buf) < 2: + return + + cur = CaperClosure(len(closures), buf) + cur.left = closures[len(closures) - 1] if len(closures) > 0 else None + + if cur.left: + cur.left.right = cur + + closures.append(cur) + + state = CL_START + buf = "" + for x, ch in enumerate(name): + # Check for start characters + if state == CL_START and ch in CL_START_CHARS: + end_closure(closures, buf) + + state = CL_END + buf = "" + + buf += ch + + if state == CL_END and ch in CL_END_CHARS: + # End character found, create the closure + end_closure(closures, buf) + + state = CL_START + buf = "" + elif state == CL_START and buf[-3:] in CL_END_STRINGS: + # End string found, create the closure + end_closure(closures, buf[:-3]) + + state = CL_START + buf = "" + + end_closure(closures, buf) + + return closures + + def _clean_closure(self, closure): + """ + :type closure: str + + :rtype: str + """ + + return closure.lstrip(STRIP_START_CHARS).rstrip(STRIP_END_CHARS) + + def _fragment_split(self, closures): + """ + :type closures: list of CaperClosure + + :rtype: list of CaperClosure + """ + + cur_position = 0 + cur = None + + def end_fragment(fragments, cur, cur_position): + cur.position = cur_position + + cur.left = fragments[len(fragments) - 1] if len(fragments) > 0 else None + if cur.left: + cur.left_sep = cur.left.right_sep + cur.left.right = cur + + cur.right_sep = ch + + fragments.append(cur) + + for closure in closures: + closure.fragments = [] + + separator_buffer = "" + + for x, ch in enumerate(self._clean_closure(closure.value)): + if not cur: + cur = CaperFragment(closure) + + if ch in FRAGMENT_SEPARATORS: + if cur.value: + separator_buffer = "" + + separator_buffer += ch + + if cur.value or not closure.fragments: + end_fragment(closure.fragments, cur, cur_position) + elif len(separator_buffer) > 1: + cur.value = separator_buffer.strip() + + if cur.value: + end_fragment(closure.fragments, cur, cur_position) + + separator_buffer = "" + + # Reset + cur = None + cur_position += 1 + else: + cur.value += ch + + # Finish parsing the last fragment + if cur and cur.value: + end_fragment(closure.fragments, cur, cur_position) + + # Reset + cur_position = 0 + cur = None + + return closures + + def parse(self, name, parser='scene'): + closures = self._closure_split(name) + closures = self._fragment_split(closures) + + # Print closures + for closure in closures: + Logr.debug("closure [%s]", closure.value) + + for fragment 
in closure.fragments: + Logr.debug("\tfragment [%s]", fragment.value) + + if parser not in self.parsers: + raise ValueError("Unknown parser") + + # TODO autodetect the parser type + return self.parsers[parser](self.debug).run(closures) diff --git a/libs/caper/constraint.py b/libs/caper/constraint.py new file mode 100644 index 00000000..e092d33d --- /dev/null +++ b/libs/caper/constraint.py @@ -0,0 +1,134 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +class CaptureConstraint(object): + def __init__(self, capture_group, constraint_type, comparisons=None, target=None, **kwargs): + """Capture constraint object + + :type capture_group: CaptureGroup + """ + + self.capture_group = capture_group + + self.constraint_type = constraint_type + self.target = target + + self.comparisons = comparisons if comparisons else [] + self.kwargs = {} + + for orig_key, value in kwargs.items(): + key = orig_key.split('__') + if len(key) != 2: + self.kwargs[orig_key] = value + continue + name, method = key + + method = 'constraint_match_' + method + if not hasattr(self, method): + self.kwargs[orig_key] = value + continue + + self.comparisons.append((name, getattr(self, method), value)) + + def execute(self, parent_node, node, **kwargs): + func_name = 'constraint_%s' % self.constraint_type + + if hasattr(self, func_name): + return getattr(self, func_name)(parent_node, node, **kwargs) + + raise ValueError('Unknown constraint type "%s"' % self.constraint_type) + + # + # Node Matching + # + + def constraint_match(self, parent_node, node): + results = [] + total_weight = 0 + + for name, method, argument in self.comparisons: + weight, success = method(node, name, argument) + total_weight += weight + results.append(success) + + return total_weight / (float(len(results)) or 1), all(results) if len(results) > 0 else False + + def constraint_match_eq(self, node, name, expected): + if not hasattr(node, name): + return 1.0, False + + return 1.0, getattr(node, name) == expected + + def constraint_match_re(self, node, name, arg): + # Node match + if name == 'node': + group, minimum_weight = arg if type(arg) is tuple and len(arg) > 1 else (arg, 0) + + weight, match, num_fragments = self.capture_group.parser.matcher.fragment_match(node, group) + return weight, weight > minimum_weight + + # Regex match + if type(arg).__name__ == 'SRE_Pattern': + return 1.0, arg.match(getattr(node, name)) is not None + + # Value match + if hasattr(node, name): + match = self.capture_group.parser.matcher.value_match(getattr(node, name), arg, single=True) + return 1.0, match is not None + + raise ValueError("Unknown constraint match type '%s'" % name) + + # + # Result + # + + def constraint_result(self, parent_node, fragment): + ctag = self.kwargs.get('tag') + if not ctag: + return 0, False + + ckey = self.kwargs.get('key') + + for tag, result in parent_node.captured(): + if tag != ctag: + continue + + if not ckey or ckey in result.keys(): + return 1.0, True + + return 0.0, False + + # + # Failure + # + + def 
constraint_failure(self, parent_node, fragment, match): + if not match or not match.success: + return 1.0, True + + return 0, False + + # + # Success + # + + def constraint_success(self, parent_node, fragment, match): + if match and match.success: + return 1.0, True + + return 0, False + + def __repr__(self): + return "CaptureConstraint(comparisons=%s)" % repr(self.comparisons) diff --git a/libs/caper/group.py b/libs/caper/group.py new file mode 100644 index 00000000..8f0399ef --- /dev/null +++ b/libs/caper/group.py @@ -0,0 +1,284 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from logr import Logr +from caper import CaperClosure, CaperFragment +from caper.helpers import clean_dict +from caper.result import CaperFragmentNode, CaperClosureNode +from caper.step import CaptureStep +from caper.constraint import CaptureConstraint + + +class CaptureGroup(object): + def __init__(self, parser, result): + """Capture group object + + :type parser: caper.parsers.base.Parser + :type result: caper.result.CaperResult + """ + + self.parser = parser + self.result = result + + #: @type: list of CaptureStep + self.steps = [] + + #: type: str + self.step_source = None + + #: @type: list of CaptureConstraint + self.pre_constraints = [] + + #: :type: list of CaptureConstraint + self.post_constraints = [] + + def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs): + Logr.debug('capture_fragment("%s", "%s", %s, %s)', tag, regex, func, single) + + if self.step_source != 'fragment': + if self.step_source is None: + self.step_source = 'fragment' + else: + raise ValueError("Unable to mix fragment and closure capturing in a group") + + self.steps.append(CaptureStep( + self, tag, + 'fragment', + regex=regex, + func=func, + single=single, + **kwargs + )) + + return self + + def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs): + Logr.debug('capture_closure("%s", "%s", %s, %s)', tag, regex, func, single) + + if self.step_source != 'closure': + if self.step_source is None: + self.step_source = 'closure' + else: + raise ValueError("Unable to mix fragment and closure capturing in a group") + + self.steps.append(CaptureStep( + self, tag, + 'closure', + regex=regex, + func=func, + single=single, + **kwargs + )) + + return self + + def until_closure(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'match', target='closure', **kwargs)) + + return self + + def until_fragment(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'match', target='fragment', **kwargs)) + + return self + + def until_result(self, **kwargs): + self.pre_constraints.append(CaptureConstraint(self, 'result', **kwargs)) + + return self + + def until_failure(self, **kwargs): + self.post_constraints.append(CaptureConstraint(self, 'failure', **kwargs)) + + return self + + def until_success(self, **kwargs): + self.post_constraints.append(CaptureConstraint(self, 'success', **kwargs)) + + return self + + def parse_subject(self, parent_head, 
subject): + Logr.debug("parse_subject (%s) subject: %s", self.step_source, repr(subject)) + + if type(subject) is CaperClosure: + return self.parse_closure(parent_head, subject) + + if type(subject) is CaperFragment: + return self.parse_fragment(parent_head, subject) + + raise ValueError('Unknown subject (%s)', subject) + + def parse_fragment(self, parent_head, subject): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + nodes, match = self.match(parent_head, parent_node, subject) + + # Capturing broke on constraint, return now + if not match: + return nodes + + Logr.debug('created fragment node with subject.value: "%s"' % subject.value) + + result = [CaperFragmentNode( + parent_node.closure, + subject.take_right(match.num_fragments), + parent_head, + match + )] + + # Branch if the match was indefinite (weight below 1.0) + if match.result and match.weight < 1.0: + if match.num_fragments == 1: + result.append(CaperFragmentNode(parent_node.closure, [subject], parent_head)) + else: + nodes.append(CaperFragmentNode(parent_node.closure, [subject], parent_head)) + + nodes.append(result[0] if len(result) == 1 else result) + + return nodes + + def parse_closure(self, parent_head, subject): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + nodes, match = self.match(parent_head, parent_node, subject) + + # Capturing broke on constraint, return now + if not match: + return nodes + + Logr.debug('created closure node with subject.value: "%s"' % subject.value) + + result = [CaperClosureNode( + subject, + parent_head, + match + )] + + # Branch if the match was indefinite (weight below 1.0) + if match.result and match.weight < 1.0: + if match.num_fragments == 1: + result.append(CaperClosureNode(subject, parent_head)) + else: + nodes.append(CaperClosureNode(subject, parent_head)) + + nodes.append(result[0] if len(result) == 1 else result) + + return nodes + + def match(self, parent_head, parent_node, subject): + nodes = [] + + # Check pre constaints + broke, definite = self.check_constraints(self.pre_constraints, parent_head, subject) + + if broke: + nodes.append(parent_head) + + if definite: + return nodes, None + + # Try match subject against the steps available + match = None + + for step in self.steps: + if step.source == 'closure' and type(subject) is not CaperClosure: + pass + elif step.source == 'fragment' and type(subject) is CaperClosure: + Logr.debug('Closure encountered on fragment step, jumping into fragments') + return [CaperClosureNode(subject, parent_head, None)], None + + match = step.execute(subject) + + if match.success: + if type(match.result) is dict: + match.result = clean_dict(match.result) + + Logr.debug('Found match with weight %s, match: %s, num_fragments: %s' % ( + match.weight, match.result, match.num_fragments + )) + + step.matched = True + + break + + if all([step.single and step.matched for step in self.steps]): + Logr.debug('All steps completed, group finished') + parent_node.finished_groups.append(self) + return nodes, match + + # Check post constraints + broke, definite = self.check_constraints(self.post_constraints, parent_head, subject, match=match) + if broke: + return nodes, None + + return nodes, match + + def check_constraints(self, constraints, parent_head, subject, **kwargs): + parent_node = parent_head[0] if type(parent_head) is list else parent_head + + # Check constraints + for constraint in [c for c in constraints if c.target == subject.__key__ or not c.target]: + Logr.debug("Testing constraint %s 
against subject %s", repr(constraint), repr(subject)) + + weight, success = constraint.execute(parent_node, subject, **kwargs) + + if success: + Logr.debug('capturing broke on "%s" at %s', subject.value, constraint) + parent_node.finished_groups.append(self) + + return True, weight == 1.0 + + return False, None + + def execute(self): + heads_finished = None + + while heads_finished is None or not (len(heads_finished) == len(self.result.heads) and all(heads_finished)): + heads_finished = [] + + heads = self.result.heads + self.result.heads = [] + + for head in heads: + node = head[0] if type(head) is list else head + + if self in node.finished_groups: + Logr.debug("head finished for group") + self.result.heads.append(head) + heads_finished.append(True) + continue + + Logr.debug('') + + Logr.debug(node) + + next_subject = node.next() + + Logr.debug('----------[%s] (%s)----------' % (next_subject, repr(next_subject.value) if next_subject else None)) + + if next_subject: + for node_result in self.parse_subject(head, next_subject): + self.result.heads.append(node_result) + + Logr.debug('Heads: %s', self.result.heads) + + heads_finished.append(self in node.finished_groups or next_subject is None) + + if len(self.result.heads) == 0: + self.result.heads = heads + + Logr.debug("heads_finished: %s, self.result.heads: %s", heads_finished, self.result.heads) + + Logr.debug("group finished") diff --git a/libs/caper/helpers.py b/libs/caper/helpers.py new file mode 100644 index 00000000..ded5d482 --- /dev/null +++ b/libs/caper/helpers.py @@ -0,0 +1,80 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
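The double-underscore keywords used throughout the capture groups above (`value__re`, `left_sep__eq`, `node__re`, ...) are resolved by `CaptureConstraint.__init__` splitting each keyword on `'__'`. A standalone sketch of that dispatch, with made-up inputs:

```python
def split_constraint_kwarg(orig_key):
    # Mirrors CaptureConstraint.__init__: 'value__re' becomes the node
    # attribute 'value' plus the comparison method 'constraint_match_re'
    # (the real code also checks that the method exists); anything else
    # stays in self.kwargs untouched.
    key = orig_key.split('__')
    if len(key) != 2:
        return None
    name, method = key
    return name, 'constraint_match_' + method

print(split_constraint_kwarg('value__re'))     # ('value', 'constraint_match_re')
print(split_constraint_kwarg('left_sep__eq'))  # ('left_sep', 'constraint_match_eq')
print(split_constraint_kwarg('tag'))           # None, kept as a plain kwarg
```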
+ +import sys + + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + + +def is_list_type(obj, element_type): + if not type(obj) is list: + return False + + if len(obj) < 1: + raise ValueError("Unable to determine list element type from empty list") + + return type(obj[0]) is element_type + + +def clean_dict(target, remove=None): + """Recursively remove items matching a value 'remove' from the dictionary + + :type target: dict + """ + if type(target) is not dict: + raise ValueError("Target is required to be a dict") + + remove_keys = [] + for key in target.keys(): + if type(target[key]) is not dict: + if target[key] == remove: + remove_keys.append(key) + else: + clean_dict(target[key], remove) + + for key in remove_keys: + target.pop(key) + + return target + + +def update_dict(a, b): + for key, value in b.items(): + if key not in a: + a[key] = value + elif isinstance(a[key], dict) and isinstance(value, dict): + update_dict(a[key], value) + elif isinstance(a[key], list): + a[key].append(value) + else: + a[key] = [a[key], value] + + +def xrange_six(start, stop=None, step=None): + if stop is not None and step is not None: + if PY3: + return range(start, stop, step) + else: + return xrange(start, stop, step) + else: + if PY3: + return range(start) + else: + return xrange(start) + + +def delta_seconds(td): + return (td.microseconds + (td.seconds + td.days * 24 * 3600) * 1e6) / 1e6 diff --git a/libs/caper/matcher.py b/libs/caper/matcher.py new file mode 100644 index 00000000..3acf2e68 --- /dev/null +++ b/libs/caper/matcher.py @@ -0,0 +1,144 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
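A quick usage sketch of the helpers above, with made-up values. `update_dict` is what lets multi-fragment regex matches accumulate: merging a second value under an existing key promotes the scalar to a list; `clean_dict` strips entries whose value equals `remove` (`None` by default):

```python
info = {'season': '3'}
update_dict(info, {'episode': '14'})
# info == {'season': '3', 'episode': '14'}

update_dict(info, {'episode': '15'})
# colliding scalar becomes a list:
# info == {'season': '3', 'episode': ['14', '15']}

clean_dict({'size': '700 MB', 'extra': None})
# == {'size': '700 MB'}
```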
+ +from caper.helpers import is_list_type, update_dict, delta_seconds +from datetime import datetime +from logr import Logr +import re + + +class FragmentMatcher(object): + def __init__(self, pattern_groups): + self.regex = {} + + self.construct_patterns(pattern_groups) + + def construct_patterns(self, pattern_groups): + compile_start = datetime.now() + compile_count = 0 + + for group_name, patterns in pattern_groups: + if group_name not in self.regex: + self.regex[group_name] = [] + + # Transform into weight groups + if type(patterns[0]) is str or type(patterns[0][0]) not in [int, float]: + patterns = [(1.0, patterns)] + + for weight, patterns in patterns: + weight_patterns = [] + + for pattern in patterns: + # Transform into multi-fragment patterns + if type(pattern) is str: + pattern = (pattern,) + + if type(pattern) is tuple and len(pattern) == 2: + if type(pattern[0]) is str and is_list_type(pattern[1], str): + pattern = (pattern,) + + result = [] + for value in pattern: + if type(value) is tuple: + if len(value) == 2: + # Construct OR-list pattern + value = value[0] % '|'.join(value[1]) + elif len(value) == 1: + value = value[0] + + result.append(re.compile(value, re.IGNORECASE)) + compile_count += 1 + + weight_patterns.append(tuple(result)) + + self.regex[group_name].append((weight, weight_patterns)) + + Logr.info("Compiled %s patterns in %ss", compile_count, delta_seconds(datetime.now() - compile_start)) + + def find_group(self, name): + for group_name, weight_groups in self.regex.items(): + if group_name and group_name == name: + return group_name, weight_groups + + return None, None + + def value_match(self, value, group_name=None, single=True): + result = None + + for group, weight_groups in self.regex.items(): + if group_name and group != group_name: + continue + + # TODO handle multiple weights + weight, patterns = weight_groups[0] + + for pattern in patterns: + match = pattern[0].match(value) + if not match: + continue + + if result is None: + result = {} + if group not in result: + result[group] = {} + + result[group].update(match.groupdict()) + + if single: + return result + + return result + + def fragment_match(self, fragment, group_name=None): + """Follow a fragment chain to try find a match + + :type fragment: caper.objects.CaperFragment + :type group_name: str or None + + :return: The weight of the match found between 0.0 and 1.0, + where 1.0 means perfect match and 0.0 means no match + :rtype: (float, dict, int) + """ + + group_name, weight_groups = self.find_group(group_name) + + for weight, patterns in weight_groups: + for pattern in patterns: + cur_fragment = fragment + success = True + result = {} + + # Ignore empty patterns + if len(pattern) < 1: + break + + for fragment_pattern in pattern: + if not cur_fragment: + success = False + break + + match = fragment_pattern.match(cur_fragment.value) + if match: + update_dict(result, match.groupdict()) + else: + success = False + break + + cur_fragment = cur_fragment.right if cur_fragment else None + + if success: + Logr.debug("Found match with weight %s" % weight) + return float(weight), result, len(pattern) + + return 0.0, None, 1 diff --git a/libs/caper/objects.py b/libs/caper/objects.py new file mode 100644 index 00000000..b7d9084d --- /dev/null +++ b/libs/caper/objects.py @@ -0,0 +1,124 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from caper.helpers import xrange_six + + +class CaperClosure(object): + __key__ = 'closure' + + def __init__(self, index, value): + #: :type: int + self.index = index + + #: :type: str + self.value = value + + #: :type: CaperClosure + self.left = None + #: :type: CaperClosure + self.right = None + + #: :type: list of CaperFragment + self.fragments = [] + + def __str__(self): + return "" % repr(self.result) + + def __repr__(self): + return self.__str__() diff --git a/libs/migrate/versioning/templates/script/__init__.py b/libs/caper/parsers/__init__.py similarity index 100% rename from libs/migrate/versioning/templates/script/__init__.py rename to libs/caper/parsers/__init__.py diff --git a/libs/caper/parsers/anime.py b/libs/caper/parsers/anime.py new file mode 100644 index 00000000..86c70917 --- /dev/null +++ b/libs/caper/parsers/anime.py @@ -0,0 +1,88 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
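The `pattern_groups` structure consumed by `construct_patterns` above accepts several shorthand shapes; a small hedged example (the group and tag names here are illustrative only):

```python
from caper.matcher import FragmentMatcher

PATTERNS = [
    ('example', [
        # Plain string: a single-fragment pattern, wrapped at weight 1.0.
        r'^(?P<word>\w+)$',

        # Tuple of strings: one pattern spanning consecutive fragments,
        # matched by walking fragment.right in fragment_match().
        (r'^WEB$', r'^(DL|Rip)$'),

        # (template, OR-list): expanded to '^(?P<codec>x264|XViD)$'.
        (r'^(?P<codec>%s)$', ['x264', 'XViD']),
    ]),
    # An explicit weight group would instead look like:
    # ('example', [(0.8, [r'...'])])
]

matcher = FragmentMatcher(PATTERNS)
```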
+
+import re
+from caper.parsers.base import Parser
+
+
+REGEX_GROUP = re.compile(r'(\(|\[)(?P<group>.*?)(\)|\])', re.IGNORECASE)
+
+
+PATTERN_GROUPS = [
+    ('identifier', [
+        r'S(?P<season>\d+)E(?P<episode>\d+)',
+        r'(S(?P<season>\d+))|(E(?P<episode>\d+))',
+
+        r'Ep(?P<episode>\d+)',
+        r'$(?P<absolute>\d+)^',
+
+        (r'Episode', r'(?P<episode>\d+)'),
+    ]),
+    ('video', [
+        (r'(?P<profile>%s)', [
+            'Hi10P'
+        ]),
+        (r'.(?P<resolution>%s)', [
+            '720p',
+            '1080p',
+
+            '960x720',
+            '1920x1080'
+        ]),
+        (r'(?P<source>%s)', [
+            'BD'
+        ]),
+    ]),
+    ('audio', [
+        (r'(?P<codec>%s)', [
+            'FLAC'
+        ]),
+    ])
+]
+
+
+class AnimeParser(Parser):
+    def __init__(self, debug=False):
+        super(AnimeParser, self).__init__(PATTERN_GROUPS, debug)
+
+    def capture_group(self, fragment):
+        match = REGEX_GROUP.match(fragment.value)
+
+        if not match:
+            return None
+
+        return match.group('group')
+
+    def run(self, closures):
+        """
+        :type closures: list of CaperClosure
+        """
+
+        self.setup(closures)
+
+        self.capture_closure('group', func=self.capture_group)\
+            .execute(once=True)
+
+        self.capture_fragment('show_name', single=False)\
+            .until_fragment(value__re='identifier')\
+            .until_fragment(value__re='video')\
+            .execute()
+
+        self.capture_fragment('identifier', regex='identifier') \
+            .capture_fragment('video', regex='video', single=False) \
+            .capture_fragment('audio', regex='audio', single=False) \
+            .execute()
+
+        self.result.build()
+        return self.result
diff --git a/libs/caper/parsers/base.py b/libs/caper/parsers/base.py
new file mode 100644
index 00000000..16bbc19f
--- /dev/null
+++ b/libs/caper/parsers/base.py
@@ -0,0 +1,84 @@
+# Copyright 2013 Dean Gardiner
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
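End to end, the public entry point defined in `libs/caper/__init__.py` above is used roughly like this; the filename and the resulting tags are illustrative, and CouchPotato drives this through its own scanner rather than directly:

```python
from caper import Caper

result = Caper().parse('Show.Name.S02E13.720p.HDTV.x264-GROUP')  # 'scene' parser by default

# Chains come back sorted best-first; chain.info maps capture tags to
# the regex groupdicts collected while walking the fragments.
best = result.chains[0]
print(best.weight)
print(best.info)   # e.g. {'identifier': [{'season': '02', 'episode': '13'}], ...}
```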
+
+from caper import FragmentMatcher
+from caper.group import CaptureGroup
+from caper.result import CaperResult, CaperClosureNode, CaperRootNode
+from logr import Logr
+
+
+class Parser(object):
+    def __init__(self, matcher, debug=False):
+        self.debug = debug
+
+        self.matcher = matcher
+
+        self.closures = None
+        #: :type: caper.result.CaperResult
+        self.result = None
+
+        self._match_cache = None
+        self._fragment_pos = None
+        self._closure_pos = None
+        self._history = None
+
+        self.reset()
+
+    def reset(self):
+        self.closures = None
+        self.result = CaperResult()
+
+        self._match_cache = {}
+        self._fragment_pos = -1
+        self._closure_pos = -1
+        self._history = []
+
+    def setup(self, closures):
+        """
+        :type closures: list of CaperClosure
+        """
+
+        self.reset()
+        self.closures = closures
+
+        self.result.heads = [CaperRootNode(closures[0])]
+
+    def run(self, closures):
+        """
+        :type closures: list of CaperClosure
+        """
+
+        raise NotImplementedError()
+
+    #
+    # Capture Methods
+    #
+
+    def capture_fragment(self, tag, regex=None, func=None, single=True, **kwargs):
+        return CaptureGroup(self, self.result).capture_fragment(
+            tag,
+            regex=regex,
+            func=func,
+            single=single,
+            **kwargs
+        )
+
+    def capture_closure(self, tag, regex=None, func=None, single=True, **kwargs):
+        return CaptureGroup(self, self.result).capture_closure(
+            tag,
+            regex=regex,
+            func=func,
+            single=single,
+            **kwargs
+        )
diff --git a/libs/caper/parsers/scene.py b/libs/caper/parsers/scene.py
new file mode 100644
index 00000000..cd0a8fdf
--- /dev/null
+++ b/libs/caper/parsers/scene.py
@@ -0,0 +1,230 @@
+# Copyright 2013 Dean Gardiner
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from logr import Logr
+from caper import FragmentMatcher
+from caper.parsers.base import Parser
+from caper.result import CaperFragmentNode
+
+
+PATTERN_GROUPS = [
+    ('identifier', [
+        (1.0, [
+            # S01E01-E02
+            ('^S(?P<season>\d+)E(?P<episode_from>\d+)$', '^E(?P<episode_to>\d+)$'),
+            # 'S03 E01 to E08' or 'S03 E01 - E09'
+            ('^S(?P<season>\d+)$', '^E(?P<episode_from>\d+)$', '^(to|-)$', '^E(?P<episode_to>\d+)$'),
+            # 'E01 to E08' or 'E01 - E09'
+            ('^E(?P<episode_from>\d+)$', '^(to|-)$', '^E(?P<episode_to>\d+)$'),
+
+            # S01-S03
+            ('^S(?P<season_from>\d+)$', '^S(?P<season_to>\d+)$'),
+
+            # S02E13
+            r'^S(?P<season>\d+)E(?P<episode>\d+)$',
+            # S01 E13
+            (r'^(S(?P<season>\d+))$', r'^(E(?P<episode>\d+))$'),
+            # S02
+            # E13
+            r'^((S(?P<season>\d+))|(E(?P<episode>\d+)))$',
+            # 3x19
+            r'^(?P<season>\d+)x(?P<episode>\d+)$',
+
+            # 2013.09.15
+            (r'^(?P<year>\d{4})$', r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$'),
+            # 09.15.2013
+            (r'^(?P<month>\d{2})$', r'^(?P<day>\d{2})$', r'^(?P<year>\d{4})$'),
+            # TODO - US/UK Date Format Conflict? will only support US format for now..
+            # 15.09.2013
+            #(r'^(?P<day>\d{2})$', r'^(?P<month>\d{2})$', r'^(?P<year>\d{4})$'),
+            # 130915
+            r'^(?P<year>\d{2})(?P<month>\d{2})(?P<day>\d{2})$',
+
+            # Season 3 Episode 14
+            (r'^Se(ason)?$', r'^(?P<season>\d+)$', r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
+            # Season 3
+            (r'^Se(ason)?$', r'^(?P<season>\d+)$'),
+            # Episode 14
+            (r'^Ep(isode)?$', r'^(?P<episode>\d+)$'),
+
+            # Part.3
+            # Part.1.and.Part.3
+            ('^Part$', '(?P<part>\d+)'),
+
+            r'(?P<special>Special)',
+            r'(?P<country>NZ|AU|US|UK)'
+        ]),
+        (0.8, [
+            # 100 - 1899, 2100 - 9999 (skips 1900 to 2099 - so we don't get years by mistake)
+            # TODO - Update this pattern on 31 Dec 2099
+            r'^(?P<season>([1-9])|(1[0-8])|(2[1-9])|([3-9][0-9]))(?P<episode>\d{2})$'
+        ]),
+        (0.5, [
+            # 100 - 9999
+            r'^(?P<season>([1-9])|([1-9][0-9]))(?P<episode>\d{2})$'
+        ])
+    ]),
+
+    ('video', [
+        r'(?P<aspect>FS|WS)',
+
+        (r'(?P<resolution>%s)', [
+            '480p',
+            '720p',
+            '1080p'
+        ]),
+
+        #
+        # Source
+        #
+
+        (r'(?P<source>%s)', [
+            'DVDRiP',
+            # HDTV
+            'HDTV',
+            'PDTV',
+            'DSR',
+            # WEB
+            'WEBRip',
+            'WEBDL',
+            # BluRay
+            'BluRay',
+            'B(D|R)Rip',
+            # DVD
+            'DVDR',
+            'DVD9',
+            'DVD5'
+        ]),
+
+        # For multi-fragment 'WEB-DL', 'WEB-Rip', etc... matches
+        ('(?P<source>WEB)', '(?P<source>DL|Rip)'),
+
+        #
+        # Codec
+        #
+
+        (r'(?P<codec>%s)', [
+            'x264',
+            'XViD',
+            'H264',
+            'AVC'
+        ]),
+
+        # For multi-fragment 'H 264' tags
+        ('(?P<codec>H)', '(?P<codec>264)'),
+    ]),
+
+    ('dvd', [
+        r'D(ISC)?(?P<disc>\d+)',
+
+        r'R(?P<region>[0-8])',
+
+        (r'(?P<encoding>%s)', [
+            'PAL',
+            'NTSC'
+        ]),
+    ]),
+
+    ('audio', [
+        (r'(?P<codec>%s)', [
+            'AC3',
+            'TrueHD'
+        ]),
+
+        (r'(?P<language>%s)', [
+            'GERMAN',
+            'DUTCH',
+            'FRENCH',
+            'SWEDiSH',
+            'DANiSH',
+            'iTALiAN'
+        ]),
+    ]),
+
+    ('scene', [
+        r'(?P<proper>PROPER|REAL)',
+    ])
+]
+
+
+class SceneParser(Parser):
+    matcher = None
+
+    def __init__(self, debug=False):
+        if not SceneParser.matcher:
+            SceneParser.matcher = FragmentMatcher(PATTERN_GROUPS)
+            Logr.info("Fragment matcher for %s created", self.__class__.__name__)
+
+        super(SceneParser, self).__init__(SceneParser.matcher, debug)
+
+    def capture_group(self, fragment):
+        if fragment.closure.index + 1 != len(self.closures):
+            return None
+
+        if fragment.left_sep != '-' or fragment.right:
+            return None
+
+        return fragment.value
+
+    def run(self, closures):
+        """
+        :type closures: list of CaperClosure
+        """
+
+        self.setup(closures)
+
+        self.capture_fragment('show_name', single=False)\
+            .until_fragment(node__re='identifier')\
+            .until_fragment(node__re='video')\
+            .until_fragment(node__re='dvd')\
+            .until_fragment(node__re='audio')\
+            .until_fragment(node__re='scene')\
+            .execute()
+
+        self.capture_fragment('identifier', regex='identifier', single=False)\
+            .capture_fragment('video', regex='video', single=False)\
+            .capture_fragment('dvd', regex='dvd', single=False)\
+            .capture_fragment('audio', regex='audio', single=False)\
+            .capture_fragment('scene', regex='scene', single=False)\
+            .until_fragment(left_sep__eq='-', right__eq=None)\
+            .execute()
+
+        self.capture_fragment('group', func=self.capture_group)\
+            .execute()
+
+        self.print_tree(self.result.heads)
+
+        self.result.build()
+        return self.result
+
+    def print_tree(self, heads):
+        if not self.debug:
+            return
+
+        for head in heads:
+            head = head if type(head) is list else [head]
+
+            if type(head[0]) is CaperFragmentNode:
+                for fragment in head[0].fragments:
+                    Logr.debug(fragment.value)
+            else:
+                Logr.debug(head[0].closure.value)
+
+            for node in head:
+                Logr.debug('\t' + str(node).ljust(55) + '\t' + (
+                    str(node.match.weight) + '\t' + str(node.match.result)
+                ) if node.match else '')
+
+            if len(head) > 0 and head[0].parent:
+                self.print_tree([head[0].parent])
diff --git a/libs/caper/parsers/usenet.py b/libs/caper/parsers/usenet.py
new file mode 100644
index 00000000..f622d43b
--- /dev/null
+++ b/libs/caper/parsers/usenet.py
@@ -0,0 +1,115 @@
+# Copyright 2013 Dean Gardiner
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from logr import Logr
+from caper import FragmentMatcher
+from caper.parsers.base import Parser
+
+
+PATTERN_GROUPS = [
+    ('usenet', [
+        r'\[(?P<group>#[\w\.@]+)\]',
+        r'^\[(?P<code>\w+)\]$',
+        r'\[(?P<full>FULL)\]',
+        r'\[\s?(?P<group>TOWN)\s?\]',
+        r'(.*?\s)?[_\W]*(?P<site>www\..*?\.[a-z0-9]+)[_\W]*(.*?\s)?',
+        r'(.*?\s)?[_\W]*(?P<site>(www\.)?[-\w]+\.(com|org|info))[_\W]*(.*?\s)?'
+    ]),
+
+    ('part', [
+        r'.?(?P<current>\d+)/(?P<total>\d+).?'
+    ]),
+
+    ('detail', [
+        r'[\s-]*\w*?[\s-]*\"(?P<file_name>.*?)\"[\s-]*\w*?[\s-]*(?P<size>[\d,\.]*\s?MB)?[\s-]*(?P<extra>yEnc)?',
+        r'(?P<size>[\d,\.]*\s?MB)[\s-]*(?P<extra>yEnc)',
+        r'(?P<size>[\d,\.]*\s?MB)|(?P<extra>yEnc)'
+    ])
+]
+
+
+class UsenetParser(Parser):
+    matcher = None
+
+    def __init__(self, debug=False):
+        if not UsenetParser.matcher:
+            UsenetParser.matcher = FragmentMatcher(PATTERN_GROUPS)
+            Logr.info("Fragment matcher for %s created", self.__class__.__name__)
+
+        super(UsenetParser, self).__init__(UsenetParser.matcher, debug)
+
+    def run(self, closures):
+        """
+        :type closures: list of CaperClosure
+        """
+
+        self.setup(closures)
+
+        # Capture usenet or part info until we get a part or matching fails
+        self.capture_closure('usenet', regex='usenet', single=False)\
+            .capture_closure('part', regex='part', single=True) \
+            .until_result(tag='part') \
+            .until_failure()\
+            .execute()
+
+        is_town_release, has_part = self.get_state()
+
+        if not is_town_release:
+            self.capture_release_name()
+
+        # If we already have the part (TOWN releases), ignore matching part again
+        if not is_town_release and not has_part:
+            self.capture_fragment('part', regex='part', single=True)\
+                .until_closure(node__re='usenet')\
+                .until_success()\
+                .execute()
+
+        # Capture any leftover details
+        self.capture_closure('usenet', regex='usenet', single=False)\
+            .capture_closure('detail', regex='detail', single=False)\
+            .execute()
+
+        self.result.build()
+        return self.result
+
+    def capture_release_name(self):
+        self.capture_closure('detail', regex='detail', single=False)\
+            .until_failure()\
+            .execute()
+
+        self.capture_fragment('release_name', single=False, include_separators=True) \
+            .until_closure(node__re='usenet') \
+            .until_closure(node__re='detail') \
+            .until_closure(node__re='part') \
+            .until_fragment(value__eq='-')\
+            .execute()
+
+        # Capture any detail after the release name
+        self.capture_closure('detail', regex='detail', single=False)\
+            .until_failure()\
+            .execute()
+
+    def get_state(self):
+        # TODO multiple-chains?
+ is_town_release = False + has_part = False + + for tag, result in self.result.heads[0].captured(): + if tag == 'usenet' and result.get('group') == 'TOWN': + is_town_release = True + + if tag == 'part': + has_part = True + + return is_town_release, has_part diff --git a/libs/caper/result.py b/libs/caper/result.py new file mode 100644 index 00000000..c9e34237 --- /dev/null +++ b/libs/caper/result.py @@ -0,0 +1,213 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +from logr import Logr + + +GROUP_MATCHES = ['identifier'] + + +class CaperNode(object): + def __init__(self, closure, parent=None, match=None): + """ + :type parent: CaperNode + :type weight: float + """ + + #: :type: caper.objects.CaperClosure + self.closure = closure + + #: :type: CaperNode + self.parent = parent + + #: :type: CaptureMatch + self.match = match + + #: :type: list of CaptureGroup + self.finished_groups = [] + + def next(self): + raise NotImplementedError() + + def captured(self): + cur = self + + if cur.match: + yield cur.match.tag, cur.match.result + + while cur.parent: + cur = cur.parent + + if cur.match: + yield cur.match.tag, cur.match.result + + +class CaperRootNode(CaperNode): + def __init__(self, closure): + """ + :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure + """ + super(CaperRootNode, self).__init__(closure) + + def next(self): + return self.closure + + +class CaperClosureNode(CaperNode): + def __init__(self, closure, parent=None, match=None): + """ + :type closure: caper.objects.CaperClosure or list of caper.objects.CaperClosure + """ + super(CaperClosureNode, self).__init__(closure, parent, match) + + def next(self): + if not self.closure: + return None + + if self.match: + # Jump to next closure if we have a match + return self.closure.right + elif len(self.closure.fragments) > 0: + # Otherwise parse the fragments + return self.closure.fragments[0] + + return None + + def __str__(self): + return "" % repr(self.match) + + def __repr__(self): + return self.__str__() + + +class CaperFragmentNode(CaperNode): + def __init__(self, closure, fragments, parent=None, match=None): + """ + :type closure: caper.objects.CaperClosure + :type fragments: list of caper.objects.CaperFragment + """ + super(CaperFragmentNode, self).__init__(closure, parent, match) + + #: :type: caper.objects.CaperFragment or list of caper.objects.CaperFragment + self.fragments = fragments + + def next(self): + if len(self.fragments) > 0 and self.fragments[-1] and self.fragments[-1].right: + return self.fragments[-1].right + + if self.closure.right: + return self.closure.right + + return None + + def __str__(self): + return "" % repr(self.match) + + def __repr__(self): + return self.__str__() + + +class CaperResult(object): + def __init__(self): + #: :type: list of CaperNode + self.heads = [] + + self.chains = [] + + def build(self): + max_matched = 0 + + for head in self.heads: + for chain in self.combine_chain(head): + if chain.num_matched > max_matched: + 
max_matched = chain.num_matched + + self.chains.append(chain) + + for chain in self.chains: + chain.weights.append(chain.num_matched / float(max_matched or chain.num_matched or 1)) + chain.finish() + + self.chains.sort(key=lambda chain: chain.weight, reverse=True) + + for chain in self.chains: + Logr.debug("chain weight: %.02f", chain.weight) + Logr.debug("\tInfo: %s", chain.info) + + Logr.debug("\tWeights: %s", chain.weights) + Logr.debug("\tNumber of Fragments Matched: %s", chain.num_matched) + + def combine_chain(self, subject, chain=None): + nodes = subject if type(subject) is list else [subject] + + if chain is None: + chain = CaperResultChain() + + result = [] + + for x, node in enumerate(nodes): + node_chain = chain if x == len(nodes) - 1 else chain.copy() + + if not node.parent: + result.append(node_chain) + continue + + node_chain.update(node) + result.extend(self.combine_chain(node.parent, node_chain)) + + return result + + +class CaperResultChain(object): + def __init__(self): + #: :type: float + self.weight = None + self.info = {} + self.num_matched = 0 + + self.weights = [] + + def update(self, subject): + """ + :type subject: CaperFragmentNode + """ + if not subject.match or not subject.match.success: + return + + # TODO this should support closure nodes + if type(subject) is CaperFragmentNode: + self.num_matched += len(subject.fragments) if subject.fragments is not None else 0 + + self.weights.append(subject.match.weight) + + if subject.match: + if subject.match.tag not in self.info: + self.info[subject.match.tag] = [] + + self.info[subject.match.tag].insert(0, subject.match.result) + + def finish(self): + self.weight = sum(self.weights) / len(self.weights) + + def copy(self): + chain = CaperResultChain() + + chain.weight = self.weight + chain.info = copy.deepcopy(self.info) + + chain.num_matched = self.num_matched + chain.weights = copy.copy(self.weights) + + return chain \ No newline at end of file diff --git a/libs/caper/step.py b/libs/caper/step.py new file mode 100644 index 00000000..817514b6 --- /dev/null +++ b/libs/caper/step.py @@ -0,0 +1,96 @@ +# Copyright 2013 Dean Gardiner +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
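A worked sketch of the chain scoring above, with made-up numbers: every matched node contributes its match weight via `update()`, `build()` appends one coverage term (`num_matched` scaled against the best chain in the result), and `finish()` averages the list:

```python
weights = [1.0, 0.8]                  # two pattern matches on this chain
weights.append(4 / float(5))          # this chain matched 4 fragments, the best chain 5
weight = sum(weights) / len(weights)  # finish() -> (1.0 + 0.8 + 0.8) / 3 ~= 0.87
```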
+ +from caper.objects import CaptureMatch +from logr import Logr + + +class CaptureStep(object): + REPR_KEYS = ['regex', 'func', 'single'] + + def __init__(self, capture_group, tag, source, regex=None, func=None, single=None, **kwargs): + #: @type: CaptureGroup + self.capture_group = capture_group + + #: @type: str + self.tag = tag + #: @type: str + self.source = source + #: @type: str + self.regex = regex + #: @type: function + self.func = func + #: @type: bool + self.single = single + + self.kwargs = kwargs + + self.matched = False + + def execute(self, fragment): + """Execute step on fragment + + :type fragment: CaperFragment + :rtype : CaptureMatch + """ + + match = CaptureMatch(self.tag, self) + + if self.regex: + weight, result, num_fragments = self.capture_group.parser.matcher.fragment_match(fragment, self.regex) + Logr.debug('(execute) [regex] tag: "%s"', self.tag) + + if not result: + return match + + # Populate CaptureMatch + match.success = True + match.weight = weight + match.result = result + match.num_fragments = num_fragments + elif self.func: + result = self.func(fragment) + Logr.debug('(execute) [func] %s += "%s"', self.tag, match) + + if not result: + return match + + # Populate CaptureMatch + match.success = True + match.weight = 1.0 + match.result = result + else: + Logr.debug('(execute) [raw] %s += "%s"', self.tag, fragment.value) + + include_separators = self.kwargs.get('include_separators', False) + + # Populate CaptureMatch + match.success = True + match.weight = 1.0 + + if include_separators: + match.result = (fragment.left_sep, fragment.value, fragment.right_sep) + else: + match.result = fragment.value + + return match + + def __repr__(self): + attribute_values = [key + '=' + repr(getattr(self, key)) + for key in self.REPR_KEYS + if hasattr(self, key) and getattr(self, key)] + + attribute_string = ', ' + ', '.join(attribute_values) if len(attribute_values) > 0 else '' + + return "CaptureStep('%s'%s)" % (self.tag, attribute_string) diff --git a/libs/decorator.py b/libs/decorator.py deleted file mode 100644 index ea7e9909..00000000 --- a/libs/decorator.py +++ /dev/null @@ -1,210 +0,0 @@ -########################## LICENCE ############################### -## -## Copyright (c) 2005-2011, Michele Simionato -## All rights reserved. -## -## Redistributions of source code must retain the above copyright -## notice, this list of conditions and the following disclaimer. -## Redistributions in bytecode form must reproduce the above copyright -## notice, this list of conditions and the following disclaimer in -## the documentation and/or other materials provided with the -## distribution. - -## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -## "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -## LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -## A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -## HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -## INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -## BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS -## OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -## ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR -## TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE -## USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH -## DAMAGE. - -""" -Decorator module, see http://pypi.python.org/pypi/decorator -for the documentation. 
-""" - -__version__ = '3.3.2' - -__all__ = ["decorator", "FunctionMaker", "partial"] - -import sys, re, inspect - -try: - from functools import partial -except ImportError: # for Python version < 2.5 - class partial(object): - "A simple replacement of functools.partial" - def __init__(self, func, *args, **kw): - self.func = func - self.args = args - self.keywords = kw - def __call__(self, *otherargs, **otherkw): - kw = self.keywords.copy() - kw.update(otherkw) - return self.func(*(self.args + otherargs), **kw) - -if sys.version >= '3': - from inspect import getfullargspec -else: - class getfullargspec(object): - "A quick and dirty replacement for getfullargspec for Python 2.X" - def __init__(self, f): - self.args, self.varargs, self.varkw, self.defaults = \ - inspect.getargspec(f) - self.kwonlyargs = [] - self.kwonlydefaults = None - self.annotations = getattr(f, '__annotations__', {}) - def __iter__(self): - yield self.args - yield self.varargs - yield self.varkw - yield self.defaults - -DEF = re.compile('\s*def\s*([_\w][_\w\d]*)\s*\(') - -# basic functionality -class FunctionMaker(object): - """ - An object with the ability to create functions with a given signature. - It has attributes name, doc, module, signature, defaults, dict and - methods update and make. - """ - def __init__(self, func=None, name=None, signature=None, - defaults=None, doc=None, module=None, funcdict=None): - self.shortsignature = signature - if func: - # func can be a class or a callable, but not an instance method - self.name = func.__name__ - if self.name == '': # small hack for lambda functions - self.name = '_lambda_' - self.doc = func.__doc__ - self.module = func.__module__ - if inspect.isfunction(func): - argspec = getfullargspec(func) - for a in ('args', 'varargs', 'varkw', 'defaults', 'kwonlyargs', - 'kwonlydefaults', 'annotations'): - setattr(self, a, getattr(argspec, a)) - for i, arg in enumerate(self.args): - setattr(self, 'arg%d' % i, arg) - self.signature = inspect.formatargspec( - formatvalue=lambda val: "", *argspec)[1:-1] - allargs = list(self.args) - if self.varargs: - allargs.append('*' + self.varargs) - if self.varkw: - allargs.append('**' + self.varkw) - try: - self.shortsignature = ', '.join(allargs) - except TypeError: # exotic signature, valid only in Python 2.X - self.shortsignature = self.signature - self.dict = func.__dict__.copy() - # func=None happens when decorating a caller - if name: - self.name = name - if signature is not None: - self.signature = signature - if defaults: - self.defaults = defaults - if doc: - self.doc = doc - if module: - self.module = module - if funcdict: - self.dict = funcdict - # check existence required attributes - assert hasattr(self, 'name') - if not hasattr(self, 'signature'): - raise TypeError('You are decorating a non function: %s' % func) - - def update(self, func, **kw): - "Update the signature of func with the data in self" - func.__name__ = self.name - func.__doc__ = getattr(self, 'doc', None) - func.__dict__ = getattr(self, 'dict', {}) - func.func_defaults = getattr(self, 'defaults', ()) - func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) - callermodule = sys._getframe(3).f_globals.get('__name__', '?') - func.__module__ = getattr(self, 'module', callermodule) - func.__dict__.update(kw) - - def make(self, src_templ, evaldict=None, addsource=False, **attrs): - "Make a new function from a given template and update the signature" - src = src_templ % vars(self) # expand name and signature - evaldict = evaldict or {} - mo = DEF.match(src) - if 
mo is None: - raise SyntaxError('not a valid function template\n%s' % src) - name = mo.group(1) # extract the function name - names = set([name] + [arg.strip(' *') for arg in - self.shortsignature.split(',')]) - for n in names: - if n in ('_func_', '_call_'): - raise NameError('%s is overridden in\n%s' % (n, src)) - if not src.endswith('\n'): # add a newline just for safety - src += '\n' # this is needed in old versions of Python - try: - code = compile(src, '', 'single') - # print >> sys.stderr, 'Compiling %s' % src - exec code in evaldict - except: - print >> sys.stderr, 'Error in generated code:' - print >> sys.stderr, src - raise - func = evaldict[name] - if addsource: - attrs['__source__'] = src - self.update(func, **attrs) - return func - - @classmethod - def create(cls, obj, body, evaldict, defaults=None, - doc=None, module=None, addsource=True,**attrs): - """ - Create a function from the strings name, signature and body. - evaldict is the evaluation dictionary. If addsource is true an attribute - __source__ is added to the result. The attributes attrs are added, - if any. - """ - if isinstance(obj, str): # "name(signature)" - name, rest = obj.strip().split('(', 1) - signature = rest[:-1] #strip a right parens - func = None - else: # a function - name = None - signature = None - func = obj - self = cls(func, name, signature, defaults, doc, module) - ibody = '\n'.join(' ' + line for line in body.splitlines()) - return self.make('def %(name)s(%(signature)s):\n' + ibody, - evaldict, addsource, **attrs) - -def decorator(caller, func=None): - """ - decorator(caller) converts a caller function into a decorator; - decorator(caller, func) decorates a function using a caller. - """ - if func is not None: # returns a decorated function - evaldict = func.func_globals.copy() - evaldict['_call_'] = caller - evaldict['_func_'] = func - return FunctionMaker.create( - func, "return _call_(_func_, %(shortsignature)s)", - evaldict, undecorated=func, __wrapped__=func) - else: # returns a decorator - if isinstance(caller, partial): - return partial(decorator, caller) - # otherwise assume caller is a function - first = inspect.getargspec(caller)[0][0] # first arg - evaldict = caller.func_globals.copy() - evaldict['_call_'] = caller - evaldict['decorator'] = decorator - return FunctionMaker.create( - '%s(%s)' % (caller.__name__, first), - 'return decorator(_call_, %s)' % first, - evaldict, undecorated=caller, __wrapped__=caller, - doc=caller.__doc__, module=caller.__module__) diff --git a/libs/elixir/__init__.py b/libs/elixir/__init__.py deleted file mode 100644 index a242b538..00000000 --- a/libs/elixir/__init__.py +++ /dev/null @@ -1,114 +0,0 @@ -''' -Elixir package - -A declarative layer on top of the `SQLAlchemy library -`_. It is a fairly thin wrapper, which provides -the ability to create simple Python classes that map directly to relational -database tables (this pattern is often referred to as the Active Record design -pattern), providing many of the benefits of traditional databases -without losing the convenience of Python objects. - -Elixir is intended to replace the ActiveMapper SQLAlchemy extension, and the -TurboEntity project but does not intend to replace SQLAlchemy's core features, -and instead focuses on providing a simpler syntax for defining model objects -when you do not need the full expressiveness of SQLAlchemy's manual mapper -definitions. 
-''' - -try: - set -except NameError: - from sets import Set as set - -import sqlalchemy -from sqlalchemy.types import * - -from elixir.options import using_options, using_table_options, \ - using_mapper_options, options_defaults, \ - using_options_defaults -from elixir.entity import Entity, EntityBase, EntityMeta, EntityDescriptor, \ - setup_entities, cleanup_entities -from elixir.fields import has_field, Field -from elixir.relationships import belongs_to, has_one, has_many, \ - has_and_belongs_to_many, \ - ManyToOne, OneToOne, OneToMany, ManyToMany -from elixir.properties import has_property, GenericProperty, ColumnProperty, \ - Synonym -from elixir.statements import Statement -from elixir.collection import EntityCollection, GlobalEntityCollection - - -__version__ = '0.8.0dev' - -__all__ = ['Entity', 'EntityBase', 'EntityMeta', 'EntityCollection', - 'entities', - 'Field', 'has_field', - 'has_property', 'GenericProperty', 'ColumnProperty', 'Synonym', - 'belongs_to', 'has_one', 'has_many', 'has_and_belongs_to_many', - 'ManyToOne', 'OneToOne', 'OneToMany', 'ManyToMany', - 'using_options', 'using_table_options', 'using_mapper_options', - 'options_defaults', 'using_options_defaults', - 'metadata', 'session', - 'create_all', 'drop_all', - 'setup_all', 'cleanup_all', - 'setup_entities', 'cleanup_entities'] + \ - sqlalchemy.types.__all__ - -__doc_all__ = ['create_all', 'drop_all', - 'setup_all', 'cleanup_all', - 'metadata', 'session'] - -# default session -session = sqlalchemy.orm.scoped_session(sqlalchemy.orm.sessionmaker()) - -# default metadata -metadata = sqlalchemy.MetaData() - -metadatas = set() - -# default entity collection -entities = GlobalEntityCollection() - - -def create_all(*args, **kwargs): - '''Create the necessary tables for all declared entities''' - for md in metadatas: - md.create_all(*args, **kwargs) - - -def drop_all(*args, **kwargs): - '''Drop tables for all declared entities''' - for md in metadatas: - md.drop_all(*args, **kwargs) - - -def setup_all(create_tables=False, *args, **kwargs): - '''Setup the table and mapper of all entities in the default entity - collection. - ''' - setup_entities(entities) - - # issue the "CREATE" SQL statements - if create_tables: - create_all(*args, **kwargs) - - -def cleanup_all(drop_tables=False, *args, **kwargs): - '''Clear all mappers, clear the session, and clear all metadatas. - Optionally drops the tables. - ''' - session.close() - - cleanup_entities(entities) - - sqlalchemy.orm.clear_mappers() - entities.clear() - - if drop_tables: - drop_all(*args, **kwargs) - - for md in metadatas: - md.clear() - metadatas.clear() - - diff --git a/libs/elixir/collection.py b/libs/elixir/collection.py deleted file mode 100644 index 78127e3e..00000000 --- a/libs/elixir/collection.py +++ /dev/null @@ -1,125 +0,0 @@ -''' -Default entity collection implementation -''' -import sys -import re - -class BaseCollection(list): - def __init__(self, entities=None): - list.__init__(self) - if entities is not None: - self.extend(entities) - - def extend(self, entities): - for e in entities: - self.append(e) - - def clear(self): - del self[:] - - def resolve_absolute(self, key, full_path, entity=None, root=None): - if root is None: - root = entity._descriptor.resolve_root - if root: - full_path = '%s.%s' % (root, full_path) - module_path, classname = full_path.rsplit('.', 1) - module = sys.modules[module_path] - res = getattr(module, classname, None) - if res is None: - if entity is not None: - raise Exception("Couldn't resolve target '%s' <%s> in '%s'!" 
- % (key, full_path, entity.__name__)) - else: - raise Exception("Couldn't resolve target '%s' <%s>!" - % (key, full_path)) - return res - - def __getattr__(self, key): - return self.resolve(key) - -# default entity collection -class GlobalEntityCollection(BaseCollection): - def __init__(self, entities=None): - # _entities is a dict of entities keyed on their name. - self._entities = {} - super(GlobalEntityCollection, self).__init__(entities) - - def append(self, entity): - ''' - Add an entity to the collection. - ''' - super(EntityCollection, self).append(entity) - - existing_entities = self._entities.setdefault(entity.__name__, []) - existing_entities.append(entity) - - def resolve(self, key, entity=None): - ''' - Resolve a key to an Entity. The optional `entity` argument is the - "source" entity when resolving relationship targets. - ''' - # Do we have a fully qualified entity name? - if '.' in key: - return self.resolve_absolute(key, key, entity) - else: - # Otherwise we look in the entities of this collection - res = self._entities.get(key, None) - if res is None: - if entity: - raise Exception("Couldn't resolve target '%s' in '%s'" - % (key, entity.__name__)) - else: - raise Exception("This collection does not contain any " - "entity corresponding to the key '%s'!" - % key) - elif len(res) > 1: - raise Exception("'%s' resolves to several entities, you should" - " use the full path (including the full module" - " name) to that entity." % key) - else: - return res[0] - - def clear(self): - self._entities = {} - super(GlobalEntityCollection, self).clear() - -# backward compatible name -EntityCollection = GlobalEntityCollection - -_leading_dots = re.compile('^([.]*).*$') - -class RelativeEntityCollection(BaseCollection): - # the entity=None does not make any sense with a relative entity collection - def resolve(self, key, entity): - ''' - Resolve a key to an Entity. The optional `entity` argument is the - "source" entity when resolving relationship targets. - ''' - full_path = key - - if '.' not in key or key.startswith('.'): - # relative target - - # any leading dot is stripped and with each dot removed, - # the entity_module is stripped of one more chunk (starting with - # the last one). - num_dots = _leading_dots.match(full_path).end(1) - full_path = full_path[num_dots:] - chunks = entity.__module__.split('.') - chunkstokeep = len(chunks) - num_dots - if chunkstokeep < 0: - raise Exception("Couldn't resolve relative target " - "'%s' relative to '%s'" % (key, entity.__module__)) - entity_module = '.'.join(chunks[:chunkstokeep]) - - if entity_module and entity_module is not '__main__': - full_path = '%s.%s' % (entity_module, full_path) - - root = '' - else: - root = None - return self.resolve_absolute(key, full_path, entity, root=root) - - def __getattr__(self, key): - raise NotImplementedError - diff --git a/libs/elixir/entity.py b/libs/elixir/entity.py deleted file mode 100644 index 87f5154c..00000000 --- a/libs/elixir/entity.py +++ /dev/null @@ -1,1039 +0,0 @@ -''' -This module provides the ``Entity`` base class, as well as its metaclass -``EntityMeta``. 
-''' - -import sys -import types -import warnings - -from copy import deepcopy - -import sqlalchemy -from sqlalchemy import Table, Column, Integer, desc, ForeignKey, and_, \ - ForeignKeyConstraint -from sqlalchemy.orm import MapperExtension, mapper, object_session, \ - EXT_CONTINUE, polymorphic_union, ScopedSession, \ - ColumnProperty -from sqlalchemy.sql import ColumnCollection - -import elixir -from elixir.statements import process_mutators, MUTATORS -from elixir import options -from elixir.properties import Property - -DEBUG = False - -__doc_all__ = ['Entity', 'EntityMeta'] - - -def session_mapper_factory(scoped_session): - def session_mapper(cls, *args, **kwargs): - if kwargs.pop('save_on_init', True): - old_init = cls.__init__ - def __init__(self, *args, **kwargs): - old_init(self, *args, **kwargs) - scoped_session.add(self) - cls.__init__ = __init__ - cls.query = scoped_session.query_property() - return mapper(cls, *args, **kwargs) - return session_mapper - - -class EntityDescriptor(object): - ''' - EntityDescriptor describes fields and options needed for table creation. - ''' - - def __init__(self, entity): - self.entity = entity - self.parent = None - - bases = [] - for base in entity.__bases__: - if isinstance(base, EntityMeta): - if is_entity(base) and not is_abstract_entity(base): - if self.parent: - raise Exception( - '%s entity inherits from several entities, ' - 'and this is not supported.' - % self.entity.__name__) - else: - self.parent = base - bases.extend(base._descriptor.bases) - self.parent._descriptor.children.append(entity) - else: - bases.append(base) - self.bases = bases - if not is_entity(entity) or is_abstract_entity(entity): - return - - # entity.__module__ is not always reliable (eg in mod_python) - self.module = sys.modules.get(entity.__module__) - - self.builders = [] - - #XXX: use entity.__subclasses__ ? - self.children = [] - - # used for multi-table inheritance - self.join_condition = None - self.has_pk = False - self._pk_col_done = False - - # columns and constraints waiting for a table to exist - self._columns = ColumnCollection() - self.constraints = [] - - # properties (it is only useful for checking dupe properties at the - # moment, and when adding properties before the mapper is created, - # which shouldn't happen). - self.properties = {} - - # - self.relationships = [] - - # set default value for options - self.table_args = [] - - # base class(es) options_defaults - options_defaults = self.options_defaults() - - complete_defaults = options.options_defaults.copy() - complete_defaults.update({ - 'metadata': elixir.metadata, - 'session': elixir.session, - 'collection': elixir.entities - }) - - # set default value for other options - for key in options.valid_options: - value = options_defaults.get(key, complete_defaults[key]) - if isinstance(value, dict): - value = value.copy() - setattr(self, key, value) - - # override options with module-level defaults defined - for key in ('metadata', 'session', 'collection'): - attr = '__%s__' % key - if hasattr(self.module, attr): - setattr(self, key, getattr(self.module, attr)) - - def options_defaults(self): - base_defaults = {} - for base in self.bases: - base_defaults.update(base._descriptor.options_defaults()) - base_defaults.update(getattr(self.entity, 'options_defaults', {})) - return base_defaults - - def setup_options(self): - ''' - Setup any values that might depend on the "using_options" class - mutator. For example, the tablename or the metadata. 
- ''' - elixir.metadatas.add(self.metadata) - if self.collection is not None: - self.collection.append(self.entity) - - entity = self.entity - if self.parent: - if self.inheritance == 'single': - self.tablename = self.parent._descriptor.tablename - - if not self.tablename: - if self.shortnames: - self.tablename = entity.__name__.lower() - else: - modulename = entity.__module__.replace('.', '_') - tablename = "%s_%s" % (modulename, entity.__name__) - self.tablename = tablename.lower() - elif hasattr(self.tablename, '__call__'): - self.tablename = self.tablename(entity) - - if not self.identity: - if 'polymorphic_identity' in self.mapper_options: - self.identity = self.mapper_options['polymorphic_identity'] - else: - #TODO: include module name (We could have b.Account inherit - # from a.Account) - self.identity = entity.__name__.lower() - elif 'polymorphic_identity' in self.mapper_options: - raise Exception('You cannot use the "identity" option and the ' - 'polymorphic_identity mapper option at the same ' - 'time.') - elif hasattr(self.identity, '__call__'): - self.identity = self.identity(entity) - - if self.polymorphic: - if not isinstance(self.polymorphic, basestring): - self.polymorphic = options.DEFAULT_POLYMORPHIC_COL_NAME - - #--------------------- - # setup phase methods - - def setup_autoload_table(self): - self.setup_table(True) - - def create_pk_cols(self): - """ - Create primary_key columns. That is, call the 'create_pk_cols' - builders then add a primary key to the table if it hasn't already got - one and needs one. - - This method is "semi-recursive" in some cases: it calls the - create_keys method on ManyToOne relationships and those in turn call - create_pk_cols on their target. It shouldn't be possible to have an - infinite loop since a loop of primary_keys is not a valid situation. - """ - if self._pk_col_done: - return - - self.call_builders('create_pk_cols') - - if not self.autoload: - if self.parent: - if self.inheritance == 'multi': - # Add columns with foreign keys to the parent's primary - # key columns - parent_desc = self.parent._descriptor - tablename = parent_desc.table_fullname - join_clauses = [] - fk_columns = [] - for pk_col in parent_desc.primary_keys: - colname = options.MULTIINHERITANCECOL_NAMEFORMAT % \ - {'entity': self.parent.__name__.lower(), - 'key': pk_col.key} - - # It seems like SA ForeignKey is not happy being given - # a real column object when said column is not yet - # attached to a table - pk_col_name = "%s.%s" % (tablename, pk_col.key) - col = Column(colname, pk_col.type, primary_key=True) - fk_columns.append(col) - self.add_column(col) - join_clauses.append(col == pk_col) - self.join_condition = and_(*join_clauses) - self.add_constraint( - ForeignKeyConstraint(fk_columns, - parent_desc.primary_keys, ondelete='CASCADE')) - elif self.inheritance == 'concrete': - # Copy primary key columns from the parent. 
- for col in self.parent._descriptor.columns: - if col.primary_key: - self.add_column(col.copy()) - elif not self.has_pk and self.auto_primarykey: - if isinstance(self.auto_primarykey, basestring): - colname = self.auto_primarykey - else: - colname = options.DEFAULT_AUTO_PRIMARYKEY_NAME - - self.add_column( - Column(colname, options.DEFAULT_AUTO_PRIMARYKEY_TYPE, - primary_key=True)) - self._pk_col_done = True - - def setup_relkeys(self): - self.call_builders('create_non_pk_cols') - - def before_table(self): - self.call_builders('before_table') - - def setup_table(self, only_autoloaded=False): - ''' - Create a SQLAlchemy table-object with all columns that have been - defined up to this point. - ''' - if self.entity.table is not None: - return - - if self.autoload != only_autoloaded: - return - - kwargs = self.table_options - if self.autoload: - args = self.table_args - kwargs['autoload'] = True - else: - if self.parent: - if self.inheritance == 'single': - # we know the parent is setup before the child - self.entity.table = self.parent.table - - # re-add the entity columns to the parent entity so that - # they are added to the parent's table (whether the - # parent's table is already setup or not). - for col in self._columns: - self.parent._descriptor.add_column(col) - for constraint in self.constraints: - self.parent._descriptor.add_constraint(constraint) - return - elif self.inheritance == 'concrete': - #TODO: we should also copy columns from the parent table - # if the parent is a base (abstract?) entity (whatever the - # inheritance type -> elif will need to be changed) - - # Copy all non-primary key columns from parent table - # (primary key columns have already been copied earlier). - for col in self.parent._descriptor.columns: - if not col.primary_key: - self.add_column(col.copy()) - - for con in self.parent._descriptor.constraints: - self.add_constraint( - ForeignKeyConstraint( - [e.parent.key for e in con.elements], - [e.target_fullname for e in con.elements], - name=con.name, #TODO: modify it - onupdate=con.onupdate, ondelete=con.ondelete, - use_alter=con.use_alter)) - - if self.polymorphic and \ - self.inheritance in ('single', 'multi') and \ - self.children and not self.parent: - self.add_column(Column(self.polymorphic, - options.POLYMORPHIC_COL_TYPE)) - - if self.version_id_col: - if not isinstance(self.version_id_col, basestring): - self.version_id_col = options.DEFAULT_VERSION_ID_COL_NAME - self.add_column(Column(self.version_id_col, Integer)) - - args = list(self.columns) + self.constraints + self.table_args - self.entity.table = Table(self.tablename, self.metadata, - *args, **kwargs) - if DEBUG: - print self.entity.table.repr2() - - def setup_reltables(self): - self.call_builders('create_tables') - - def after_table(self): - self.call_builders('after_table') - - def setup_events(self): - def make_proxy_method(methods): - def proxy_method(self, mapper, connection, instance): - for func in methods: - ret = func(instance) - # I couldn't commit myself to force people to - # systematicaly return EXT_CONTINUE in all their event - # methods. - # But not doing that diverge to how SQLAlchemy works. - # I should try to convince Mike to do EXT_CONTINUE by - # default, and stop processing as the special case. 
-# if ret != EXT_CONTINUE: - if ret is not None and ret != EXT_CONTINUE: - return ret - return EXT_CONTINUE - return proxy_method - - # create a list of callbacks for each event - methods = {} - - all_methods = getmembers(self.entity, - lambda a: isinstance(a, types.MethodType)) - - for name, method in all_methods: - for event in getattr(method, '_elixir_events', []): - event_methods = methods.setdefault(event, []) - event_methods.append(method) - - if not methods: - return - - # transform that list into methods themselves - for event in methods: - methods[event] = make_proxy_method(methods[event]) - - # create a custom mapper extension class, tailored to our entity - ext = type('EventMapperExtension', (MapperExtension,), methods)() - - # then, make sure that the entity's mapper has our mapper extension - self.add_mapper_extension(ext) - - def before_mapper(self): - self.call_builders('before_mapper') - - def _get_children(self): - children = self.children[:] - for child in self.children: - children.extend(child._descriptor._get_children()) - return children - - def translate_order_by(self, order_by): - if isinstance(order_by, basestring): - order_by = [order_by] - - order = [] - for colname in order_by: - #FIXME: get_column uses self.columns[key] instead of property - # names. self.columns correspond to the columns of the table if - # the table was already created and to self._columns otherwise, - # which is a ColumnCollection indexed on columns.key - # See ticket #108. - col = self.get_column(colname.strip('-')) - if colname.startswith('-'): - col = desc(col) - order.append(col) - return order - - def setup_mapper(self): - ''' - Initializes and assign a mapper to the entity. - At this point the mapper will usually have no property as they are - added later. - ''' - if self.entity.mapper: - return - - # for now we don't support the "abstract" parent class in a concrete - # inheritance scenario as demonstrated in - # sqlalchemy/test/orm/inheritance/concrete.py - # this should be added along other - kwargs = {} - if self.order_by: - kwargs['order_by'] = self.translate_order_by(self.order_by) - - if self.version_id_col: - kwargs['version_id_col'] = self.get_column(self.version_id_col) - - if self.inheritance in ('single', 'concrete', 'multi'): - if self.parent and \ - (self.inheritance != 'concrete' or self.polymorphic): - # non-polymorphic concrete doesn't need this - kwargs['inherits'] = self.parent.mapper - - if self.inheritance == 'multi' and self.parent: - kwargs['inherit_condition'] = self.join_condition - - if self.polymorphic: - if self.children: - if self.inheritance == 'concrete': - keys = [(self.identity, self.entity.table)] - keys.extend([(child._descriptor.identity, child.table) - for child in self._get_children()]) - # Having the same alias name for an entity and one of - # its child (which is a parent itself) shouldn't cause - # any problem because the join shouldn't be used at - # the same time. But in reality, some versions of SA - # do misbehave on this. Since it doesn't hurt to have - # different names anyway, here they go. 
- pjoin = polymorphic_union( - dict(keys), self.polymorphic, - 'pjoin_%s' % self.identity) - - kwargs['with_polymorphic'] = ('*', pjoin) - kwargs['polymorphic_on'] = \ - getattr(pjoin.c, self.polymorphic) - elif not self.parent: - kwargs['polymorphic_on'] = \ - self.get_column(self.polymorphic) - - if self.children or self.parent: - kwargs['polymorphic_identity'] = self.identity - - if self.parent and self.inheritance == 'concrete': - kwargs['concrete'] = True - - if self.parent and self.inheritance == 'single': - args = [] - else: - args = [self.entity.table] - - # let user-defined kwargs override Elixir-generated ones, though that's - # not very usefull since most of them expect Column instances. - kwargs.update(self.mapper_options) - - #TODO: document this! - if 'primary_key' in kwargs: - cols = self.entity.table.c - kwargs['primary_key'] = [getattr(cols, colname) for - colname in kwargs['primary_key']] - - # do the mapping - if self.session is None: - self.entity.mapper = mapper(self.entity, *args, **kwargs) - elif isinstance(self.session, ScopedSession): - session_mapper = session_mapper_factory(self.session) - self.entity.mapper = session_mapper(self.entity, *args, **kwargs) - else: - raise Exception("Failed to map entity '%s' with its table or " - "selectable. You can only bind an Entity to a " - "ScopedSession object or None for manual session " - "management." - % self.entity.__name__) - - def after_mapper(self): - self.call_builders('after_mapper') - - def setup_properties(self): - self.call_builders('create_properties') - - def finalize(self): - self.call_builders('finalize') - self.entity._setup_done = True - - #---------------- - # helper methods - - def call_builders(self, what): - for builder in self.builders: - if hasattr(builder, what): - getattr(builder, what)() - - def add_column(self, col, check_duplicate=None): - '''when check_duplicate is None, the value of the allowcoloverride - option of the entity is used. - ''' - if check_duplicate is None: - check_duplicate = not self.allowcoloverride - - if col.key in self._columns: - if check_duplicate: - raise Exception("Column '%s' already exist in '%s' ! " % - (col.key, self.entity.__name__)) - else: - del self._columns[col.key] - # are indexed on col.key - self._columns.add(col) - - if col.primary_key: - self.has_pk = True - - table = self.entity.table - if table is not None: - if check_duplicate and col.key in table.columns.keys(): - raise Exception("Column '%s' already exist in table '%s' ! " % - (col.key, table.name)) - table.append_column(col) - if DEBUG: - print "table.append_column(%s)" % col - - def add_constraint(self, constraint): - self.constraints.append(constraint) - - table = self.entity.table - if table is not None: - table.append_constraint(constraint) - - def add_property(self, name, property, check_duplicate=True): - if check_duplicate and name in self.properties: - raise Exception("property '%s' already exist in '%s' ! " % - (name, self.entity.__name__)) - self.properties[name] = property - -#FIXME: something like this is needed to propagate the relationships from -# parent entities to their children in a concrete inheritance scenario. But -# this doesn't work because of the backref matching code. In most case -# (test_concrete.py) it doesn't even happen at all. 
-# if self.children and self.inheritance == 'concrete': -# for child in self.children: -# child._descriptor.add_property(name, property) - - mapper = self.entity.mapper - if mapper: - mapper.add_property(name, property) - if DEBUG: - print "mapper.add_property('%s', %s)" % (name, repr(property)) - - def add_mapper_extension(self, extension): - extensions = self.mapper_options.get('extension', []) - if not isinstance(extensions, list): - extensions = [extensions] - extensions.append(extension) - self.mapper_options['extension'] = extensions - - def get_column(self, key, check_missing=True): - #TODO: this needs to work whether the table is already setup or not - #TODO: support SA table/autoloaded entity - try: - return self.columns[key] - except KeyError: - if check_missing: - raise Exception("No column named '%s' found in the table of " - "the '%s' entity!" - % (key, self.entity.__name__)) - - def get_inverse_relation(self, rel, check_reverse=True): - ''' - Return the inverse relation of rel, if any, None otherwise. - ''' - - matching_rel = None - for other_rel in self.relationships: - if rel.is_inverse(other_rel): - if matching_rel is None: - matching_rel = other_rel - else: - raise Exception( - "Several relations match as inverse of the '%s' " - "relation in entity '%s'. You should specify " - "inverse relations manually by using the inverse " - "keyword." - % (rel.name, rel.entity.__name__)) - # When a matching inverse is found, we check that it has only - # one relation matching as its own inverse. We don't need the result - # of the method though. But we do need to be careful not to start an - # infinite recursive loop. - if matching_rel and check_reverse: - rel.entity._descriptor.get_inverse_relation(matching_rel, False) - - return matching_rel - - def find_relationship(self, name): - for rel in self.relationships: - if rel.name == name: - return rel - if self.parent: - return self.parent._descriptor.find_relationship(name) - else: - return None - - #------------------------ - # some useful properties - - @property - def table_fullname(self): - ''' - Complete name of the table for the related entity. - Includes the schema name if there is one specified. - ''' - schema = self.table_options.get('schema', None) - if schema is not None: - return "%s.%s" % (schema, self.tablename) - else: - return self.tablename - - @property - def columns(self): - if self.entity.table is not None: - return self.entity.table.columns - else: - #FIXME: depending on the type of inheritance, we should also - # return the parent entity's columns (for example for order_by - # using a column defined in the parent. - return self._columns - - @property - def primary_keys(self): - """ - Returns the list of primary key columns of the entity. - - This property isn't valid before the "create_pk_cols" phase. - """ - if self.autoload: - return [col for col in self.entity.table.primary_key.columns] - else: - if self.parent and self.inheritance == 'single': - return self.parent._descriptor.primary_keys - else: - return [col for col in self.columns if col.primary_key] - - @property - def table(self): - if self.entity.table is not None: - return self.entity.table - else: - return FakeTable(self) - - @property - def primary_key_properties(self): - """ - Returns the list of (mapper) properties corresponding to the primary - key columns of the table of the entity. - - This property caches its value, so it shouldn't be called before the - entity is fully set up. 
- """ - if not hasattr(self, '_pk_props'): - col_to_prop = {} - mapper = self.entity.mapper - for prop in mapper.iterate_properties: - if isinstance(prop, ColumnProperty): - for col in prop.columns: - #XXX: Why is this extra loop necessary? What is this - # "proxy_set" supposed to mean? - for col in col.proxy_set: - col_to_prop[col] = prop - pk_cols = [c for c in mapper.mapped_table.c if c.primary_key] - self._pk_props = [col_to_prop[c] for c in pk_cols] - return self._pk_props - -class FakePK(object): - def __init__(self, descriptor): - self.descriptor = descriptor - - @property - def columns(self): - return self.descriptor.primary_keys - -class FakeTable(object): - def __init__(self, descriptor): - self.descriptor = descriptor - self.primary_key = FakePK(descriptor) - - @property - def columns(self): - return self.descriptor.columns - - @property - def fullname(self): - ''' - Complete name of the table for the related entity. - Includes the schema name if there is one specified. - ''' - schema = self.descriptor.table_options.get('schema', None) - if schema is not None: - return "%s.%s" % (schema, self.descriptor.tablename) - else: - return self.descriptor.tablename - - -def is_entity(cls): - """ - Scan the bases classes of `cls` to see if any is an instance of - EntityMeta. If we don't find any, it means it is either an unrelated class - or an entity base class (like the 'Entity' class). - """ - for base in cls.__bases__: - if isinstance(base, EntityMeta): - return True - return False - - -# Note that we don't use inspect.getmembers because of -# http://bugs.python.org/issue1785 -# See also http://elixir.ematia.de/trac/changeset/262 -def getmembers(object, predicate=None): - base_props = [] - for key in dir(object): - try: - value = getattr(object, key) - except AttributeError: - continue - if not predicate or predicate(value): - base_props.append((key, value)) - return base_props - -def is_abstract_entity(dict_or_cls): - if not isinstance(dict_or_cls, dict): - dict_or_cls = dict_or_cls.__dict__ - for mutator, args, kwargs in dict_or_cls.get(MUTATORS, []): - if 'abstract' in kwargs: - return kwargs['abstract'] - - return False - -def instrument_class(cls): - """ - Instrument a class as an Entity. This is usually done automatically through - the EntityMeta metaclass. - """ - # Create the entity descriptor - desc = cls._descriptor = EntityDescriptor(cls) - - # Process mutators - # We *do* want mutators to be processed for base/abstract classes - # (so that statements like using_options_defaults work). - process_mutators(cls) - - # We do not want to do any more processing for base/abstract classes - # (Entity et al.). - if not is_entity(cls) or is_abstract_entity(cls): - return - - cls.table = None - cls.mapper = None - - # Copy the properties ('Property' instances) of the entity base class(es). - # We use getmembers (instead of __dict__) so that we also get the - # properties from the parents of the base class if any. - base_props = [] - for base in cls.__bases__: - if isinstance(base, EntityMeta) and \ - (not is_entity(base) or is_abstract_entity(base)): - base_props += [(name, deepcopy(attr)) for name, attr in - getmembers(base, lambda a: isinstance(a, Property))] - - # Process attributes (using the assignment syntax), looking for - # 'Property' instances and attaching them to this entity. 
- properties = [(name, attr) for name, attr in cls.__dict__.iteritems() - if isinstance(attr, Property)] - sorted_props = sorted(base_props + properties, - key=lambda i: i[1]._counter) - for name, prop in sorted_props: - prop.attach(cls, name) - - # setup misc options here (like tablename etc.) - desc.setup_options() - - -class EntityMeta(type): - """ - Entity meta class. - You should only use it directly if you want to define your own base class - for your entities (ie you don't want to use the provided 'Entity' class). - """ - - def __init__(cls, name, bases, dict_): - instrument_class(cls) - - def __setattr__(cls, key, value): - if isinstance(value, Property): - if hasattr(cls, '_setup_done'): - raise Exception('Cannot set attribute on a class after ' - 'setup_all') - else: - value.attach(cls, key) - else: - type.__setattr__(cls, key, value) - - -def setup_entities(entities): - '''Setup all entities in the list passed as argument''' - - for entity in entities: - # delete all Elixir properties so that it doesn't interfere with - # SQLAlchemy. At this point they should have be converted to - # builders. - for name, attr in entity.__dict__.items(): - if isinstance(attr, Property): - delattr(entity, name) - - for method_name in ( - 'setup_autoload_table', 'create_pk_cols', 'setup_relkeys', - 'before_table', 'setup_table', 'setup_reltables', 'after_table', - 'setup_events', - 'before_mapper', 'setup_mapper', 'after_mapper', - 'setup_properties', - 'finalize'): -# if DEBUG: -# print "=" * 40 -# print method_name -# print "=" * 40 - for entity in entities: -# print entity.__name__, "...", - if hasattr(entity, '_setup_done'): -# print "already done" - continue - method = getattr(entity._descriptor, method_name) - method() -# print "ok" - - -def cleanup_entities(entities): - """ - Try to revert back the list of entities passed as argument to the state - they had just before their setup phase. - - As of now, this function is *not* functional in that it doesn't revert to - the exact same state the entities were before setup. For example, the - properties do not work yet as those would need to be regenerated (since the - columns they are based on are regenerated too -- and as such the - corresponding joins are not correct) but this doesn't happen because of - the way relationship setup is designed to be called only once (especially - the backref stuff in create_properties). - """ - for entity in entities: - desc = entity._descriptor - - if hasattr(entity, '_setup_done'): - del entity._setup_done - - entity.table = None - entity.mapper = None - - desc._pk_col_done = False - desc.has_pk = False - desc._columns = ColumnCollection() - desc.constraints = [] - desc.properties = {} - -class EntityBase(object): - """ - This class holds all methods of the "Entity" base class, but does not act - as a base class itself (it does not use the EntityMeta metaclass), but - rather as a parent class for Entity. This is meant so that people who want - to provide their own base class but don't want to loose or copy-paste all - the methods of Entity can do so by inheriting from EntityBase: - - .. 
sourcecode:: python - - class MyBase(EntityBase): - __metaclass__ = EntityMeta - - def myCustomMethod(self): - # do something great - """ - - def __init__(self, **kwargs): - self.set(**kwargs) - - def set(self, **kwargs): - for key, value in kwargs.iteritems(): - setattr(self, key, value) - - @classmethod - def update_or_create(cls, data, surrogate=True): - pk_props = cls._descriptor.primary_key_properties - - # if all pk are present and not None - if not [1 for p in pk_props if data.get(p.key) is None]: - pk_tuple = tuple([data[prop.key] for prop in pk_props]) - record = cls.query.get(pk_tuple) - if record is None: - if surrogate: - raise Exception("Cannot create surrogate with pk") - else: - record = cls() - else: - if surrogate: - record = cls() - else: - raise Exception("Cannot create non surrogate without pk") - record.from_dict(data) - return record - - def from_dict(self, data): - """ - Update a mapped class with data from a JSON-style nested dict/list - structure. - """ - # surrogate can be guessed from autoincrement/sequence but I guess - # that's not 100% reliable, so we'll need an override - - mapper = sqlalchemy.orm.object_mapper(self) - - for key, value in data.iteritems(): - if isinstance(value, dict): - dbvalue = getattr(self, key) - rel_class = mapper.get_property(key).mapper.class_ - pk_props = rel_class._descriptor.primary_key_properties - - # If the data doesn't contain any pk, and the relationship - # already has a value, update that record. - if not [1 for p in pk_props if p.key in data] and \ - dbvalue is not None: - dbvalue.from_dict(value) - else: - record = rel_class.update_or_create(value) - setattr(self, key, record) - elif isinstance(value, list) and \ - value and isinstance(value[0], dict): - - rel_class = mapper.get_property(key).mapper.class_ - new_attr_value = [] - for row in value: - if not isinstance(row, dict): - raise Exception( - 'Cannot send mixed (dict/non dict) data ' - 'to list relationships in from_dict data.') - record = rel_class.update_or_create(row) - new_attr_value.append(record) - setattr(self, key, new_attr_value) - else: - setattr(self, key, value) - - def to_dict(self, deep={}, exclude=[]): - """Generate a JSON-style nested dict/list structure from an object.""" - col_prop_names = [p.key for p in self.mapper.iterate_properties \ - if isinstance(p, ColumnProperty)] - data = dict([(name, getattr(self, name)) - for name in col_prop_names if name not in exclude]) - for rname, rdeep in deep.iteritems(): - dbdata = getattr(self, rname) - #FIXME: use attribute names (ie coltoprop) instead of column names - fks = self.mapper.get_property(rname).remote_side - exclude = [c.name for c in fks] - if dbdata is None: - data[rname] = None - elif isinstance(dbdata, list): - data[rname] = [o.to_dict(rdeep, exclude) for o in dbdata] - else: - data[rname] = dbdata.to_dict(rdeep, exclude) - return data - - # session methods - def flush(self, *args, **kwargs): - return object_session(self).flush([self], *args, **kwargs) - - def delete(self, *args, **kwargs): - return object_session(self).delete(self, *args, **kwargs) - - def expire(self, *args, **kwargs): - return object_session(self).expire(self, *args, **kwargs) - - def refresh(self, *args, **kwargs): - return object_session(self).refresh(self, *args, **kwargs) - - def expunge(self, *args, **kwargs): - return object_session(self).expunge(self, *args, **kwargs) - - # This bunch of session methods, along with all the query methods below - # only make sense when using a global/scoped/contextual session. 
- @property - def _global_session(self): - return self._descriptor.session.registry() - - #FIXME: remove all deprecated methods, possibly all of these - def merge(self, *args, **kwargs): - return self._global_session.merge(self, *args, **kwargs) - - def save(self, *args, **kwargs): - return self._global_session.save(self, *args, **kwargs) - - def update(self, *args, **kwargs): - return self._global_session.update(self, *args, **kwargs) - - # only exist in SA < 0.5 - # IMO, the replacement (session.add) doesn't sound good enough to be added - # here. For example: "o = Order(); o.add()" is not very telling. It's - # better to leave it as "session.add(o)" - def save_or_update(self, *args, **kwargs): - return self._global_session.save_or_update(self, *args, **kwargs) - - # query methods - @classmethod - def get_by(cls, *args, **kwargs): - """ - Returns the first instance of this class matching the given criteria. - This is equivalent to: - session.query(MyClass).filter_by(...).first() - """ - return cls.query.filter_by(*args, **kwargs).first() - - @classmethod - def get(cls, *args, **kwargs): - """ - Return the instance of this class based on the given identifier, - or None if not found. This is equivalent to: - session.query(MyClass).get(...) - """ - return cls.query.get(*args, **kwargs) - - -class Entity(EntityBase): - ''' - The base class for all entities - - All Elixir model objects should inherit from this class. Statements can - appear within the body of the definition of an entity to define its - fields, relationships, and other options. - - Here is an example: - - .. sourcecode:: python - - class Person(Entity): - name = Field(Unicode(128)) - birthdate = Field(DateTime, default=datetime.now) - - Please note, that if you don't specify any primary keys, Elixir will - automatically create one called ``id``. - - For further information, please refer to the provided examples or - tutorial. - ''' - __metaclass__ = EntityMeta - - diff --git a/libs/elixir/events.py b/libs/elixir/events.py deleted file mode 100644 index 293a8a4a..00000000 --- a/libs/elixir/events.py +++ /dev/null @@ -1,27 +0,0 @@ -from sqlalchemy.orm import reconstructor - -__all__ = [ - 'before_insert', - 'after_insert', - 'before_update', - 'after_update', - 'before_delete', - 'after_delete', - 'reconstructor' -] - -def create_decorator(event_name): - def decorator(func): - if not hasattr(func, '_elixir_events'): - func._elixir_events = [] - func._elixir_events.append(event_name) - return func - return decorator - -before_insert = create_decorator('before_insert') -after_insert = create_decorator('after_insert') -before_update = create_decorator('before_update') -after_update = create_decorator('after_update') -before_delete = create_decorator('before_delete') -after_delete = create_decorator('after_delete') - diff --git a/libs/elixir/ext/__init__.py b/libs/elixir/ext/__init__.py deleted file mode 100644 index c8708f25..00000000 --- a/libs/elixir/ext/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -''' -Ext package - -Additional Elixir statements and functionality. -''' diff --git a/libs/elixir/ext/associable.py b/libs/elixir/ext/associable.py deleted file mode 100644 index b31c5a74..00000000 --- a/libs/elixir/ext/associable.py +++ /dev/null @@ -1,234 +0,0 @@ -''' -Associable Elixir Statement Generator - -========== -Associable -========== - -About Polymorphic Associations ------------------------------- - -A frequent pattern in database schemas is the has_and_belongs_to_many, or a -many-to-many table. 
Quite often multiple tables will refer to a single one
-creating quite a few many-to-many intermediate tables.
-
-Polymorphic associations reduce the number of many-to-many tables by setting
-up a table that allows relations to any other table in the database, and
-relates it to the associable table. In some implementations, this layout does
-not enforce referential integrity with database foreign key constraints; this
-implementation uses an additional many-to-many table with foreign key
-constraints to avoid that problem.
-
-.. note::
-    SQLite does not support foreign key constraints, so referential integrity
-    can only be enforced using database backends with such support.
-
-Elixir Statement Generator for Polymorphic Associations
--------------------------------------------------------
-
-The ``associable`` function generates the intermediary tables for an Elixir
-entity that should be associable with other Elixir entities and returns an
-Elixir Statement for use with them. This automates the process of creating the
-polymorphic association tables and ensuring their referential integrity.
-
-Matching select_XXX and select_by_XXX methods are also added to the
-associated entity; these allow queries to be run for the associated objects.
-
-Example usage:
-
-.. sourcecode:: python
-
-    class Tag(Entity):
-        name = Field(Unicode)
-
-    acts_as_taggable = associable(Tag)
-
-    class Entry(Entity):
-        title = Field(Unicode)
-        acts_as_taggable('tags')
-
-    class Article(Entity):
-        title = Field(Unicode)
-        acts_as_taggable('tags')
-
-Or if one of the entities being associated should only have a single member of
-the associated table:
-
-.. sourcecode:: python
-
-    class Address(Entity):
-        street = Field(String(130))
-        city = Field(String(100))
-
-    is_addressable = associable(Address, 'addresses')
-
-    class Person(Entity):
-        name = Field(Unicode)
-        orders = OneToMany('Order')
-        is_addressable()
-
-    class Order(Entity):
-        order_num = Field(primary_key=True)
-        item_count = Field(Integer)
-        person = ManyToOne('Person')
-        is_addressable('address', uselist=False)
-
-    home = Address(street='123 Elm St.', city='Spooksville')
-    user = Person(name='Jane Doe')
-    user.addresses.append(home)
-
-    neworder = Order(item_count=4)
-    neworder.address = home
-    user.orders.append(neworder)
-
-    # Queries using the added helpers
-    Person.select_by_addresses(city='Cupertino')
-    Person.select_addresses(and_(Address.c.street == '123 Elm St.',
-                                 Address.c.city == 'Smallville'))
-
-Statement Options
------------------
-
-The generated Elixir Statement has several options available:
-
-+---------------+-------------------------------------------------------------+
-| Option Name   | Description                                                 |
-+===============+=============================================================+
-| ``name``      | Specify a custom name for the Entity attribute. This is    |
-|               | used to declare the attribute used to access the associated |
-|               | table values. Otherwise, the name will use the plural_name |
-|               | provided to the associable call.                            |
-+---------------+-------------------------------------------------------------+
-| ``uselist``   | Whether or not the associated table should be represented  |
-|               | as a list, or a single property. It should be set to False |
-|               | when the entity should only have a single associated       |
-|               | entity. Defaults to True.                                   |
-+---------------+-------------------------------------------------------------+
-| ``lazy``      | Determines eager loading of the associated entity objects. |
-|               | Defaults to False, to indicate that they should not be     |
-|               | lazily loaded.                                              |
-+---------------+-------------------------------------------------------------+
-'''
-from elixir.statements import Statement
-import sqlalchemy as sa
-
-__doc_all__ = ['associable']
-
-
-def associable(assoc_entity, plural_name=None, lazy=True):
-    '''
-    Generate an associable Elixir Statement
-    '''
-    interface_name = assoc_entity._descriptor.tablename
-    able_name = interface_name + 'able'
-
-    if plural_name:
-        attr_name = "%s_rel" % plural_name
-    else:
-        plural_name = interface_name
-        attr_name = "%s_rel" % interface_name
-
-    class GenericAssoc(object):
-
-        def __init__(self, tablename):
-            self.type = tablename
-
-    #TODO: inherit from entity builder
-    class Associable(object):
-        """An associable Elixir Statement object"""
-
-        def __init__(self, entity, name=None, uselist=True, lazy=True):
-            self.entity = entity
-            self.lazy = lazy
-            self.uselist = uselist
-
-            if name is None:
-                self.name = plural_name
-            else:
-                self.name = name
-
-        def after_table(self):
-            col = sa.Column('%s_assoc_id' % interface_name, sa.Integer,
-                            sa.ForeignKey('%s.id' % able_name))
-            self.entity._descriptor.add_column(col)
-
-            if not hasattr(assoc_entity, '_assoc_table'):
-                metadata = assoc_entity._descriptor.metadata
-                association_table = sa.Table("%s" % able_name, metadata,
-                    sa.Column('id', sa.Integer, primary_key=True),
-                    sa.Column('type', sa.String(40), nullable=False),
-                )
-                tablename = "%s_to_%s" % (able_name, interface_name)
-                association_to_table = sa.Table(tablename, metadata,
-                    sa.Column('assoc_id', sa.Integer,
-                              sa.ForeignKey(association_table.c.id,
-                                            ondelete="CASCADE"),
-                              primary_key=True),
-                    #FIXME: this assumes a single id col
-                    sa.Column('%s_id' % interface_name, sa.Integer,
-                              sa.ForeignKey(assoc_entity.table.c.id,
-                                            ondelete="RESTRICT"),
-                              primary_key=True),
-                )
-
-                assoc_entity._assoc_table = association_table
-                assoc_entity._assoc_to_table = association_to_table
-
-        def after_mapper(self):
-            if not hasattr(assoc_entity, '_assoc_mapper'):
-                assoc_entity._assoc_mapper = sa.orm.mapper(
-                    GenericAssoc, assoc_entity._assoc_table, properties={
-                        'targets': sa.orm.relation(
-                                       assoc_entity,
-                                       secondary=assoc_entity._assoc_to_table,
-                                       lazy=lazy, backref='associations',
-                                       order_by=assoc_entity.mapper.order_by)
-                    })
-
-            entity = self.entity
-            entity.mapper.add_property(
-                attr_name,
-                sa.orm.relation(GenericAssoc, lazy=self.lazy,
-                                backref='_backref_%s' % entity.table.name)
-            )
-
-            if self.uselist:
-                def get(self):
-                    if getattr(self, attr_name) is None:
-                        setattr(self, attr_name,
-                                GenericAssoc(entity.table.name))
-                    return getattr(self, attr_name).targets
-                setattr(entity, self.name, property(get))
-            else:
-                # scalar based property decorator
-                def get(self):
-                    attr = getattr(self, attr_name)
-                    if attr is not None:
-                        return attr.targets[0]
-                    else:
-                        return None
-                def set(self, value):
-                    if getattr(self, attr_name) is None:
-                        setattr(self, attr_name,
-                                GenericAssoc(entity.table.name))
-                    getattr(self, attr_name).targets = [value]
-                setattr(entity, self.name, property(get, set))
-
-            # self.name is both set via mapper synonym and the python
-            # property, but that's how synonym properties work.
- # adding synonym property after "real" property otherwise it - # breaks when using SQLAlchemy > 0.4.1 - entity.mapper.add_property(self.name, sa.orm.synonym(attr_name)) - - # add helper methods - def select_by(cls, **kwargs): - return cls.query.join(attr_name, 'targets') \ - .filter_by(**kwargs).all() - setattr(entity, 'select_by_%s' % self.name, classmethod(select_by)) - - def select(cls, *args, **kwargs): - return cls.query.join(attr_name, 'targets') \ - .filter(*args, **kwargs).all() - setattr(entity, 'select_%s' % self.name, classmethod(select)) - - return Statement(Associable) diff --git a/libs/elixir/ext/encrypted.py b/libs/elixir/ext/encrypted.py deleted file mode 100644 index 410855d2..00000000 --- a/libs/elixir/ext/encrypted.py +++ /dev/null @@ -1,124 +0,0 @@ -''' -An encryption plugin for Elixir utilizing the excellent PyCrypto library, which -can be downloaded here: http://www.amk.ca/python/code/crypto - -Values for columns that are specified to be encrypted will be transparently -encrypted and safely encoded for storage in a unicode column using the powerful -and secure Blowfish Cipher using a specified "secret" which can be passed into -the plugin at class declaration time. - -Example usage: - -.. sourcecode:: python - - from elixir import * - from elixir.ext.encrypted import acts_as_encrypted - - class Person(Entity): - name = Field(Unicode) - password = Field(Unicode) - ssn = Field(Unicode) - acts_as_encrypted(for_fields=['password', 'ssn'], - with_secret='secret') - -The above Person entity will automatically encrypt and decrypt the password and -ssn columns on save, update, and load. Different secrets can be specified on -an entity by entity basis, for added security. - -**Important note**: instance attributes are encrypted in-place. This means that -if one of the encrypted attributes of an instance is accessed after the -instance has been flushed to the database (and thus encrypted), the value for -that attribute will be crypted in the in-memory object in addition to the -database row. 
-'''
-
-from Crypto.Cipher import Blowfish
-from elixir.statements import Statement
-from sqlalchemy.orm import MapperExtension, EXT_CONTINUE, EXT_STOP
-
-try:
-    from sqlalchemy.orm import EXT_PASS
-    SA05orlater = False
-except ImportError:
-    SA05orlater = True
-
-__all__ = ['acts_as_encrypted']
-__doc_all__ = []
-
-
-#
-# encryption and decryption functions
-#
-
-def encrypt_value(value, secret):
-    return Blowfish.new(secret, Blowfish.MODE_CFB) \
-                   .encrypt(value).encode('string_escape')
-
-def decrypt_value(value, secret):
-    return Blowfish.new(secret, Blowfish.MODE_CFB) \
-                   .decrypt(value.decode('string_escape'))
-
-
-#
-# acts_as_encrypted statement
-#
-
-class ActsAsEncrypted(object):
-
-    def __init__(self, entity, for_fields=[], with_secret='abcdef'):
-
-        def perform_encryption(instance, encrypt=True):
-            encrypted = getattr(instance, '_elixir_encrypted', None)
-            if encrypted is encrypt:
-                # skipping encryption or decryption, as it is already done
-                return
-            else:
-                # marking instance as already encrypted/decrypted
-                instance._elixir_encrypted = encrypt
-
-            if encrypt:
-                func = encrypt_value
-            else:
-                func = decrypt_value
-
-            for column_name in for_fields:
-                current_value = getattr(instance, column_name)
-                if current_value:
-                    setattr(instance, column_name,
-                            func(current_value, with_secret))
-
-        def perform_decryption(instance):
-            perform_encryption(instance, encrypt=False)
-
-        class EncryptedMapperExtension(MapperExtension):
-
-            def before_insert(self, mapper, connection, instance):
-                perform_encryption(instance)
-                return EXT_CONTINUE
-
-            def before_update(self, mapper, connection, instance):
-                perform_encryption(instance)
-                return EXT_CONTINUE
-
-            if SA05orlater:
-                def reconstruct_instance(self, mapper, instance):
-                    perform_decryption(instance)
-                    # no special return value is required for
-                    # reconstruct_instance, but you never know...
-                    return EXT_CONTINUE
-            else:
-                def populate_instance(self, mapper, selectcontext, row,
-                                      instance, *args, **kwargs):
-                    mapper.populate_instance(selectcontext, instance, row,
-                                             *args, **kwargs)
-                    perform_decryption(instance)
-                    # EXT_STOP because we already did populate the instance and
-                    # the normal processing should not happen
-                    return EXT_STOP
-
-        # make sure that the entity's mapper has our mapper extension
-        entity._descriptor.add_mapper_extension(EncryptedMapperExtension())
-
-
-acts_as_encrypted = Statement(ActsAsEncrypted)
-
diff --git a/libs/elixir/ext/perform_ddl.py b/libs/elixir/ext/perform_ddl.py
deleted file mode 100644
index bb8528df..00000000
--- a/libs/elixir/ext/perform_ddl.py
+++ /dev/null
@@ -1,106 +0,0 @@
-'''
-DDL statements for Elixir.
-
-Entities having the perform_ddl statement will automatically execute the
-given DDL statement, at the given moment: either before or after the table
-creation in SQL.
-
-The 'when' argument can be either 'before-create' or 'after-create'.
-The 'statement' argument can be one of:
-
-- a single string statement
-- a list of string statements, in which case, each of them will be executed
-  in turn.
-- a callable which should take no argument and return either a single string
-  or a list of strings.
-
-In each string statement, you may use the special '%(fullname)s' construct,
-that will be replaced with the real table name including schema, if unknown
-to you. Also, the self-explanatory '%(table)s' and '%(schema)s' may be used
-here.
-
-You would use this extension to handle non-Elixir SQL statements, like
-triggers etc.
-
-.. sourcecode:: python
-
-    class Movie(Entity):
-        title = Field(Unicode(30), primary_key=True)
-        year = Field(Integer)
-
-        perform_ddl('after-create',
-                    "insert into %(fullname)s values ('Alien', 1979)")
-
-preload_data is a more specific statement meant to preload data in your
-entity table from a list of tuples (of field values for each row).
-
-.. sourcecode:: python
-
-    class Movie(Entity):
-        title = Field(Unicode(30), primary_key=True)
-        year = Field(Integer)
-
-        preload_data(('title', 'year'),
-                     [(u'Alien', 1979), (u'Star Wars', 1977)])
-        preload_data(('year', 'title'),
-                     [(1982, u'Blade Runner')])
-        preload_data(data=[(u'Batman', 1966)])
-'''
-
-from elixir.statements import Statement
-from elixir.properties import EntityBuilder
-from sqlalchemy import DDL
-
-__all__ = ['perform_ddl', 'preload_data']
-__doc_all__ = []
-
-#
-# the perform_ddl statement
-#
-class PerformDDLEntityBuilder(EntityBuilder):
-
-    def __init__(self, entity, when, statement, on=None, context=None):
-        self.entity = entity
-        self.when = when
-        self.statement = statement
-        self.on = on
-        self.context = context
-
-    def after_table(self):
-        statement = self.statement
-        if hasattr(statement, '__call__'):
-            statement = statement()
-        if not isinstance(statement, list):
-            statement = [statement]
-        for s in statement:
-            ddl = DDL(s, self.on, self.context)
-            ddl.execute_at(self.when, self.entity.table)
-
-perform_ddl = Statement(PerformDDLEntityBuilder)
-
-#
-# the preload_data statement
-#
-class PreloadDataEntityBuilder(EntityBuilder):
-
-    def __init__(self, entity, columns=None, data=None):
-        self.entity = entity
-        self.columns = columns
-        self.data = data
-
-    def after_table(self):
-        all_columns = [col.name for col in self.entity.table.columns]
-        def onload(event, schema_item, connection):
-            columns = self.columns
-            if columns is None:
-                columns = all_columns
-            data = self.data
-            if hasattr(data, '__call__'):
-                data = data()
-            insert = schema_item.insert()
-            connection.execute(insert,
-                [dict(zip(columns, values)) for values in data])
-
-        self.entity.table.append_ddl_listener('after-create', onload)
-
-preload_data = Statement(PreloadDataEntityBuilder)
-
diff --git a/libs/elixir/ext/versioned.py b/libs/elixir/ext/versioned.py
deleted file mode 100644
index 75f406b0..00000000
--- a/libs/elixir/ext/versioned.py
+++ /dev/null
@@ -1,288 +0,0 @@
-'''
-A versioning plugin for Elixir.
-
-Entities that are marked as versioned with the `acts_as_versioned` statement
-will automatically have a history table created and a timestamp and version
-column added to their tables. In addition, versioned entities are provided
-with four new methods: revert, revert_to, compare_with and get_as_of, and one
-new attribute: versions. Entities with compound primary keys are supported.
-
-The `versions` attribute will contain a list of previous versions of the
-instance, in increasing version number order.
-
-The `get_as_of` method will retrieve a previous version of the instance "as of"
-a specified datetime. If the current version is the most recent, it will be
-returned.
-
-The `revert` method will rollback the current instance to its previous version,
-if possible. Once reverted, the current instance will be expired from the
-session, and you will need to fetch it again to retrieve the now reverted
-instance.
-
-The `revert_to` method will rollback the current instance to the specified
-version number, if possible.
Once reverted, the current instance will be expired -from the session, and you will need to fetch it again to retrieve the now -reverted instance. - -The `compare_with` method will compare the instance with a previous version. A -dictionary will be returned with each field difference as an element in the -dictionary where the key is the field name and the value is a tuple of the -format (current_value, version_value). Version instances also have a -`compare_with` method so that two versions can be compared. - -Also included in the module is a `after_revert` decorator that can be used to -decorate methods on the versioned entity that will be called following that -instance being reverted. - -The acts_as_versioned statement also accepts an optional `ignore` argument -that consists of a list of strings, specifying names of fields. Changes in -those fields will not result in a version increment. In addition, you can -pass in an optional `check_concurrent` argument, which will use SQLAlchemy's -built-in optimistic concurrency mechanisms. - -Note that relationships that are stored in mapping tables will not be included -as part of the versioning process, and will need to be handled manually. Only -values within the entity's main table will be versioned into the history table. -''' - -from datetime import datetime -import inspect - -from sqlalchemy import Table, Column, and_, desc -from sqlalchemy.orm import mapper, MapperExtension, EXT_CONTINUE, \ - object_session - -from elixir import Integer, DateTime -from elixir.statements import Statement -from elixir.properties import EntityBuilder -from elixir.entity import getmembers - -__all__ = ['acts_as_versioned', 'after_revert'] -__doc_all__ = [] - -# -# utility functions -# - -def get_entity_where(instance): - clauses = [] - for column in instance.table.primary_key.columns: - instance_value = getattr(instance, column.name) - clauses.append(column==instance_value) - return and_(*clauses) - - -def get_history_where(instance): - clauses = [] - history_columns = instance.__history_table__.primary_key.columns - for column in instance.table.primary_key.columns: - instance_value = getattr(instance, column.name) - history_column = getattr(history_columns, column.name) - clauses.append(history_column==instance_value) - return and_(*clauses) - - -# -# a mapper extension to track versions on insert, update, and delete -# - -class VersionedMapperExtension(MapperExtension): - def before_insert(self, mapper, connection, instance): - version_colname, timestamp_colname = \ - instance.__class__.__versioned_column_names__ - setattr(instance, version_colname, 1) - setattr(instance, timestamp_colname, datetime.now()) - return EXT_CONTINUE - - def before_update(self, mapper, connection, instance): - old_values = instance.table.select(get_entity_where(instance)) \ - .execute().fetchone() - - # SA might've flagged this for an update even though it didn't change. - # This occurs when a relation is updated, thus marking this instance - # for a save/update operation. We check here against the last version - # to ensure we really should save this version and update the version - # data. 
- ignored = instance.__class__.__ignored_fields__ - version_colname, timestamp_colname = \ - instance.__class__.__versioned_column_names__ - for key in instance.table.c.keys(): - if key in ignored: - continue - if getattr(instance, key) != old_values[key]: - # the instance was really updated, so we create a new version - dict_values = dict(old_values.items()) - connection.execute( - instance.__class__.__history_table__.insert(), dict_values) - old_version = getattr(instance, version_colname) - setattr(instance, version_colname, old_version + 1) - setattr(instance, timestamp_colname, datetime.now()) - break - - return EXT_CONTINUE - - def before_delete(self, mapper, connection, instance): - connection.execute(instance.__history_table__.delete( - get_history_where(instance) - )) - return EXT_CONTINUE - - -versioned_mapper_extension = VersionedMapperExtension() - - -# -# the acts_as_versioned statement -# - -class VersionedEntityBuilder(EntityBuilder): - - def __init__(self, entity, ignore=None, check_concurrent=False, - column_names=None): - self.entity = entity - self.add_mapper_extension(versioned_mapper_extension) - #TODO: we should rather check that the version_id_col isn't set - # externally - self.check_concurrent = check_concurrent - - # Changes in these fields will be ignored - if column_names is None: - column_names = ['version', 'timestamp'] - entity.__versioned_column_names__ = column_names - if ignore is None: - ignore = [] - ignore.extend(column_names) - entity.__ignored_fields__ = ignore - - def create_non_pk_cols(self): - # add a version column to the entity, along with a timestamp - version_colname, timestamp_colname = \ - self.entity.__versioned_column_names__ - #XXX: fail in case the columns already exist? - #col_names = [col.name for col in self.entity._descriptor.columns] - #if version_colname not in col_names: - self.add_table_column(Column(version_colname, Integer)) - #if timestamp_colname not in col_names: - self.add_table_column(Column(timestamp_colname, DateTime)) - - # add a concurrent_version column to the entity, if required - if self.check_concurrent: - self.entity._descriptor.version_id_col = 'concurrent_version' - - # we copy columns from the main entity table, so we need it to exist first - def after_table(self): - entity = self.entity - version_colname, timestamp_colname = \ - entity.__versioned_column_names__ - - # look for events - after_revert_events = [] - for name, func in getmembers(entity, inspect.ismethod): - if getattr(func, '_elixir_after_revert', False): - after_revert_events.append(func) - - # create a history table for the entity - skipped_columns = [version_colname] - if self.check_concurrent: - skipped_columns.append('concurrent_version') - - columns = [ - column.copy() for column in entity.table.c - if column.name not in skipped_columns - ] - columns.append(Column(version_colname, Integer, primary_key=True)) - table = Table(entity.table.name + '_history', entity.table.metadata, - *columns - ) - entity.__history_table__ = table - - # create an object that represents a version of this entity - class Version(object): - pass - - # map the version class to the history table for this entity - Version.__name__ = entity.__name__ + 'Version' - Version.__versioned_entity__ = entity - mapper(Version, entity.__history_table__) - - version_col = getattr(table.c, version_colname) - timestamp_col = getattr(table.c, timestamp_colname) - - # attach utility methods and properties to the entity - def get_versions(self): - v = object_session(self).query(Version) 
\ - .filter(get_history_where(self)) \ - .order_by(version_col) \ - .all() - # history contains all the previous records. - # Add the current one to the list to get all the versions - v.append(self) - return v - - def get_as_of(self, dt): - # if the passed in timestamp is older than our current version's - # time stamp, then the most recent version is our current version - if getattr(self, timestamp_colname) < dt: - return self - - # otherwise, we need to look to the history table to get our - # older version - sess = object_session(self) - query = sess.query(Version) \ - .filter(and_(get_history_where(self), - timestamp_col <= dt)) \ - .order_by(desc(timestamp_col)).limit(1) - return query.first() - - def revert_to(self, to_version): - if isinstance(to_version, Version): - to_version = getattr(to_version, version_colname) - - old_version = table.select(and_( - get_history_where(self), - version_col == to_version - )).execute().fetchone() - - entity.table.update(get_entity_where(self)).execute( - dict(old_version.items()) - ) - - table.delete(and_(get_history_where(self), - version_col >= to_version)).execute() - self.expire() - for event in after_revert_events: - event(self) - - def revert(self): - assert getattr(self, version_colname) > 1 - self.revert_to(getattr(self, version_colname) - 1) - - def compare_with(self, version): - differences = {} - for column in self.table.c: - if column.name in (version_colname, 'concurrent_version'): - continue - this = getattr(self, column.name) - that = getattr(version, column.name) - if this != that: - differences[column.name] = (this, that) - return differences - - entity.versions = property(get_versions) - entity.get_as_of = get_as_of - entity.revert_to = revert_to - entity.revert = revert - entity.compare_with = compare_with - Version.compare_with = compare_with - -acts_as_versioned = Statement(VersionedEntityBuilder) - - -def after_revert(func): - """ - Decorator for watching for revert events. - """ - func._elixir_after_revert = True - return func - - diff --git a/libs/elixir/fields.py b/libs/elixir/fields.py deleted file mode 100644 index 8659cdd8..00000000 --- a/libs/elixir/fields.py +++ /dev/null @@ -1,191 +0,0 @@ -''' -This module provides support for defining the fields (columns) of your -entities. Elixir currently supports two syntaxes to do so: the default -`Attribute-based syntax`_ as well as the has_field_ DSL statement. - -Attribute-based syntax ----------------------- - -Here is a quick example of how to use the object-oriented syntax. - -.. sourcecode:: python - - class Person(Entity): - id = Field(Integer, primary_key=True) - name = Field(String(50), required=True) - ssn = Field(String(50), unique=True) - biography = Field(Text) - join_date = Field(DateTime, default=datetime.datetime.now) - photo = Field(Binary, deferred=True) - _email = Field(String(20), colname='email', synonym='email') - - def _set_email(self, email): - self._email = email - def _get_email(self): - return self._email - email = property(_get_email, _set_email) - - -The Field class takes one mandatory argument, which is its type. Please refer -to SQLAlchemy documentation for a list of `types supported by SQLAlchemy -`_. - -Following that first mandatory argument, fields can take any number of -optional keyword arguments. Please note that all the **arguments** that are -**not specifically processed by Elixir**, as mentioned in the documentation -below **are passed on to the SQLAlchemy ``Column`` object**. 
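-
-For instance, in this minimal sketch (the ``Page`` entity and its fields are
-hypothetical), ``unique`` and ``index`` are not Elixir options and are
-therefore handed over to the generated ``Column``:
-
-.. sourcecode:: python
-
-    class Page(Entity):
-        # 'unique' and 'index' go straight to the underlying
-        # sqlalchemy.Column; Elixir does not process them itself
-        slug = Field(String(100), unique=True, index=True)
-        body = Field(Text)
-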
Please refer to
-the `SQLAlchemy Column object's documentation
-`_ for more details about other
-supported keyword arguments.
-
-The following Elixir-specific arguments are supported:
-
-+-------------------+---------------------------------------------------------+
-| Argument Name     | Description                                             |
-+===================+=========================================================+
-| ``required``      | Specify whether or not this field can be set to None    |
-|                   | (left without a value). Defaults to ``False``, unless   |
-|                   | the field is a primary key.                             |
-+-------------------+---------------------------------------------------------+
-| ``colname``       | Specify a custom name for the column of this field. By  |
-|                   | default the column will have the same name as the       |
-|                   | attribute.                                              |
-+-------------------+---------------------------------------------------------+
-| ``deferred``      | Specify whether this particular column should be        |
-|                   | fetched by default (along with the other columns) when  |
-|                   | an instance of the entity is fetched from the database  |
-|                   | or rather only later on when this particular column is  |
-|                   | first referenced. This can be useful when one wants to  |
-|                   | avoid loading a large text or binary field into memory  |
-|                   | when it's not needed. Individual columns can be lazy    |
-|                   | loaded by themselves (by using ``deferred=True``)       |
-|                   | or placed into groups that lazy-load together (by using |
-|                   | ``deferred`` = `"group_name"`).                         |
-+-------------------+---------------------------------------------------------+
-| ``synonym``       | Specify a synonym name for this field. The field will   |
-|                   | also be usable under that name in keyword-based Query   |
-|                   | functions such as filter_by. The Synonym class (see the |
-|                   | `properties` module) provides similar functionality     |
-|                   | with an (arguably) nicer syntax, but a limited scope.   |
-+-------------------+---------------------------------------------------------+
-
-has_field
----------
-
-The `has_field` statement allows you to define fields one at a time.
-
-The first argument is the name of the field, the second is its type. Following
-these, any number of keyword arguments can be specified for additional
-behavior. The following arguments are supported:
-
-+-------------------+---------------------------------------------------------+
-| Argument Name     | Description                                             |
-+===================+=========================================================+
-| ``through``       | Specify a relation name to go through. This field will  |
-|                   | not exist as a column on the database but will be a     |
-|                   | property which automatically proxies values to the      |
-|                   | ``attribute`` attribute of the object pointed to by the |
-|                   | relation. If the ``attribute`` argument is not present, |
-|                   | the name of the current field will be used. In a        |
-|                   | has_field statement, you can only proxy through a       |
-|                   | belongs_to or a has_one relationship.                   |
-+-------------------+---------------------------------------------------------+
-| ``attribute``     | Name of the "endpoint" attribute to proxy to. This      |
-|                   | should only be used in combination with the ``through`` |
-|                   | argument.                                               |
-+-------------------+---------------------------------------------------------+
-
-
-Here is a quick example of how to use ``has_field``.
-
-..
sourcecode:: python - - class Person(Entity): - has_field('id', Integer, primary_key=True) - has_field('name', String(50)) -''' -from sqlalchemy import Column -from sqlalchemy.orm import deferred, synonym -from sqlalchemy.ext.associationproxy import association_proxy - -from elixir.statements import ClassMutator -from elixir.properties import Property - -__doc_all__ = ['Field'] - - -class Field(Property): - ''' - Represents the definition of a 'field' on an entity. - - This class represents a column on the table where the entity is stored. - ''' - - def __init__(self, type, *args, **kwargs): - super(Field, self).__init__() - - self.colname = kwargs.pop('colname', None) - self.synonym = kwargs.pop('synonym', None) - self.deferred = kwargs.pop('deferred', False) - if 'required' in kwargs: - kwargs['nullable'] = not kwargs.pop('required') - self.type = type - self.primary_key = kwargs.get('primary_key', False) - - self.column = None - self.property = None - - self.args = args - self.kwargs = kwargs - - def attach(self, entity, name): - # If no colname was defined (through the 'colname' kwarg), set - # it to the name of the attr. - if self.colname is None: - self.colname = name - super(Field, self).attach(entity, name) - - def create_pk_cols(self): - if self.primary_key: - self.create_col() - - def create_non_pk_cols(self): - if not self.primary_key: - self.create_col() - - def create_col(self): - self.column = Column(self.colname, self.type, - *self.args, **self.kwargs) - self.add_table_column(self.column) - - def create_properties(self): - if self.deferred: - group = None - if isinstance(self.deferred, basestring): - group = self.deferred - self.property = deferred(self.column, group=group) - elif self.name != self.colname: - # if the property name is different from the column name, we need - # to add an explicit property (otherwise nothing is needed as it's - # done automatically by SA) - self.property = self.column - - if self.property is not None: - self.add_mapper_property(self.name, self.property) - - if self.synonym: - self.add_mapper_property(self.synonym, synonym(self.name)) - - -def has_field_handler(entity, name, *args, **kwargs): - if 'through' in kwargs: - setattr(entity, name, - association_proxy(kwargs.pop('through'), - kwargs.pop('attribute', name), - **kwargs)) - return - field = Field(*args, **kwargs) - field.attach(entity, name) - -has_field = ClassMutator(has_field_handler) diff --git a/libs/elixir/options.py b/libs/elixir/options.py deleted file mode 100644 index 27d7d195..00000000 --- a/libs/elixir/options.py +++ /dev/null @@ -1,274 +0,0 @@ -''' -This module provides support for defining several options on your Elixir -entities. There are three different kinds of options that can be set -up, and for this there are three different statements: using_options_, -using_table_options_ and using_mapper_options_. - -Alternatively, these options can be set on all Elixir entities by modifying -the `options_defaults` dictionary before defining any entity. - -`using_options` ---------------- -The 'using_options' DSL statement allows you to set up some additional -behaviors on your model objects, including table names, ordering, and -more. To specify an option, simply supply the option as a keyword -argument onto the statement, as follows: - -.. 
sourcecode:: python
-
-    class Person(Entity):
-        name = Field(Unicode(64))
-
-        using_options(shortnames=True, order_by='name')
-
-The list of supported arguments is as follows:
-
-+---------------------+-------------------------------------------------------+
-| Option Name         | Description                                           |
-+=====================+=======================================================+
-| ``inheritance``     | Specify the type of inheritance this entity must use. |
-|                     | It can be one of ``single``, ``concrete`` or          |
-|                     | ``multi``. Defaults to ``single``.                    |
-|                     | Note that polymorphic concrete inheritance is         |
-|                     | currently not implemented. See:                       |
-|                     | http://www.sqlalchemy.org/docs/05/mappers.html        |
-|                     | #mapping-class-inheritance-hierarchies for an         |
-|                     | explanation of the different kinds of inheritances.   |
-+---------------------+-------------------------------------------------------+
-| ``abstract``        | Set ``abstract=True`` to declare an abstract entity.  |
-|                     | Abstract base classes are useful when you want to put |
-|                     | some common information into a number of other        |
-|                     | entities. An abstract entity will not be used to      |
-|                     | create any database table. Instead, when it is used   |
-|                     | as a base class for another entity, its fields will   |
-|                     | be added to those of the child class.                 |
-+---------------------+-------------------------------------------------------+
-| ``polymorphic``     | Whether the inheritance should be polymorphic or not. |
-|                     | Defaults to ``True``. The column used to store the    |
-|                     | type of each row is named "row_type" by default. You  |
-|                     | can change this by passing the desired name for the   |
-|                     | column to this argument.                              |
-+---------------------+-------------------------------------------------------+
-| ``identity``        | Specify a custom polymorphic identity. When using     |
-|                     | polymorphic inheritance, this value (usually a        |
-|                     | string) will represent this particular entity         |
-|                     | (class). It will be used to differentiate it from     |
-|                     | other entities (classes) in your inheritance          |
-|                     | hierarchy when loading from the database instances of |
-|                     | different entities in that hierarchy at the same      |
-|                     | time.                                                 |
-|                     | This value will be stored by default in the           |
-|                     | "row_type" column of the entity's table (see above).  |
-|                     | You can either provide a                              |
-|                     | plain string or a callable. The callable will be      |
-|                     | given the entity (ie class) as argument and must      |
-|                     | return a value (usually a string) representing the    |
-|                     | polymorphic identity of that entity.                  |
-|                     | By default, this value is automatically generated: it |
-|                     | is the name of the entity lower-cased.                |
-+---------------------+-------------------------------------------------------+
-| ``metadata``        | Specify a custom MetaData for this entity.            |
-|                     | By default, entities use the global                   |
-|                     | ``elixir.metadata``.                                  |
-|                     | This option can also be set for all entities of a     |
-|                     | module by setting the ``__metadata__`` attribute of   |
-|                     | that module.                                          |
-+---------------------+-------------------------------------------------------+
-| ``autoload``        | Automatically load column definitions from the        |
-|                     | existing database table.                              |
-+---------------------+-------------------------------------------------------+
-| ``tablename``       | Specify a custom tablename. You can either provide a  |
-|                     | plain string or a callable. The callable will be      |
-|                     | given the entity (ie class) as argument and must      |
-|                     | return a string representing the name of the table    |
-|                     | for that entity. By default, the tablename is         |
-|                     | automatically generated: it is a concatenation of the |
-|                     | full module-path to the entity and the entity (class) |
-|                     | name itself. The result is lower-cased and separated  |
-|                     | by underscores ("_"), e.g.: for an entity named       |
-|                     | "MyEntity" in the module "project1.model", the        |
-|                     | generated table name will be                          |
-|                     | "project1_model_myentity".                            |
-+---------------------+-------------------------------------------------------+
-| ``shortnames``      | Specify whether or not the automatically generated    |
-|                     | table names include the full module-path              |
-|                     | to the entity. If ``shortnames`` is ``True``, only    |
-|                     | the entity name is used. Defaults to ``False``.       |
-+---------------------+-------------------------------------------------------+
-| ``auto_primarykey`` | If given as string, it will represent the             |
-|                     | auto-primary-key's column name. If this option        |
-|                     | is True, it will allow auto-creation of a primary     |
-|                     | key if there's no primary key defined for the         |
-|                     | corresponding entity. If this option is False,        |
-|                     | it will disallow auto-creation of a primary key.      |
-|                     | Defaults to ``True``.                                 |
-+---------------------+-------------------------------------------------------+
-| ``version_id_col``  | If this option is True, it will create a version      |
-|                     | column automatically using the default name. If given |
-|                     | as string, it will create the column using that name. |
-|                     | This can be used to prevent concurrent modifications  |
-|                     | to the entity's table rows (i.e. it will raise an     |
-|                     | exception if it happens). Defaults to ``False``.      |
-+---------------------+-------------------------------------------------------+
-| ``order_by``        | How to order select results. Either a string or a     |
-|                     | list of strings, composed of the field name,          |
-|                     | optionally led by a minus (for descending order).     |
-+---------------------+-------------------------------------------------------+
-| ``session``         | Specify a custom contextual session for this entity.  |
-|                     | By default, entities use the global                   |
-|                     | ``elixir.session``.                                   |
-|                     | This option takes a ``ScopedSession`` object or       |
-|                     | ``None``. In the latter case your entity will be      |
-|                     | mapped using a non-contextual mapper which requires   |
-|                     | manual session management, as seen in pure SQLAlchemy.|
-|                     | This option can also be set for all entities of a     |
-|                     | module by setting the ``__session__`` attribute of    |
-|                     | that module.                                          |
-+---------------------+-------------------------------------------------------+
-| ``allowcoloverride``| Specify whether it is allowed to override columns.    |
-|                     | By default, Elixir forbids you to add a column to an  |
-|                     | entity's table which already exists in that table. If |
-|                     | you set this option to ``True`` it will skip that     |
-|                     | check. Use with care as it is easy to shoot oneself   |
-|                     | in the foot when overriding columns.                  |
-+---------------------+-------------------------------------------------------+
-
-For examples, please refer to the examples and unit tests.
-
-`using_table_options`
----------------------
-The 'using_table_options' DSL statement allows you to set up some
-additional options on your entity table. It is meant only to handle the
-options which are not supported directly by the 'using_options' statement.
-In contrast to the 'using_options' statement, these options are passed
-directly to the underlying SQLAlchemy Table object (both non-keyword arguments
-and keyword arguments) without any processing.
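-
-For instance, here is a minimal sketch (the ``Person`` entity and its fields
-are hypothetical) showing how both positional arguments, such as constraint
-objects, and keyword arguments are forwarded to the table:
-
-.. sourcecode:: python
-
-    from sqlalchemy import UniqueConstraint
-
-    class Person(Entity):
-        name = Field(String(30))
-        site_id = Field(Integer)
-
-        # both the constraint and the dialect-specific keyword below are
-        # handed over as-is to the underlying Table object
-        using_table_options(UniqueConstraint('name', 'site_id'),
-                            mysql_engine='InnoDB')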
- -For further information, please refer to the `SQLAlchemy table's documentation -`_. - -You might also be interested in the section about `constraints -`_. - -`using_mapper_options` ----------------------- -The 'using_mapper_options' DSL statement allows you to set up some -additional options on your entity mapper. It is meant only to handle the -options which are not supported directly by the 'using_options' statement. -By opposition to the 'using_options' statement, these options are passed -directly to the underlying SQLAlchemy mapper (as keyword arguments) -without any processing. - -For further information, please refer to the `SQLAlchemy mapper -function's documentation -`_. - -`using_options_defaults` ------------------------- -The 'using_options_defaults' DSL statement allows you to set up some -default options on a custom base class. These will be used as the default value -for options of all its subclasses. Note that any option not set within the -using_options_defaults (nor specifically on a particular Entity) will use the -global defaults, so you don't have to provide a default value for all options, -but only those you want to change. Please also note that this statement does -not work on normal entities, and the normal using_options statement does not -work on base classes (because normal options do not and should not propagate to -the children classes). -''' - -from sqlalchemy import Integer, String - -from elixir.statements import ClassMutator - -__doc_all__ = ['options_defaults'] - -OLD_M2MCOL_NAMEFORMAT = "%(tablename)s_%(key)s%(numifself)s" -ALTERNATE_M2MCOL_NAMEFORMAT = "%(inversename)s_%(key)s" - -def default_m2m_column_formatter(data): - if data['selfref']: - return ALTERNATE_M2MCOL_NAMEFORMAT % data - else: - return OLD_M2MCOL_NAMEFORMAT % data - -NEW_M2MCOL_NAMEFORMAT = default_m2m_column_formatter - -# format constants -FKCOL_NAMEFORMAT = "%(relname)s_%(key)s" -M2MCOL_NAMEFORMAT = NEW_M2MCOL_NAMEFORMAT -CONSTRAINT_NAMEFORMAT = "%(tablename)s_%(colnames)s_fk" -MULTIINHERITANCECOL_NAMEFORMAT = "%(entity)s_%(key)s" - -# other global constants -DEFAULT_AUTO_PRIMARYKEY_NAME = "id" -DEFAULT_AUTO_PRIMARYKEY_TYPE = Integer -DEFAULT_VERSION_ID_COL_NAME = "row_version" -DEFAULT_POLYMORPHIC_COL_NAME = "row_type" -POLYMORPHIC_COL_SIZE = 40 -POLYMORPHIC_COL_TYPE = String(POLYMORPHIC_COL_SIZE) - -# debugging/migration help -MIGRATION_TO_07_AID = False - -# -options_defaults = dict( - abstract=False, - inheritance='single', - polymorphic=True, - identity=None, - autoload=False, - tablename=None, - shortnames=False, - auto_primarykey=True, - version_id_col=False, - allowcoloverride=False, - order_by=None, - resolve_root=None, - mapper_options={}, - table_options={} -) - -valid_options = options_defaults.keys() + [ - 'metadata', - 'session', - 'collection' -] - - -def using_options_defaults_handler(entity, **kwargs): - for kwarg in kwargs: - if kwarg not in valid_options: - raise Exception("'%s' is not a valid option for Elixir entities." - % kwarg) - - # We use __dict__ instead of hasattr to not check its presence within the - # parent, and thus update the parent dict instead of creating a local dict. - if not entity.__dict__.get('options_defaults'): - entity.options_defaults = {} - entity.options_defaults.update(kwargs) - - -def using_options_handler(entity, *args, **kwargs): - for kwarg in kwargs: - if kwarg in valid_options: - setattr(entity._descriptor, kwarg, kwargs[kwarg]) - else: - raise Exception("'%s' is not a valid option for Elixir entities." 
-                            % kwarg)
-
-
-def using_table_options_handler(entity, *args, **kwargs):
-    entity._descriptor.table_args.extend(list(args))
-    entity._descriptor.table_options.update(kwargs)
-
-
-def using_mapper_options_handler(entity, *args, **kwargs):
-    entity._descriptor.mapper_options.update(kwargs)
-
-
-using_options_defaults = ClassMutator(using_options_defaults_handler)
-using_options = ClassMutator(using_options_handler)
-using_table_options = ClassMutator(using_table_options_handler)
-using_mapper_options = ClassMutator(using_mapper_options_handler)
diff --git a/libs/elixir/properties.py b/libs/elixir/properties.py
deleted file mode 100644
index 68ff8fab..00000000
--- a/libs/elixir/properties.py
+++ /dev/null
@@ -1,244 +0,0 @@
-'''
-This module provides support for defining properties on your entities. It
-provides the `Property` class, which acts as a building block for common
-properties such as fields and relationships (for those, please consult the
-corresponding modules), as well as some more specialized properties, such as
-`ColumnProperty` and `Synonym`. It also provides the GenericProperty class,
-which allows you to wrap any SQLAlchemy property, along with its DSL-syntax
-equivalent: has_property_.
-
-`has_property`
---------------
-The ``has_property`` statement allows you to define properties which rely on
-their entity's table (and columns) being defined before they can be declared
-themselves. The `has_property` statement takes two arguments: first the name of
-the property to be defined and second a function (often given as an anonymous
-lambda) taking one argument and returning the desired SQLAlchemy property. That
-function will be called whenever the entity table is completely defined, and
-will be given the .c attribute of the entity as argument (as a way to access
-the entity columns).
-
-Here is a quick example of how to use ``has_property``.
-
-.. sourcecode:: python
-
-    class OrderLine(Entity):
-        has_field('quantity', Float)
-        has_field('unit_price', Float)
-        has_property('price',
-                     lambda c: column_property(
-                         (c.quantity * c.unit_price).label('price')))
-'''
-
-from elixir.statements import PropertyStatement
-from sqlalchemy.orm import column_property, synonym
-
-__doc_all__ = ['EntityBuilder', 'Property', 'GenericProperty',
-               'ColumnProperty']
-
-class EntityBuilder(object):
-    '''
-    Abstract base class for all entity builders. An Entity builder is a class
-    of objects which can be added to an Entity (usually by using special
-    properties or statements) to "build" that entity. Building an entity means
-    adding columns to its "main" table, creating other tables, adding
-    properties to its mapper, and so on. To do so, an EntityBuilder must
-    override the corresponding method(s). This is to ensure the different
-    operations happen in the correct order (for example, that the table is
-    fully created before the mapper that uses it is defined).
-    '''
-    def create_pk_cols(self):
-        pass
-
-    def create_non_pk_cols(self):
-        pass
-
-    def before_table(self):
-        pass
-
-    def create_tables(self):
-        '''
-        Subclasses may override this method to create tables.
-        '''
-
-    def after_table(self):
-        pass
-
-    def create_properties(self):
-        '''
-        Subclasses may override this method to add properties to the involved
-        entity.
- ''' - - def before_mapper(self): - pass - - def after_mapper(self): - pass - - def finalize(self): - pass - - # helper methods - def add_table_column(self, column): - self.entity._descriptor.add_column(column) - - def add_mapper_property(self, name, prop): - self.entity._descriptor.add_property(name, prop) - - def add_mapper_extension(self, ext): - self.entity._descriptor.add_mapper_extension(ext) - - -class CounterMeta(type): - ''' - A simple meta class which adds a ``_counter`` attribute to the instances of - the classes it is used on. This counter is simply incremented for each new - instance. - ''' - counter = 0 - - def __call__(self, *args, **kwargs): - instance = type.__call__(self, *args, **kwargs) - instance._counter = CounterMeta.counter - CounterMeta.counter += 1 - return instance - - -class Property(EntityBuilder): - ''' - Abstract base class for all properties of an Entity. - ''' - __metaclass__ = CounterMeta - - def __init__(self, *args, **kwargs): - self.entity = None - self.name = None - - def attach(self, entity, name): - """Attach this property to its entity, using 'name' as name. - - Properties will be attached in the order they were declared. - """ - self.entity = entity - self.name = name - - # register this property as a builder - entity._descriptor.builders.append(self) - - def __repr__(self): - return "Property(%s, %s)" % (self.name, self.entity) - - -class GenericProperty(Property): - ''' - Generic catch-all class to wrap an SQLAlchemy property. - - .. sourcecode:: python - - class OrderLine(Entity): - quantity = Field(Float) - unit_price = Field(Numeric) - price = GenericProperty(lambda c: column_property( - (c.quantity * c.unit_price).label('price'))) - ''' - - def __init__(self, prop, *args, **kwargs): - super(GenericProperty, self).__init__(*args, **kwargs) - self.prop = prop - #XXX: move this to Property? - self.args = args - self.kwargs = kwargs - - def create_properties(self): - if hasattr(self.prop, '__call__'): - prop_value = self.prop(self.entity.table.c) - else: - prop_value = self.prop - prop_value = self.evaluate_property(prop_value) - self.add_mapper_property(self.name, prop_value) - - def evaluate_property(self, prop): - if self.args or self.kwargs: - raise Exception('superfluous arguments passed to GenericProperty') - return prop - - -class ColumnProperty(GenericProperty): - ''' - A specialized form of the GenericProperty to generate SQLAlchemy - ``column_property``'s. - - It takes a function (often given as an anonymous lambda) as its first - argument. Other arguments and keyword arguments are forwarded to the - column_property construct. That first-argument function must accept exactly - one argument and must return the desired (scalar-returning) SQLAlchemy - ClauseElement. - - The function will be called whenever the entity table is completely - defined, and will be given - the .c attribute of the table of the entity as argument (as a way to - access the entity columns). The ColumnProperty will first wrap your - ClauseElement in an - "empty" label (ie it will be labelled automatically during queries), - then wrap that in a column_property. - - .. sourcecode:: python - - class OrderLine(Entity): - quantity = Field(Float) - unit_price = Field(Numeric) - price = ColumnProperty(lambda c: c.quantity * c.unit_price, - deferred=True) - - Please look at the `corresponding SQLAlchemy - documentation `_ for details. 
-    '''
-
-    def evaluate_property(self, prop):
-        return column_property(prop.label(None), *self.args, **self.kwargs)
-
-
-class Synonym(GenericProperty):
-    '''
-    This class represents a synonym property of another property (column, ...)
-    of an entity. As opposed to the `synonym` kwarg to the Field class (which
-    shares the same goal), this class can be used to define a synonym of a
-    property defined in a parent class (of the current class). On the other
-    hand, it cannot define a synonym for the purpose of using a standard python
-    property in queries. See the Field class for details on that usage.
-
-    .. sourcecode:: python
-
-        class Person(Entity):
-            name = Field(String(30))
-            primary_email = Field(String(100))
-            email_address = Synonym('primary_email')
-
-        class User(Person):
-            user_name = Synonym('name')
-            password = Field(String(20))
-    '''
-
-    def evaluate_property(self, prop):
-        return synonym(prop, *self.args, **self.kwargs)
-
-#class Composite(GenericProperty):
-#    def __init__(self, prop):
-#        super(GenericProperty, self).__init__()
-#        self.prop = prop
-
-#    def evaluate_property(self, prop):
-#        return composite(prop.label(self.name))
-
-#start = Composite(Point, lambda c: (c.x1, c.y1))
-
-#mapper(Vertex, vertices, properties={
-#    'start':composite(Point, vertices.c.x1, vertices.c.y1),
-#    'end':composite(Point, vertices.c.x2, vertices.c.y2)
-#})
-
-
-has_property = PropertyStatement(GenericProperty)
-
diff --git a/libs/elixir/relationships.py b/libs/elixir/relationships.py
deleted file mode 100644
index 6c14dbb6..00000000
--- a/libs/elixir/relationships.py
+++ /dev/null
@@ -1,1247 +0,0 @@
-'''
-This module provides support for defining relationships between your Elixir
-entities. Elixir currently supports two syntaxes to do so: the default
-`Attribute-based syntax`_ which supports the following types of relationships:
-ManyToOne_, OneToMany_, OneToOne_ and ManyToMany_, as well as a
-`DSL-based syntax`_ which provides the following statements: belongs_to_,
-has_many_, has_one_ and has_and_belongs_to_many_.
-
-======================
-Attribute-based syntax
-======================
-
-The first argument to all these "normal" relationship classes is the name of
-the class (entity) you are relating to.
-
-Following that first mandatory argument, any number of additional keyword
-arguments can be specified for advanced behavior. See each relationship type
-for a list of their specific keyword arguments. At this point, we'll just note
-that all the arguments that are not specifically processed by Elixir, as
-mentioned in the documentation below, are passed on to the SQLAlchemy
-``relation`` function. So, please refer to the `SQLAlchemy relation function's
-documentation `_ for further detail about which
-keyword arguments are supported.
-
-You should keep in mind that the following
-keyword arguments are automatically generated by Elixir and should not be used
-unless you want to override the value provided by Elixir: ``uselist``,
-``remote_side``, ``secondary``, ``primaryjoin`` and ``secondaryjoin``.
-
-Additionally, if you want a bidirectional relationship, you should define the
-inverse relationship on the other entity explicitly (as opposed to how
-SQLAlchemy's backrefs are defined). In non-ambiguous situations, Elixir will
-match relationships together automatically.
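-
-For example, a minimal sketch (both entities are hypothetical) of such an
-explicitly declared bidirectional relationship:
-
-.. sourcecode:: python
-
-    class Person(Entity):
-        # matched automatically with Pet.owner below; no 'inverse' needed
-        pets = OneToMany('Pet')
-
-    class Pet(Entity):
-        owner = ManyToOne('Person')
-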
If there are several relationships
-of the same type between two entities, Elixir is not able to determine which
-relationship is the inverse of which, so you have to disambiguate the
-situation by giving the name of the inverse relationship in the ``inverse``
-keyword argument.
-
-Here is a detailed explanation of each relation type:
-
-`ManyToOne`
------------
-
-Describes the child's side of a parent-child relationship. For example,
-a `Pet` object may belong to its owner, who is a `Person`. This could be
-expressed like so:
-
-.. sourcecode:: python
-
-    class Pet(Entity):
-        owner = ManyToOne('Person')
-
-Behind the scenes, assuming the primary key of the `Person` entity is
-an integer column named `id`, the ``ManyToOne`` relationship will
-automatically add an integer column named `owner_id` to the entity, with a
-foreign key referencing the `id` column of the `Person` entity.
-
-In addition to the keyword arguments inherited from SQLAlchemy's relation
-function, ``ManyToOne`` relationships accept the following optional arguments
-which will be directed to the created column:
-
-+----------------------+------------------------------------------------------+
-| Option Name          | Description                                          |
-+======================+======================================================+
-| ``colname``          | Specify a custom name for the foreign key column(s). |
-|                      | This argument accepts either a single string or a    |
-|                      | list of strings. The number of strings passed must   |
-|                      | match the number of primary key columns of the target|
-|                      | entity. If this argument is not used, the name of the|
-|                      | column(s) is generated with the pattern              |
-|                      | defined in options.FKCOL_NAMEFORMAT, which is, by    |
-|                      | default: "%(relname)s_%(key)s", where relname is the |
-|                      | name of the ManyToOne relationship, and 'key' is the |
-|                      | name (key) of the primary column in the target       |
-|                      | entity. That is why, in the above Pet/owner example, |
-|                      | the name of the column would be "owner_id".          |
-+----------------------+------------------------------------------------------+
-| ``required``         | Specify whether or not this field can be set to None |
-|                      | (left without a value). Defaults to ``False``,       |
-|                      | unless the field is a primary key.                   |
-+----------------------+------------------------------------------------------+
-| ``primary_key``      | Specify whether or not the column(s) created by this |
-|                      | relationship should act as a primary_key.            |
-|                      | Defaults to ``False``.                               |
-+----------------------+------------------------------------------------------+
-| ``column_kwargs``    | A dictionary holding any other keyword argument you  |
-|                      | might want to pass to the Column.                    |
-+----------------------+------------------------------------------------------+
-| ``target_column``    | Name (or list of names) of the target column(s).     |
-|                      | If this argument is not specified, the target entity |
-|                      | primary key column(s) are used.                      |
-+----------------------+------------------------------------------------------+
-
-The following optional arguments are also supported to customize the
-ForeignKeyConstraint that is created:
-
-+----------------------+------------------------------------------------------+
-| Option Name          | Description                                          |
-+======================+======================================================+
-| ``use_alter``        | If True, SQLAlchemy will add the constraint in a     |
-|                      | second SQL statement (as opposed to within the       |
-|                      | create table statement). This permits defining       |
-|                      | tables with a circular foreign key dependency        |
-|                      | between them.                                        |
-+----------------------+------------------------------------------------------+
-| ``ondelete``         | Value for the foreign key constraint ondelete clause.|
-|                      | May be one of: ``cascade``, ``restrict``,            |
-|                      | ``set null``, or ``set default``.                    |
-+----------------------+------------------------------------------------------+
-| ``onupdate``         | Value for the foreign key constraint onupdate clause.|
-|                      | May be one of: ``cascade``, ``restrict``,            |
-|                      | ``set null``, or ``set default``.                    |
-+----------------------+------------------------------------------------------+
-| ``constraint_kwargs``| A dictionary holding any other keyword argument you  |
-|                      | might want to pass to the Constraint.                |
-+----------------------+------------------------------------------------------+
-
-In some cases, you may want to declare the foreign key column explicitly,
-instead of letting it be generated automatically. There are several reasons for
-that: it could be because you want to declare it with precise arguments and
-using column_kwargs makes your code ugly, or because the name of
-your column conflicts with the property name (in which case an error is
-thrown). In those cases, you can use the ``field`` argument to specify an
-already-declared field to be used for the foreign key column.
-
-For instance, in the Pet example above, if you want the database column
-(holding the foreign key) to be called 'owner', you should use the field
-parameter to specify the field manually.
-
-.. sourcecode:: python
-
-    class Pet(Entity):
-        owner_id = Field(Integer, colname='owner')
-        owner = ManyToOne('Person', field=owner_id)
-
-+----------------------+------------------------------------------------------+
-| Option Name          | Description                                          |
-+======================+======================================================+
-| ``field``            | Specify the previously-declared field to be used for |
-|                      | the foreign key column. Use of this parameter is     |
-|                      | mutually exclusive with the colname and column_kwargs|
-|                      | arguments.                                           |
-+----------------------+------------------------------------------------------+
-
-
-Additionally, Elixir supports the belongs_to_ statement as an alternative,
-DSL-based, syntax to define ManyToOne_ relationships.
-
-
-`OneToMany`
------------
-
-Describes the parent's side of a parent-child relationship when there can be
-several children. For example, a `Person` object has many children, each of
-them being a `Person`. This could be expressed like so:
-
-.. sourcecode:: python
-
-    class Person(Entity):
-        parent = ManyToOne('Person')
-        children = OneToMany('Person')
-
-Note that a ``OneToMany`` relationship **cannot exist** without a
-corresponding ``ManyToOne`` relationship in the other way. This is because the
-``OneToMany`` relationship needs the foreign key created by the ``ManyToOne``
-relationship.
-
-In addition to keyword arguments inherited from SQLAlchemy, ``OneToMany``
-relationships accept the following optional (keyword) arguments:
-
-+--------------------+--------------------------------------------------------+
-| Option Name        | Description                                            |
-+====================+========================================================+
-| ``order_by``       | Specify which field(s) should be used to sort the      |
-|                    | results given by accessing the relation field.         |
-|                    | Note that this sort order is only applied when loading |
-|                    | objects from the database.
Objects appended to the | -| | collection afterwards are not re-sorted in-memory on | -| | the fly. | -| | This argument accepts either a string or a list of | -| | strings, each corresponding to the name of a field in | -| | the target entity. These field names can optionally be | -| | prefixed by a minus (for descending order). | -+--------------------+--------------------------------------------------------+ -| ``filter`` | Specify a filter criterion (as a clause element) for | -| | this relationship. This criterion will be ``and_`` ed | -| | with the normal join criterion (primaryjoin) generated | -| | by Elixir for the relationship. For example: | -| | boston_addresses = | -| | OneToMany('Address', filter=Address.city == 'Boston') | -+--------------------+--------------------------------------------------------+ - -Additionally, Elixir supports an alternate, DSL-based, syntax to define -OneToMany_ relationships, with the has_many_ statement. - - -`OneToOne` ----------- - -Describes the parent's side of a parent-child relationship when there is only -one child. For example, a `Car` object has one gear stick, which is -represented as a `GearStick` object. This could be expressed like so: - -.. sourcecode:: python - - class Car(Entity): - gear_stick = OneToOne('GearStick', inverse='car') - - class GearStick(Entity): - car = ManyToOne('Car') - -Note that a ``OneToOne`` relationship **cannot exist** without a corresponding -``ManyToOne`` relationship in the other way. This is because the ``OneToOne`` -relationship needs the foreign_key created by the ``ManyToOne`` relationship. - -Additionally, Elixir supports an alternate, DSL-based, syntax to define -OneToOne_ relationships, with the has_one_ statement. - - -`ManyToMany` ------------- - -Describes a relationship in which one kind of entity can be related to several -objects of the other kind but the objects of that other kind can be related to -several objects of the first kind. For example, an `Article` can have several -tags, but the same `Tag` can be used on several articles. - -.. sourcecode:: python - - class Article(Entity): - tags = ManyToMany('Tag') - - class Tag(Entity): - articles = ManyToMany('Article') - -Behind the scene, the ``ManyToMany`` relationship will automatically create an -intermediate table to host its data. - -Note that you don't necessarily need to define the inverse relationship. In -our example, even though we want tags to be usable on several articles, we -might not be interested in which articles correspond to a particular tag. In -that case, we could have omitted the `Tag` side of the relationship. - -If your ``ManyToMany`` relationship is self-referencial, the entity -containing it is autoloaded (and you don't intend to specify both the -primaryjoin and secondaryjoin arguments manually), you must specify at least -one of either the ``remote_colname`` or ``local_colname`` argument. - -In addition to keyword arguments inherited from SQLAlchemy, ``ManyToMany`` -relationships accept the following optional (keyword) arguments: - -+--------------------+--------------------------------------------------------+ -| Option Name | Description | -+====================+========================================================+ -| ``tablename`` | Specify a custom name for the intermediary table. This | -| | can be used both when the tables needs to be created | -| | and when the table is autoloaded/reflected from the | -| | database. 
If this argument is not used, a name will be | -| | automatically generated by Elixir depending on the name| -| | of the tables of the two entities of the relationship, | -| | the name of the relationship, and, if present, the name| -| | of its inverse. Even though this argument is optional, | -| | it is wise to use it if you are not sure what are the | -| | exact consequence of using a generated table name. | -+--------------------+--------------------------------------------------------+ -| ``schema`` | Specify a custom schema for the intermediate table. | -| | This can be used both when the tables needs to | -| | be created and when the table is autoloaded/reflected | -| | from the database. | -+--------------------+--------------------------------------------------------+ -| ``remote_colname`` | A string or list of strings specifying the names of | -| | the column(s) in the intermediary table which | -| | reference the "remote"/target entity's table. | -+--------------------+--------------------------------------------------------+ -| ``local_colname`` | A string or list of strings specifying the names of | -| | the column(s) in the intermediary table which | -| | reference the "local"/current entity's table. | -+--------------------+--------------------------------------------------------+ -| ``table`` | Use a manually created table. If this argument is | -| | used, Elixir will not generate a table for this | -| | relationship, and use the one given instead. This | -| | argument only accepts SQLAlchemy's Table objects. | -+--------------------+--------------------------------------------------------+ -| ``order_by`` | Specify which field(s) should be used to sort the | -| | results given by accessing the relation field. | -| | Note that this sort order is only applied when loading | -| | objects from the database. Objects appended to the | -| | collection afterwards are not re-sorted in-memory on | -| | the fly. | -| | This argument accepts either a string or a list of | -| | strings, each corresponding to the name of a field in | -| | the target entity. These field names can optionally be | -| | prefixed by a minus (for descending order). | -+----------------------+------------------------------------------------------+ -| ``ondelete`` | Value for the foreign key constraint ondelete clause. | -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+--------------------+--------------------------------------------------------+ -| ``onupdate`` | Value for the foreign key constraint onupdate clause. | -| | May be one of: ``cascade``, ``restrict``, | -| | ``set null``, or ``set default``. | -+--------------------+--------------------------------------------------------+ -| ``table_kwargs`` | A dictionary holding any other keyword argument you | -| | might want to pass to the underlying Table object. | -+--------------------+--------------------------------------------------------+ - - -================ -DSL-based syntax -================ - -The following DSL statements provide an alternative way to define relationships -between your entities. The first argument to all those statements is the name -of the relationship, the second is the 'kind' of object you are relating to -(it is usually given using the ``of_kind`` keyword). - -`belongs_to` ------------- - -The ``belongs_to`` statement is the DSL syntax equivalent to the ManyToOne_ -relationship. As such, it supports all the same arguments as ManyToOne_ -relationships. - -.. 
sourcecode:: python - - class Pet(Entity): - belongs_to('feeder', of_kind='Person') - belongs_to('owner', of_kind='Person', colname="owner_id") - - -`has_many` ----------- - -The ``has_many`` statement is the DSL syntax equivalent to the OneToMany_ -relationship. As such, it supports all the same arguments as OneToMany_ -relationships. - -.. sourcecode:: python - - class Person(Entity): - belongs_to('parent', of_kind='Person') - has_many('children', of_kind='Person') - -There is also an alternate form of the ``has_many`` relationship that takes -only two keyword arguments: ``through`` and ``via`` in order to encourage a -richer form of many-to-many relationship that is an alternative to the -``has_and_belongs_to_many`` statement. Here is an example: - -.. sourcecode:: python - - class Person(Entity): - has_field('name', Unicode) - has_many('assignments', of_kind='Assignment') - has_many('projects', through='assignments', via='project') - - class Assignment(Entity): - has_field('start_date', DateTime) - belongs_to('person', of_kind='Person') - belongs_to('project', of_kind='Project') - - class Project(Entity): - has_field('title', Unicode) - has_many('assignments', of_kind='Assignment') - -In the above example, a `Person` has many `projects` through the `Assignment` -relationship object, via a `project` attribute. - - -`has_one` ---------- - -The ``has_one`` statement is the DSL syntax equivalent to the OneToOne_ -relationship. As such, it supports all the same arguments as OneToOne_ -relationships. - -.. sourcecode:: python - - class Car(Entity): - has_one('gear_stick', of_kind='GearStick', inverse='car') - - class GearStick(Entity): - belongs_to('car', of_kind='Car') - - -`has_and_belongs_to_many` -------------------------- - -The ``has_and_belongs_to_many`` statement is the DSL syntax equivalent to the -ManyToMany_ relationship. As such, it supports all the same arguments as -ManyToMany_ relationships. - -.. sourcecode:: python - - class Article(Entity): - has_and_belongs_to_many('tags', of_kind='Tag') - - class Tag(Entity): - has_and_belongs_to_many('articles', of_kind='Article') - -''' - -import warnings - -from sqlalchemy import ForeignKeyConstraint, Column, Table, and_ -from sqlalchemy.orm import relation, backref, class_mapper -from sqlalchemy.ext.associationproxy import association_proxy - -import options -from elixir.statements import ClassMutator -from elixir.properties import Property -from elixir.entity import EntityMeta, DEBUG - -__doc_all__ = [] - - -class Relationship(Property): - ''' - Base class for relationships. - ''' - - def __init__(self, of_kind, inverse=None, *args, **kwargs): - super(Relationship, self).__init__() - - self.of_kind = of_kind - self.inverse_name = inverse - - self._target = None - - self.property = None # sqlalchemy property - self.backref = None # sqlalchemy backref - - #TODO: unused for now - self.args = args - self.kwargs = kwargs - - def attach(self, entity, name): - super(Relationship, self).attach(entity, name) - entity._descriptor.relationships.append(self) - - def create_pk_cols(self): - self.create_keys(True) - - def create_non_pk_cols(self): - self.create_keys(False) - - def create_keys(self, pk): - ''' - Subclasses (ie. concrete relationships) may override this method to - create foreign keys. 
- ''' - - def create_properties(self): - if self.property or self.backref: - return - - kwargs = self.get_prop_kwargs() - if 'order_by' in kwargs: - kwargs['order_by'] = \ - self.target._descriptor.translate_order_by(kwargs['order_by']) - - # transform callable arguments - for arg in ('primaryjoin', 'secondaryjoin', 'remote_side', - 'foreign_keys'): - kwarg = kwargs.get(arg, None) - if hasattr(kwarg, '__call__'): - kwargs[arg] = kwarg() - - # viewonly relationships need to create "standalone" relations (ie - # shouldn't be a backref of another relation). - if self.inverse and not kwargs.get('viewonly', False): - # check if the inverse was already processed (and thus has already - # defined a backref we can use) - if self.inverse.backref: - # let the user override the backref argument - if 'backref' not in kwargs: - kwargs['backref'] = self.inverse.backref - else: - # SQLAlchemy doesn't like when 'secondary' is both defined on - # the relation and the backref - kwargs.pop('secondary', None) - - # define backref for use by the inverse - self.backref = backref(self.name, **kwargs) - return - - self.property = relation(self.target, **kwargs) - self.add_mapper_property(self.name, self.property) - - @property - def target(self): - if not self._target: - if isinstance(self.of_kind, basestring): - collection = self.entity._descriptor.collection - self._target = collection.resolve(self.of_kind, self.entity) - else: - self._target = self.of_kind - return self._target - - @property - def inverse(self): - if not hasattr(self, '_inverse'): - if self.inverse_name: - desc = self.target._descriptor - inverse = desc.find_relationship(self.inverse_name) - if inverse is None: - raise Exception( - "Couldn't find a relationship named '%s' in " - "entity '%s' or its parent entities." - % (self.inverse_name, self.target.__name__)) - assert self.match_type_of(inverse), \ - "Relationships '%s' in entity '%s' and '%s' in entity " \ - "'%s' cannot be inverse of each other because their " \ - "types do not form a valid combination." % \ - (self.name, self.entity.__name__, - self.inverse_name, self.target.__name__) - else: - check_reverse = not self.kwargs.get('viewonly', False) - if isinstance(self.target, EntityMeta): - inverse = self.target._descriptor.get_inverse_relation( - self, check_reverse=check_reverse) - else: - inverse = None - self._inverse = inverse - if inverse and not self.kwargs.get('viewonly', False): - inverse._inverse = self - - return self._inverse - - def match_type_of(self, other): - return False - - def is_inverse(self, other): - # viewonly relationships are not symmetrical: a viewonly relationship - # should have exactly one inverse (a ManyToOne relationship), but that - # inverse shouldn't have the viewonly relationship as its inverse. 
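-        # Beyond that, two relationships are considered inverses of each
-        # other when their types form a valid pair, each one targets the
-        # other's entity, and any explicitly given 'inverse' names agree
-        # (a relationship without an explicit name matches any name).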
- return not other.kwargs.get('viewonly', False) and \ - other is not self and \ - self.match_type_of(other) and \ - self.entity == other.target and \ - other.entity == self.target and \ - (self.inverse_name == other.name or not self.inverse_name) and \ - (other.inverse_name == self.name or not other.inverse_name) - - -class ManyToOne(Relationship): - ''' - - ''' - - def __init__(self, of_kind, - column_kwargs=None, - colname=None, required=None, primary_key=None, - field=None, - constraint_kwargs=None, - use_alter=None, ondelete=None, onupdate=None, - target_column=None, - *args, **kwargs): - - # 1) handle column-related args - - # check that the column arguments don't conflict - assert not (field and (column_kwargs or colname)), \ - "ManyToOne can accept the 'field' argument or column " \ - "arguments ('colname' or 'column_kwargs') but not both!" - - if colname and not isinstance(colname, list): - colname = [colname] - self.colname = colname or [] - - column_kwargs = column_kwargs or {} - # kwargs go by default to the relation(), so we need to manually - # extract those targeting the Column - if required is not None: - column_kwargs['nullable'] = not required - if primary_key is not None: - column_kwargs['primary_key'] = primary_key - # by default, created columns will have an index. - column_kwargs.setdefault('index', True) - self.column_kwargs = column_kwargs - - if field and not isinstance(field, list): - field = [field] - self.field = field or [] - - # 2) handle constraint kwargs - constraint_kwargs = constraint_kwargs or {} - if use_alter is not None: - constraint_kwargs['use_alter'] = use_alter - if ondelete is not None: - constraint_kwargs['ondelete'] = ondelete - if onupdate is not None: - constraint_kwargs['onupdate'] = onupdate - self.constraint_kwargs = constraint_kwargs - - # 3) misc arguments - if target_column and not isinstance(target_column, list): - target_column = [target_column] - self.target_column = target_column - - self.foreign_key = [] - self.primaryjoin_clauses = [] - - super(ManyToOne, self).__init__(of_kind, *args, **kwargs) - - def match_type_of(self, other): - return isinstance(other, (OneToMany, OneToOne)) - - @property - def target_table(self): - if isinstance(self.target, EntityMeta): - return self.target._descriptor.table - else: - return class_mapper(self.target).local_table - - def create_keys(self, pk): - ''' - Find all primary keys on the target and create foreign keys on the - source accordingly. - ''' - - if self.foreign_key: - return - - if self.column_kwargs.get('primary_key', False) != pk: - return - - source_desc = self.entity._descriptor - if isinstance(self.target, EntityMeta): - # make sure the target has all its pk set up - #FIXME: this is not enough when specifying target_column manually, - # on unique, non-pk col, see tests/test_m2o.py:test_non_pk_forward - self.target._descriptor.create_pk_cols() - - #XXX: another option, instead of the FakeTable, would be to create an - # EntityDescriptor for the SA class. 
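-        # Two cases follow: for an autoloaded source table, existing
-        # foreign key constraints are reused to build the primaryjoin
-        # clauses; otherwise the foreign key column(s) and a
-        # ForeignKeyConstraint are created on the source entity.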
- target_table = self.target_table - - if source_desc.autoload: - #TODO: allow target_column to be used as an alternative to - # specifying primaryjoin, to be consistent with non-autoloaded - # tables - if self.colname: - if 'primaryjoin' not in self.kwargs: - self.primaryjoin_clauses = \ - _get_join_clauses(self.entity.table, - self.colname, None, - target_table)[0] - if not self.primaryjoin_clauses: - colnames = ', '.join(self.colname) - raise Exception( - "Couldn't find a foreign key constraint in table " - "'%s' using the following columns: %s." - % (self.entity.table.name, colnames)) - else: - # in this case we let SA handle everything. - # XXX: we might want to try to build join clauses anyway so - # that we know whether there is an ambiguity or not, and - # suggest using colname if there is one - pass - if self.field: - raise NotImplementedError( - "'field' argument not allowed on autoloaded table " - "relationships.") - else: - fk_refcols = [] - fk_colnames = [] - - if self.target_column is None: - target_columns = target_table.primary_key.columns - else: - target_columns = [target_table.columns[col] - for col in self.target_column] - - if not target_columns: - raise Exception("No primary key found in target table ('%s') " - "for the '%s' relationship of the '%s' entity." - % (target_table.name, self.name, - self.entity.__name__)) - if self.colname and \ - len(self.colname) != len(target_columns): - raise Exception( - "The number of column names provided in the colname " - "keyword argument of the '%s' relationship of the " - "'%s' entity is not the same as the number of columns " - "of the primary key of '%s'." - % (self.name, self.entity.__name__, - self.target.__name__)) - - for key_num, target_col in enumerate(target_columns): - if self.field: - col = self.field[key_num].column - else: - if self.colname: - colname = self.colname[key_num] - else: - colname = options.FKCOL_NAMEFORMAT % \ - {'relname': self.name, - 'key': target_col.key} - - # We can't add the column to the table directly as the - # table might not be created yet. - col = Column(colname, target_col.type, - **self.column_kwargs) - source_desc.add_column(col) - - # If the column name was specified, and it is the same as - # this property's name, there is going to be a conflict. - # Don't allow this to happen. - if col.key == self.name: - raise ValueError( - "ManyToOne named '%s' in '%s' conficts " - " with the column of the same name. " - "You should probably define the foreign key " - "field manually and use the 'field' " - "argument on the ManyToOne relationship" - % (self.name, self.entity.__name__)) - - # Build the list of local columns which will be part of - # the foreign key - self.foreign_key.append(col) - - # Store the names of those columns - fk_colnames.append(col.key) - - # Build the list of column "paths" the foreign key will - # point to - fk_refcols.append("%s.%s" % \ - (target_table.fullname, target_col.key)) - - # Build up the primary join. This is needed when you have - # several ManyToOne relationships between two objects - self.primaryjoin_clauses.append(col == target_col) - - if 'name' not in self.constraint_kwargs: - # In some databases (at least MySQL) the constraint name needs - # to be unique for the whole database, instead of per table. 
-                fk_name = options.CONSTRAINT_NAMEFORMAT % \
-                          {'tablename': source_desc.tablename,
-                           'colnames': '_'.join(fk_colnames)}
-                self.constraint_kwargs['name'] = fk_name
-
-            source_desc.add_constraint(
-                ForeignKeyConstraint(fk_colnames, fk_refcols,
-                                     **self.constraint_kwargs))
-
-    def get_prop_kwargs(self):
-        kwargs = {'uselist': False}
-
-        if self.entity.table is self.target_table:
-            # this is needed because otherwise SA has no way to know what is
-            # the direction of the relationship since both columns present in
-            # the primaryjoin belong to the same table. In other words, it is
-            # necessary to know if this particular relation
-            # is the many-to-one side, or the one-to-xxx side. The foreignkey
-            # doesn't help in this case.
-            kwargs['remote_side'] = \
-                [col for col in self.target_table.primary_key.columns]
-
-        if self.primaryjoin_clauses:
-            kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses)
-
-        kwargs.update(self.kwargs)
-
-        return kwargs
-
-
-class OneToOne(Relationship):
-    uselist = False
-
-    def __init__(self, of_kind, filter=None, *args, **kwargs):
-        self.filter = filter
-        if filter is not None:
-            # We set viewonly to True by default for filtered relationships,
-            # unless manually overridden.
-            # This is not strictly necessary, as SQLAlchemy allows non viewonly
-            # relationships with a custom join/filter. The example at:
-            # SADOCS/05/mappers.html#advdatamapping_relation_customjoin
-            # is not viewonly. Those relationships can be used as if the extra
-            # filter wasn't present when inserting. This can lead to a
-            # confusing behavior (if you insert data which doesn't match the
-            # extra criterion it'll get inserted anyway but you won't see it
-            # when you query back the attribute after a round-trip to the
-            # database).
-            if 'viewonly' not in kwargs:
-                kwargs['viewonly'] = True
-        super(OneToOne, self).__init__(of_kind, *args, **kwargs)
-
-    def match_type_of(self, other):
-        return isinstance(other, ManyToOne)
-
-    def create_keys(self, pk):
-        # make sure an inverse relationship exists
-        if self.inverse is None:
-            raise Exception(
-                "Couldn't find any relationship in '%s' which "
-                "matches as inverse of the '%s' relationship "
-                "defined in the '%s' entity. If you are using "
-                "inheritance you "
-                "might need to specify inverse relationships "
-                "manually by using the 'inverse' argument."
-                % (self.target, self.name,
-                   self.entity))
-
-    def get_prop_kwargs(self):
-        kwargs = {'uselist': self.uselist}
-
-        #TODO: for now, we don't break any test if we remove those 2 lines.
-        # So, we should either complete the selfref test to prove that they
-        # are indeed useful, or remove them. It might be they are indeed
-        # useless because the remote_side is already setup in the other way
-        # (ManyToOne).
-        if self.entity.table is self.target.table:
-            # When using a manual/autoloaded table, it will be assigned
-            # an empty list, which doesn't seem to upset SQLAlchemy
-            kwargs['remote_side'] = self.inverse.foreign_key
-
-        # Contrary to ManyToMany relationships, we need to specify the join
-        # clauses even if this relationship is not self-referential because
-        # there could be several ManyToOne from the target class to us.
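To illustrate the `filter`/`viewonly` behavior discussed in the comments above (hypothetical entities; the filter callable receives the target table's columns):

```python
from elixir import Entity, Field, Integer, ManyToOne, OneToMany

class Child(Entity):
    age = Field(Integer)
    parent = ManyToOne('Parent')

class Parent(Entity):
    children = OneToMany('Child')            # regular inverse pair
    # Filtered relationship: viewonly=True is implied, so it is read-only
    # and (per is_inverse above) never matched as anyone's inverse.
    adults = OneToMany('Child', filter=lambda c: c.age >= 18)
```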
- joinclauses = self.inverse.primaryjoin_clauses - if self.filter: - # We need to make a copy of the joinclauses, to not add the filter - # on the backref - joinclauses = joinclauses[:] + [self.filter(self.target.table.c)] - if joinclauses: - kwargs['primaryjoin'] = and_(*joinclauses) - - kwargs.update(self.kwargs) - - return kwargs - - -class OneToMany(OneToOne): - uselist = True - - -class ManyToMany(Relationship): - uselist = True - - def __init__(self, of_kind, tablename=None, - local_colname=None, remote_colname=None, - ondelete=None, onupdate=None, - table=None, schema=None, - filter=None, - table_kwargs=None, - *args, **kwargs): - self.user_tablename = tablename - - if local_colname and not isinstance(local_colname, list): - local_colname = [local_colname] - self.local_colname = local_colname or [] - if remote_colname and not isinstance(remote_colname, list): - remote_colname = [remote_colname] - self.remote_colname = remote_colname or [] - - self.ondelete = ondelete - self.onupdate = onupdate - - self.table = table - self.schema = schema - - #TODO: this can probably be simplified/moved elsewhere since the - #argument disappeared - self.column_format = options.M2MCOL_NAMEFORMAT - if not hasattr(self.column_format, '__call__'): - # we need to store the format in a variable so that the - # closure of the lambda is correct - format = self.column_format - self.column_format = lambda data: format % data - if options.MIGRATION_TO_07_AID: - self.column_format = \ - migration_aid_m2m_column_formatter( - lambda data: options.OLD_M2MCOL_NAMEFORMAT % data, - self.column_format) - - self.filter = filter - if filter is not None: - # We set viewonly to True by default for filtered relationships, - # unless manually overridden. - if 'viewonly' not in kwargs: - kwargs['viewonly'] = True - - self.table_kwargs = table_kwargs or {} - - self.primaryjoin_clauses = [] - self.secondaryjoin_clauses = [] - - super(ManyToMany, self).__init__(of_kind, *args, **kwargs) - - def match_type_of(self, other): - return isinstance(other, ManyToMany) - - def create_tables(self): - if self.table is not None: - if 'primaryjoin' not in self.kwargs or \ - 'secondaryjoin' not in self.kwargs: - self._build_join_clauses() - assert self.inverse is None or self.inverse.table is None or \ - self.inverse.table is self.table - return - - if self.inverse: - inverse = self.inverse - if inverse.table is not None: - self.table = inverse.table - self.primaryjoin_clauses = inverse.secondaryjoin_clauses - self.secondaryjoin_clauses = inverse.primaryjoin_clauses - return - - assert not inverse.user_tablename or not self.user_tablename or \ - inverse.user_tablename == self.user_tablename - assert not inverse.remote_colname or not self.local_colname or \ - inverse.remote_colname == self.local_colname - assert not inverse.local_colname or not self.remote_colname or \ - inverse.local_colname == self.remote_colname - assert not inverse.schema or not self.schema or \ - inverse.schema == self.schema - assert not inverse.table_kwargs or not self.table_kwargs or \ - inverse.table_kwargs == self.table_kwargs - - self.user_tablename = inverse.user_tablename or self.user_tablename - self.local_colname = inverse.remote_colname or self.local_colname - self.remote_colname = inverse.local_colname or self.remote_colname - self.schema = inverse.schema or self.schema - self.local_colname = inverse.remote_colname or self.local_colname - - # compute table_kwargs - complete_kwargs = options.options_defaults['table_options'].copy() - 
complete_kwargs.update(self.table_kwargs) - - #needs: table_options['schema'], autoload, tablename, primary_keys, - #entity.__name__, table_fullname - e1_desc = self.entity._descriptor - e2_desc = self.target._descriptor - - e1_schema = e1_desc.table_options.get('schema', None) - e2_schema = e2_desc.table_options.get('schema', None) - schema = (self.schema is not None) and self.schema or e1_schema - - assert e1_schema == e2_schema or self.schema, \ - "Schema %r for entity %s differs from schema %r of entity %s." \ - " Consider using the schema-parameter. "\ - % (e1_schema, self.entity.__name__, - e2_schema, self.target.__name__) - - # First, we compute the name of the table. Note that some of the - # intermediary variables are reused later for the constraint - # names. - - # We use the name of the relation for the first entity - # (instead of the name of its primary key), so that we can - # have two many-to-many relations between the same objects - # without having a table name collision. - source_part = "%s_%s" % (e1_desc.tablename, self.name) - - # And we use only the name of the table of the second entity - # when there is no inverse, so that a many-to-many relation - # can be defined without an inverse. - if self.inverse: - target_part = "%s_%s" % (e2_desc.tablename, self.inverse.name) - else: - target_part = e2_desc.tablename - - if self.user_tablename: - tablename = self.user_tablename - else: - # We need to keep the table name consistent (independant of - # whether this relation or its inverse is setup first). - if self.inverse and source_part < target_part: - #XXX: use a different scheme for selfref (to not include the - # table name twice)? - tablename = "%s__%s" % (target_part, source_part) - else: - tablename = "%s__%s" % (source_part, target_part) - - if options.MIGRATION_TO_07_AID: - oldname = (self.inverse and - e1_desc.tablename < e2_desc.tablename) and \ - "%s__%s" % (target_part, source_part) or \ - "%s__%s" % (source_part, target_part) - if oldname != tablename: - warnings.warn( - "The generated table name for the '%s' relationship " - "on the '%s' entity changed from '%s' (the name " - "generated by Elixir 0.6.1 and earlier) to '%s'. " - "You should either rename the table in the database " - "to the new name or use the tablename argument on the " - "relationship to force the old name: tablename='%s'!" - % (self.name, self.entity.__name__, oldname, - tablename, oldname)) - - if e1_desc.autoload: - if not e2_desc.autoload: - raise Exception( - "Entity '%s' is autoloaded and its '%s' " - "ManyToMany relationship points to " - "the '%s' entity which is not autoloaded" - % (self.entity.__name__, self.name, - self.target.__name__)) - - self.table = Table(tablename, e1_desc.metadata, autoload=True, - **complete_kwargs) - if 'primaryjoin' not in self.kwargs or \ - 'secondaryjoin' not in self.kwargs: - self._build_join_clauses() - else: - # We pre-compute the names of the foreign key constraints - # pointing to the source (local) entity's table and to the - # target's table - - # In some databases (at least MySQL) the constraint names need - # to be unique for the whole database, instead of per table. 
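As a concrete reading of the naming scheme above, with hypothetical entities and default tablename options: folding each relationship name into its own side keeps two distinct many-to-many relations between the same entities from colliding, and the `source_part < target_part` comparison makes the result independent of setup order.

```python
from elixir import Entity, ManyToMany

class Post(Entity):
    tags = ManyToMany('Tag')

class Tag(Entity):
    posts = ManyToMany('Post')

# source_part/target_part are 'post_tags' and 'tag_posts' (tablename plus
# relationship name). Since an inverse exists and 'post_tags' < 'tag_posts',
# both sides agree on the intermediate table name 'tag_posts__post_tags',
# whichever relationship happens to be set up first.
```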
- source_fk_name = "%s_fk" % source_part - if self.inverse: - target_fk_name = "%s_fk" % target_part - else: - target_fk_name = "%s_inverse_fk" % source_part - - columns = [] - constraints = [] - - for num, desc, fk_name, rel, inverse, colnames, join_clauses in ( - (0, e1_desc, source_fk_name, self, self.inverse, - self.local_colname, self.primaryjoin_clauses), - (1, e2_desc, target_fk_name, self.inverse, self, - self.remote_colname, self.secondaryjoin_clauses)): - - fk_colnames = [] - fk_refcols = [] - if colnames: - assert len(colnames) == len(desc.primary_keys) - else: - # The data generated here will be fed to the M2M column - # formatter to generate the name of the columns of the - # intermediate table for *one* side of the relationship, - # that is, from the intermediate table to the current - # entity, as stored in the "desc" variable. - data = {# A) relationships info - - # the name of the rel going *from* the entity - # we are currently generating a column pointing - # *to*. This is generally *not* what you want to - # use. eg in a "Post" and "Tag" example, with - # relationships named 'tags' and 'posts', when - # creating the columns from the intermediate - # table to the "Post" entity, 'relname' will - # contain 'tags'. - 'relname': rel and rel.name or 'inverse', - - # the name of the inverse relationship. In the - # above example, 'inversename' will contain - # 'posts'. - 'inversename': inverse and inverse.name - or 'inverse', - # is A == B? - 'selfref': e1_desc is e2_desc, - # provided for backward compatibility, DO NOT USE! - 'num': num, - # provided for backward compatibility, DO NOT USE! - 'numifself': e1_desc is e2_desc and str(num + 1) - or '', - # B) target information (from the perspective of - # the intermediate table) - 'target': desc.entity, - 'entity': desc.entity.__name__.lower(), - 'tablename': desc.tablename, - - # C) current (intermediate) table name - 'current_table': tablename - } - colnames = [] - for pk_col in desc.primary_keys: - data.update(key=pk_col.key) - colnames.append(self.column_format(data)) - - for pk_col, colname in zip(desc.primary_keys, colnames): - col = Column(colname, pk_col.type, primary_key=True) - columns.append(col) - - # Build the list of local columns which will be part - # of the foreign key. - fk_colnames.append(colname) - - # Build the list of column "paths" the foreign key will - # point to - target_path = "%s.%s" % (desc.table_fullname, pk_col.key) - fk_refcols.append(target_path) - - # Build join clauses (in case we have a self-ref) - if self.entity is self.target: - join_clauses.append(col == pk_col) - - onupdate = rel and rel.onupdate - ondelete = rel and rel.ondelete - - #FIXME: fk_name is misleading - constraints.append( - ForeignKeyConstraint(fk_colnames, fk_refcols, - name=fk_name, onupdate=onupdate, - ondelete=ondelete)) - - args = columns + constraints - - self.table = Table(tablename, e1_desc.metadata, - schema=schema, *args, **complete_kwargs) - if DEBUG: - print self.table.repr2() - - def _build_join_clauses(self): - # In the case we have a self-reference, we need to build join clauses - if self.entity is self.target: - if not self.local_colname and not self.remote_colname: - raise Exception( - "Self-referential ManyToMany " - "relationships in autoloaded entities need to have at " - "least one of either 'local_colname' or 'remote_colname' " - "argument specified. The '%s' relationship in the '%s' " - "entity doesn't have either." 
- % (self.name, self.entity.__name__)) - - self.primaryjoin_clauses, self.secondaryjoin_clauses = \ - _get_join_clauses(self.table, - self.local_colname, self.remote_colname, - self.entity.table) - - def get_prop_kwargs(self): - kwargs = {'secondary': self.table, - 'uselist': self.uselist} - - if self.filter: - # we need to make a copy of the joinclauses - secondaryjoin_clauses = self.secondaryjoin_clauses[:] + \ - [self.filter(self.target.table.c)] - else: - secondaryjoin_clauses = self.secondaryjoin_clauses - - if self.target is self.entity or self.filter: - kwargs['primaryjoin'] = and_(*self.primaryjoin_clauses) - kwargs['secondaryjoin'] = and_(*secondaryjoin_clauses) - - kwargs.update(self.kwargs) - - return kwargs - - def is_inverse(self, other): - return super(ManyToMany, self).is_inverse(other) and \ - (self.user_tablename == other.user_tablename or - (not self.user_tablename and not other.user_tablename)) - - -def migration_aid_m2m_column_formatter(oldformatter, newformatter): - def debug_formatter(data): - old_name = oldformatter(data) - new_name = newformatter(data) - if new_name != old_name: - complete_data = data.copy() - complete_data.update(old_name=old_name, - new_name=new_name, - targetname=data['target'].__name__) - # Specifying a stacklevel is useless in this case as the name - # generation is triggered by setup_all(), not by the declaration - # of the offending relationship. - warnings.warn("The '%(old_name)s' column in the " - "'%(current_table)s' table, used as the " - "intermediate table for the '%(relname)s' " - "relationship on the '%(targetname)s' entity " - "was renamed to '%(new_name)s'." - % complete_data) - return new_name - return debug_formatter - - -def _get_join_clauses(local_table, local_cols1, local_cols2, target_table): - primary_join, secondary_join = [], [] - cols1 = local_cols1[:] - cols1.sort() - cols1 = tuple(cols1) - - if local_cols2 is not None: - cols2 = local_cols2[:] - cols2.sort() - cols2 = tuple(cols2) - else: - cols2 = None - - # Build a map of fk constraints pointing to the correct table. - # The map is indexed on the local col names. - constraint_map = {} - for constraint in local_table.constraints: - if isinstance(constraint, ForeignKeyConstraint): - use_constraint = True - fk_colnames = [] - - # if all columns point to the correct table, we use the constraint - #TODO: check that it contains as many columns as the pk of the - #target entity, or even that it points to the actual pk columns - for fk in constraint.elements: - if fk.references(target_table): - # local column key - fk_colnames.append(fk.parent.key) - else: - use_constraint = False - if use_constraint: - fk_colnames.sort() - constraint_map[tuple(fk_colnames)] = constraint - - # Either the fk column names match explicitely with the columns given for - # one of the joins (primary or secondary), or we assume the current - # columns match because the columns for this join were not given and we - # know the other join is either not used (is None) or has an explicit - # match. - -#TODO: rewrite this. Even with the comment, I don't even understand it myself. 
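The constraint map built above is what lets `_get_join_clauses` split an autoloaded self-referential intermediate table into primary and secondary joins; a sketch with a hypothetical table and column names:

```python
from elixir import Entity, ManyToMany, using_options

class Person(Entity):
    using_options(tablename='person', autoload=True)
    # 'friendship' has two FK constraints back to person; local_colname /
    # remote_colname select which constraint feeds the primaryjoin and
    # which feeds the secondaryjoin.
    friends = ManyToMany('Person', tablename='friendship',
                         local_colname='person_id',
                         remote_colname='friend_id')
```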
-    for cols, constraint in constraint_map.iteritems():
-        if cols == cols1 or (cols != cols2 and
-                             not cols1 and (cols2 in constraint_map or
-                                            cols2 is None)):
-            join = primary_join
-        elif cols == cols2 or (cols2 == () and cols1 in constraint_map):
-            join = secondary_join
-        else:
-            continue
-        for fk in constraint.elements:
-            join.append(fk.parent == fk.column)
-    return primary_join, secondary_join
-
-
-def rel_mutator_handler(target):
-    def handler(entity, name, of_kind=None, through=None, via=None,
-                *args, **kwargs):
-        if through and via:
-            setattr(entity, name,
-                    association_proxy(through, via, **kwargs))
-            return
-        elif through or via:
-            raise Exception("'through' and 'via' relationship keyword "
-                            "arguments should be used in combination.")
-        rel = target(of_kind, *args, **kwargs)
-        rel.attach(entity, name)
-    return handler
-
-
-belongs_to = ClassMutator(rel_mutator_handler(ManyToOne))
-has_one = ClassMutator(rel_mutator_handler(OneToOne))
-has_many = ClassMutator(rel_mutator_handler(OneToMany))
-has_and_belongs_to_many = ClassMutator(rel_mutator_handler(ManyToMany))
diff --git a/libs/elixir/statements.py b/libs/elixir/statements.py
deleted file mode 100644
index c21bf305..00000000
--- a/libs/elixir/statements.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import sys
-
-MUTATORS = '__elixir_mutators__'
-
-class ClassMutator(object):
-    '''
-    DSL-style syntax
-
-    A ``ClassMutator`` object represents a DSL term.
-    '''
-
-    def __init__(self, handler):
-        '''
-        Create a new ClassMutator, using the `handler` callable to process it
-        when the time comes.
-        '''
-        self.handler = handler
-
-    # called when a mutator (eg. "has_field(...)") is parsed
-    def __call__(self, *args, **kwargs):
-        # self in this case is the "generic" mutator (eg "has_field")
-
-        # jam this mutator into the class's mutator list
-        class_locals = sys._getframe(1).f_locals
-        mutators = class_locals.setdefault(MUTATORS, [])
-        mutators.append((self, args, kwargs))
-
-    def process(self, entity, *args, **kwargs):
-        '''
-        Process one mutator. This version simply calls the handler callable,
-        but another mutator (sub)class could do more processing.
-        '''
-        self.handler(entity, *args, **kwargs)
-
-
-#TODO: move this to the super class (to be created here) of EntityMeta
-def process_mutators(entity):
-    '''
-    Apply all mutators of the given entity. That is, loop over all mutators
-    in the class's mutator list and process them.
-    '''
-    # we don't use getattr here to not inherit from the parent mutators
-    # inadvertently if the current entity hasn't defined any mutator.
-    mutators = entity.__dict__.get(MUTATORS, [])
-    for mutator, args, kwargs in mutators:
-        mutator.process(entity, *args, **kwargs)
-
-class Statement(ClassMutator):
-
-    def process(self, entity, *args, **kwargs):
-        builder = self.handler(entity, *args, **kwargs)
-        entity._descriptor.builders.append(builder)
-
-class PropertyStatement(ClassMutator):
-
-    def process(self, entity, name, *args, **kwargs):
-        prop = self.handler(*args, **kwargs)
-        prop.attach(entity, name)
-
diff --git a/libs/importlib/__init__.py b/libs/importlib/__init__.py
deleted file mode 100644
index ad31a1ac..00000000
--- a/libs/importlib/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""Backport of importlib.import_module from 3.x."""
-# While not critical (and in no way guaranteed!), it would be nice to keep this
-# code compatible with Python 2.3.
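The `statements.py` module removed above drives the whole DSL with a frame hack: calling a mutator inside a class body stashes the call in that class's namespace, and a metaclass replays it later. A self-contained sketch of the mechanism (an illustration only, not Elixir code):

```python
import sys

MUTATORS = '__mutators__'

class ClassMutator(object):
    def __init__(self, handler):
        self.handler = handler

    def __call__(self, *args, **kwargs):
        # f_locals of the caller is the namespace of the class body being
        # executed; record the call there for later processing.
        class_locals = sys._getframe(1).f_locals
        class_locals.setdefault(MUTATORS, []).append((self, args, kwargs))

def add_attr(entity, name, value):
    setattr(entity, name, value)

has_attr = ClassMutator(add_attr)

class Example(object):
    has_attr('answer', 42)        # recorded, not executed yet

# replayed later (Elixir does this from its metaclass):
for mutator, args, kwargs in Example.__dict__[MUTATORS]:
    mutator.handler(Example, *args, **kwargs)

assert Example.answer == 42
```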
-import sys
-
-def _resolve_name(name, package, level):
-    """Return the absolute name of the module to be imported."""
-    if not hasattr(package, 'rindex'):
-        raise ValueError("'package' not set to a string")
-    dot = len(package)
-    for x in xrange(level, 1, -1):
-        try:
-            dot = package.rindex('.', 0, dot)
-        except ValueError:
-            raise ValueError("attempted relative import beyond top-level "
-                             "package")
-    return "%s.%s" % (package[:dot], name)
-
-
-def import_module(name, package=None):
-    """Import a module.
-
-    The 'package' argument is required when performing a relative import. It
-    specifies the package to use as the anchor point from which to resolve the
-    relative import to an absolute import.
-
-    """
-    if name.startswith('.'):
-        if not package:
-            raise TypeError("relative imports require the 'package' argument")
-        level = 0
-        for character in name:
-            if character != '.':
-                break
-            level += 1
-        name = _resolve_name(name[level:], package, level)
-    __import__(name)
-    return sys.modules[name]
diff --git a/libs/logr/__init__.py b/libs/logr/__init__.py
new file mode 100644
index 00000000..7a2d7b2e
--- /dev/null
+++ b/libs/logr/__init__.py
@@ -0,0 +1,225 @@
+# logr - Simple Python logging wrapper
+# Packed by Dean Gardiner
+#
+# File part of:
+# rdio-sock - Rdio WebSocket Library
+# Copyright (C) 2013 fzza-
+
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.

+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.

+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
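Since logr's public surface is all static methods, typical use of the module below is configure-once-then-call; a minimal usage sketch (level/name values are illustrative):

```python
import logging
from logr import Logr

# install a StreamHandler with LogrFormatter at the given level, and derive
# logger names from the calling frame instead of the fixed default name
Logr.configure(level=logging.DEBUG, trace_origin=True, name="CouchPotato")

Logr.info("renamer started")   # routed through a per-name logging.Logger
Logr.warn("low disk space")    # 'warn' is an alias for 'warning'
```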
+
+
+import inspect
+import logging
+import os
+import sys
+
+IGNORE = ()
+PY3 = sys.version_info[0] == 3
+
+
+class Logr(object):
+    loggers = {}
+    handler = None
+
+    trace_origin = False
+    name = "Logr"
+
+    @staticmethod
+    def configure(level=logging.WARNING, handler=None, formatter=None, trace_origin=False, name="Logr"):
+        """Configure Logr
+
+        @param level: Minimum level accepted by the default handler
+        @type level: int
+
+        @param handler: Logger message handler
+        @type handler: logging.Handler or None
+
+        @param formatter: Logger message Formatter
+        @type formatter: logging.Formatter or None
+
+        @param trace_origin: Derive logger names from the calling frame
+        @type trace_origin: bool
+
+        @param name: Fixed logger name used when trace_origin is disabled
+        @type name: str
+        """
+        if formatter is None:
+            formatter = LogrFormatter()
+
+        if handler is None:
+            handler = logging.StreamHandler()
+
+        handler.setFormatter(formatter)
+        handler.setLevel(level)
+        Logr.handler = handler
+
+        Logr.trace_origin = trace_origin
+        Logr.name = name
+
+    @staticmethod
+    def configure_check():
+        if Logr.handler is None:
+            Logr.configure()
+
+    @staticmethod
+    def _get_name_from_path(filename):
+        try:
+            return os.path.splitext(os.path.basename(filename))[0]
+        except TypeError:
+            return "<unknown>"
+
+    @staticmethod
+    def get_frame_class(frame):
+        if len(frame.f_code.co_varnames) <= 0:
+            return None
+
+        farg = frame.f_code.co_varnames[0]
+
+        if farg not in frame.f_locals:
+            return None
+
+        if farg == 'self':
+            return frame.f_locals[farg].__class__
+
+        if farg == 'cls':
+            return frame.f_locals[farg]
+
+        return None
+
+
+    @staticmethod
+    def get_logger_name():
+        if not Logr.trace_origin:
+            return Logr.name
+
+        stack = inspect.stack()
+
+        for x in xrange_six(len(stack)):
+            frame = stack[x][0]
+            name = None
+
+            # Try to find the name of a function defined inside a class
+            frame_class = Logr.get_frame_class(frame)
+
+            if frame_class:
+                class_name = frame_class.__name__
+                module_name = frame_class.__module__
+
+                if module_name != '__main__':
+                    name = module_name + '.' + class_name
+                else:
+                    name = class_name
+
+            # Try to find the name of a function defined outside of a class
+            if name is None:
+                if frame.f_code.co_name in frame.f_globals:
+                    name = frame.f_globals.get('__name__')
+                    if name == '__main__':
+                        name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
+                elif frame.f_code.co_name == '<module>':
+                    name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
+
+            if name is not None and name not in IGNORE:
+                return name
+
+        return "<unknown>"
+
+    @staticmethod
+    def get_logger():
+        """Get or create logger (if it does not exist)
+
+        @rtype: RootLogger
+        """
+        name = Logr.get_logger_name()
+        if name not in Logr.loggers:
+            Logr.configure_check()
+            Logr.loggers[name] = logging.Logger(name)
+            Logr.loggers[name].addHandler(Logr.handler)
+        return Logr.loggers[name]
+
+    @staticmethod
+    def debug(msg, *args, **kwargs):
+        Logr.get_logger().debug(msg, *args, **kwargs)
+
+    @staticmethod
+    def info(msg, *args, **kwargs):
+        Logr.get_logger().info(msg, *args, **kwargs)
+
+    @staticmethod
+    def warning(msg, *args, **kwargs):
+        Logr.get_logger().warning(msg, *args, **kwargs)
+
+    warn = warning
+
+    @staticmethod
+    def error(msg, *args, **kwargs):
+        Logr.get_logger().error(msg, *args, **kwargs)
+
+    @staticmethod
+    def exception(msg, *args, **kwargs):
+        Logr.get_logger().exception(msg, *args, **kwargs)
+
+    @staticmethod
+    def critical(msg, *args, **kwargs):
+        Logr.get_logger().critical(msg, *args, **kwargs)
+
+    fatal = critical
+
+    @staticmethod
+    def log(level, msg, *args, **kwargs):
+        Logr.get_logger().log(level, msg, *args, **kwargs)
+
+
+class LogrFormatter(logging.Formatter):
+    LENGTH_NAME = 32
+    LENGTH_LEVEL_NAME = 5
+
+    def __init__(self, fmt=None, datefmt=None):
+        if sys.version_info[:2] > (2, 6):
+            super(LogrFormatter, self).__init__(fmt, datefmt)
+        else:
+            logging.Formatter.__init__(self, fmt, datefmt)
+
+    def usesTime(self):
+        return True
+
+    def format(self, record):
+        record.message = record.getMessage()
+        if self.usesTime():
+            record.asctime = self.formatTime(record, self.datefmt)
+
+        s = "%(asctime)s %(name)s %(levelname)s %(message)s" % {
+            'asctime': record.asctime,
+            'name': record.name[-self.LENGTH_NAME:].rjust(self.LENGTH_NAME, ' '),
+            'levelname': record.levelname[:self.LENGTH_LEVEL_NAME].ljust(self.LENGTH_LEVEL_NAME, ' '),
+            'message': record.message
+        }
+
+        if record.exc_info:
+            if not record.exc_text:
+                record.exc_text = self.formatException(record.exc_info)
+        if record.exc_text:
+            if s[-1:] != "\n":
+                s += "\n"
+            try:
+                s += record.exc_text
+            except UnicodeError:
+                s = s + record.exc_text.decode(sys.getfilesystemencoding(),
+                                               'replace')
+        return s
+
+
+def xrange_six(start, stop=None, step=None):
+    if stop is not None and step is not None:
+        if PY3:
+            return range(start, stop, step)
+        else:
+            return xrange(start, stop, step)
+    else:
+        if PY3:
+            return range(start)
+        else:
+            return xrange(start)
diff --git a/libs/migrate/__init__.py b/libs/migrate/__init__.py
deleted file mode 100644
index 0cfdb726..00000000
--- a/libs/migrate/__init__.py
+++ /dev/null
@@ -1,11 +0,0 @@
-"""
-   SQLAlchemy migrate provides two APIs: :mod:`migrate.versioning` for
-   database schema version and repository management, and
-   :mod:`migrate.changeset`, which allows defining database schema changes
-   using Python.
-""" - -from migrate.versioning import * -from migrate.changeset import * - -__version__ = '0.7.2' diff --git a/libs/migrate/changeset/__init__.py b/libs/migrate/changeset/__init__.py deleted file mode 100644 index 80ea152d..00000000 --- a/libs/migrate/changeset/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -""" - This module extends SQLAlchemy and provides additional DDL [#]_ - support. - - .. [#] SQL Data Definition Language -""" -import re -import warnings - -import sqlalchemy -from sqlalchemy import __version__ as _sa_version - -warnings.simplefilter('always', DeprecationWarning) - -_sa_version = tuple(int(re.match("\d+", x).group(0)) for x in _sa_version.split(".")) -SQLA_07 = _sa_version >= (0, 7) - -del re -del _sa_version - -from migrate.changeset.schema import * -from migrate.changeset.constraint import * - -sqlalchemy.schema.Table.__bases__ += (ChangesetTable, ) -sqlalchemy.schema.Column.__bases__ += (ChangesetColumn, ) -sqlalchemy.schema.Index.__bases__ += (ChangesetIndex, ) - -sqlalchemy.schema.DefaultClause.__bases__ += (ChangesetDefaultClause, ) diff --git a/libs/migrate/changeset/ansisql.py b/libs/migrate/changeset/ansisql.py deleted file mode 100644 index 9ded5605..00000000 --- a/libs/migrate/changeset/ansisql.py +++ /dev/null @@ -1,292 +0,0 @@ -""" - Extensions to SQLAlchemy for altering existing tables. - - At the moment, this isn't so much based off of ANSI as much as - things that just happen to work with multiple databases. -""" -import StringIO - -import sqlalchemy as sa -from sqlalchemy.schema import SchemaVisitor -from sqlalchemy.engine.default import DefaultDialect -from sqlalchemy.sql import ClauseElement -from sqlalchemy.schema import (ForeignKeyConstraint, - PrimaryKeyConstraint, - CheckConstraint, - UniqueConstraint, - Index) - -from migrate import exceptions -from migrate.changeset import constraint - -from sqlalchemy.schema import AddConstraint, DropConstraint -from sqlalchemy.sql.compiler import DDLCompiler -SchemaGenerator = SchemaDropper = DDLCompiler - - -class AlterTableVisitor(SchemaVisitor): - """Common operations for ``ALTER TABLE`` statements.""" - - # engine.Compiler looks for .statement - # when it spawns off a new compiler - statement = ClauseElement() - - def append(self, s): - """Append content to the SchemaIterator's query buffer.""" - - self.buffer.write(s) - - def execute(self): - """Execute the contents of the SchemaIterator's buffer.""" - try: - return self.connection.execute(self.buffer.getvalue()) - finally: - self.buffer.truncate(0) - - def __init__(self, dialect, connection, **kw): - self.connection = connection - self.buffer = StringIO.StringIO() - self.preparer = dialect.identifier_preparer - self.dialect = dialect - - def traverse_single(self, elem): - ret = super(AlterTableVisitor, self).traverse_single(elem) - if ret: - # adapt to 0.6 which uses a string-returning - # object - self.append(" %s" % ret) - - def _to_table(self, param): - """Returns the table object for the given param object.""" - if isinstance(param, (sa.Column, sa.Index, sa.schema.Constraint)): - ret = param.table - else: - ret = param - return ret - - def start_alter_table(self, param): - """Returns the start of an ``ALTER TABLE`` SQL-Statement. - - Use the param object to determine the table name and use it - for building the SQL statement. 
-
-        :param param: object to determine the table from
-        :type param: :class:`sqlalchemy.Column`, :class:`sqlalchemy.Index`,
-            :class:`sqlalchemy.schema.Constraint`, :class:`sqlalchemy.Table`,
-            or string (table name)
-        """
-        table = self._to_table(param)
-        self.append('\nALTER TABLE %s ' % self.preparer.format_table(table))
-        return table
-
-
-class ANSIColumnGenerator(AlterTableVisitor, SchemaGenerator):
-    """Extends ansisql generator for column creation (alter table add col)"""
-
-    def visit_column(self, column):
-        """Create a column (table already exists).
-
-        :param column: column object
-        :type column: :class:`sqlalchemy.Column` instance
-        """
-        if column.default is not None:
-            self.traverse_single(column.default)
-
-        table = self.start_alter_table(column)
-        self.append("ADD ")
-        self.append(self.get_column_specification(column))
-
-        for cons in column.constraints:
-            self.traverse_single(cons)
-        self.execute()
-
-        # ALTER TABLE STATEMENTS
-
-        # add indexes and unique constraints
-        if column.index_name:
-            Index(column.index_name, column).create()
-        elif column.unique_name:
-            constraint.UniqueConstraint(column,
-                                        name=column.unique_name).create()
-
-        # SA binds FK constraints to the table, so add them manually
-        for fk in column.foreign_keys:
-            self.add_foreignkey(fk.constraint)
-
-        # add primary key constraint if needed
-        if column.primary_key_name:
-            cons = constraint.PrimaryKeyConstraint(column,
-                                                   name=column.primary_key_name)
-            cons.create()
-
-    def add_foreignkey(self, fk):
-        self.connection.execute(AddConstraint(fk))
-
-class ANSIColumnDropper(AlterTableVisitor, SchemaDropper):
-    """Extends ANSI SQL dropper for column dropping (``ALTER TABLE
-    DROP COLUMN``).
-    """
-
-    def visit_column(self, column):
-        """Drop a column from its table.
-
-        :param column: the column object
-        :type column: :class:`sqlalchemy.Column`
-        """
-        table = self.start_alter_table(column)
-        self.append('DROP COLUMN %s' % self.preparer.format_column(column))
-        self.execute()
-
-
-class ANSISchemaChanger(AlterTableVisitor, SchemaGenerator):
-    """Manages changes to existing schema elements.
-
-    Note that columns are schema elements; ``ALTER TABLE ADD COLUMN``
-    is in SchemaGenerator.
-
-    All items may be renamed. Columns can also have many of their properties -
-    type, for example - changed.
-
-    Each function is passed a tuple, containing (object, name); where
-    object is a type of object you'd expect for that function
-    (ie. table for visit_table) and name is the object's new
-    name. NONE means the name is unchanged.
-    """
-
-    def visit_table(self, table):
-        """Rename a table.
Other ops aren't supported.""" - self.start_alter_table(table) - self.append("RENAME TO %s" % self.preparer.quote(table.new_name, - table.quote)) - self.execute() - - def visit_index(self, index): - """Rename an index""" - if hasattr(self, '_validate_identifier'): - # SA <= 0.6.3 - self.append("ALTER INDEX %s RENAME TO %s" % ( - self.preparer.quote( - self._validate_identifier( - index.name, True), index.quote), - self.preparer.quote( - self._validate_identifier( - index.new_name, True), index.quote))) - else: - # SA >= 0.6.5 - self.append("ALTER INDEX %s RENAME TO %s" % ( - self.preparer.quote( - self._index_identifier( - index.name), index.quote), - self.preparer.quote( - self._index_identifier( - index.new_name), index.quote))) - self.execute() - - def visit_column(self, delta): - """Rename/change a column.""" - # ALTER COLUMN is implemented as several ALTER statements - keys = delta.keys() - if 'type' in keys: - self._run_subvisit(delta, self._visit_column_type) - if 'nullable' in keys: - self._run_subvisit(delta, self._visit_column_nullable) - if 'server_default' in keys: - # Skip 'default': only handle server-side defaults, others - # are managed by the app, not the db. - self._run_subvisit(delta, self._visit_column_default) - if 'name' in keys: - self._run_subvisit(delta, self._visit_column_name, start_alter=False) - - def _run_subvisit(self, delta, func, start_alter=True): - """Runs visit method based on what needs to be changed on column""" - table = self._to_table(delta.table) - col_name = delta.current_name - if start_alter: - self.start_alter_column(table, col_name) - ret = func(table, delta.result_column, delta) - self.execute() - - def start_alter_column(self, table, col_name): - """Starts ALTER COLUMN""" - self.start_alter_table(table) - self.append("ALTER COLUMN %s " % self.preparer.quote(col_name, table.quote)) - - def _visit_column_nullable(self, table, column, delta): - nullable = delta['nullable'] - if nullable: - self.append("DROP NOT NULL") - else: - self.append("SET NOT NULL") - - def _visit_column_default(self, table, column, delta): - default_text = self.get_column_default_string(column) - if default_text is not None: - self.append("SET DEFAULT %s" % default_text) - else: - self.append("DROP DEFAULT") - - def _visit_column_type(self, table, column, delta): - type_ = delta['type'] - type_text = str(type_.compile(dialect=self.dialect)) - self.append("TYPE %s" % type_text) - - def _visit_column_name(self, table, column, delta): - self.start_alter_table(table) - col_name = self.preparer.quote(delta.current_name, table.quote) - new_name = self.preparer.format_column(delta.result_column) - self.append('RENAME COLUMN %s TO %s' % (col_name, new_name)) - - -class ANSIConstraintCommon(AlterTableVisitor): - """ - Migrate's constraints require a separate creation function from - SA's: Migrate's constraints are created independently of a table; - SA's are created at the same time as the table. - """ - - def get_constraint_name(self, cons): - """Gets a name for the given constraint. - - If the name is already set it will be used otherwise the - constraint's :meth:`autoname ` - method is used. 
-
-        :param cons: constraint object
-        """
-        if cons.name is not None:
-            ret = cons.name
-        else:
-            ret = cons.name = cons.autoname()
-        return self.preparer.quote(ret, cons.quote)
-
-    def visit_migrate_primary_key_constraint(self, *p, **k):
-        self._visit_constraint(*p, **k)
-
-    def visit_migrate_foreign_key_constraint(self, *p, **k):
-        self._visit_constraint(*p, **k)
-
-    def visit_migrate_check_constraint(self, *p, **k):
-        self._visit_constraint(*p, **k)
-
-    def visit_migrate_unique_constraint(self, *p, **k):
-        self._visit_constraint(*p, **k)
-
-class ANSIConstraintGenerator(ANSIConstraintCommon, SchemaGenerator):
-    def _visit_constraint(self, constraint):
-        constraint.name = self.get_constraint_name(constraint)
-        self.append(self.process(AddConstraint(constraint)))
-        self.execute()
-
-class ANSIConstraintDropper(ANSIConstraintCommon, SchemaDropper):
-    def _visit_constraint(self, constraint):
-        constraint.name = self.get_constraint_name(constraint)
-        self.append(self.process(DropConstraint(constraint, cascade=constraint.cascade)))
-        self.execute()
-
-
-class ANSIDialect(DefaultDialect):
-    columngenerator = ANSIColumnGenerator
-    columndropper = ANSIColumnDropper
-    schemachanger = ANSISchemaChanger
-    constraintgenerator = ANSIConstraintGenerator
-    constraintdropper = ANSIConstraintDropper
diff --git a/libs/migrate/changeset/constraint.py b/libs/migrate/changeset/constraint.py
deleted file mode 100644
index 96407bd7..00000000
--- a/libs/migrate/changeset/constraint.py
+++ /dev/null
@@ -1,199 +0,0 @@
-"""
-   This module defines standalone schema constraint classes.
-"""
-from sqlalchemy import schema
-
-from migrate.exceptions import *
-
-class ConstraintChangeset(object):
-    """Base class for Constraint classes."""
-
-    def _normalize_columns(self, cols, table_name=False):
-        """Given: column objects or names; return col names and
-        (maybe) a table"""
-        colnames = []
-        table = None
-        for col in cols:
-            if isinstance(col, schema.Column):
-                if col.table is not None and table is None:
-                    table = col.table
-                if table_name:
-                    col = '.'.join((col.table.name, col.name))
-                else:
-                    col = col.name
-            colnames.append(col)
-        return colnames, table
-
-    def __do_imports(self, visitor_name, *a, **kw):
-        engine = kw.pop('engine', self.table.bind)
-        from migrate.changeset.databases.visitor import (get_engine_visitor,
-                                                         run_single_visitor)
-        visitorcallable = get_engine_visitor(engine, visitor_name)
-        run_single_visitor(engine, visitorcallable, self, *a, **kw)
-
-    def create(self, *a, **kw):
-        """Create the constraint in the database.
-
-        :param engine: the database engine to use. If this is \
-        :keyword:`None` the instance's engine will be used
-        :type engine: :class:`sqlalchemy.engine.base.Engine`
-        :param connection: reuse connection instead of creating a new one.
-        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
-        """
-        # TODO: set the parent here instead of in __init__
-        self.__do_imports('constraintgenerator', *a, **kw)
-
-    def drop(self, *a, **kw):
-        """Drop the constraint from the database.
-
-        :param engine: the database engine to use. If this is
-        :keyword:`None` the instance's engine will be used
-        :param cascade: Issue CASCADE drop if database supports it
-        :type engine: :class:`sqlalchemy.engine.base.Engine`
-        :type cascade: bool
-        :param connection: reuse connection instead of creating a new one.
-        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
-        :returns: Instance with cleared columns
-        """
-        self.cascade = kw.pop('cascade', False)
-        self.__do_imports('constraintdropper', *a, **kw)
-        # the spirit of Constraint objects is that they
-        # are immutable (just like in a DB. they're only ADDed
-        # or DROPped).
-        #self.columns.clear()
-        return self
-
-
-class PrimaryKeyConstraint(ConstraintChangeset, schema.PrimaryKeyConstraint):
-    """Construct PrimaryKeyConstraint
-
-    Migrate's additional parameters:
-
-    :param cols: Columns in constraint.
-    :param table: If columns are passed as strings, this kw is required
-    :type table: Table instance
-    :type cols: strings or Column instances
-    """
-
-    __migrate_visit_name__ = 'migrate_primary_key_constraint'
-
-    def __init__(self, *cols, **kwargs):
-        colnames, table = self._normalize_columns(cols)
-        table = kwargs.pop('table', table)
-        super(PrimaryKeyConstraint, self).__init__(*colnames, **kwargs)
-        if table is not None:
-            self._set_parent(table)
-
-
-    def autoname(self):
-        """Mimic the database's automatic constraint names"""
-        return "%s_pkey" % self.table.name
-
-
-class ForeignKeyConstraint(ConstraintChangeset, schema.ForeignKeyConstraint):
-    """Construct ForeignKeyConstraint
-
-    Migrate's additional parameters:
-
-    :param columns: Columns in constraint
-    :param refcolumns: Columns that this FK refers to in another table.
-    :param table: If columns are passed as strings, this kw is required
-    :type table: Table instance
-    :type columns: list of strings or Column instances
-    :type refcolumns: list of strings or Column instances
-    """
-
-    __migrate_visit_name__ = 'migrate_foreign_key_constraint'
-
-    def __init__(self, columns, refcolumns, *args, **kwargs):
-        colnames, table = self._normalize_columns(columns)
-        table = kwargs.pop('table', table)
-        refcolnames, reftable = self._normalize_columns(refcolumns,
-                                                        table_name=True)
-        super(ForeignKeyConstraint, self).__init__(colnames, refcolnames, *args,
-                                                   **kwargs)
-        if table is not None:
-            self._set_parent(table)
-
-    @property
-    def referenced(self):
-        return [e.column for e in self.elements]
-
-    @property
-    def reftable(self):
-        return self.referenced[0].table
-
-    def autoname(self):
-        """Mimic the database's automatic constraint names"""
-        if hasattr(self.columns, 'keys'):
-            # SA <= 0.5
-            firstcol = self.columns[self.columns.keys()[0]]
-            ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
-                table=firstcol.table.name,
-                firstcolumn=firstcol.name,)
-        else:
-            # SA >= 0.6
-            ret = "%(table)s_%(firstcolumn)s_fkey" % dict(
-                table=self.table.name,
-                firstcolumn=self.columns[0],)
-        return ret
-
-
-class CheckConstraint(ConstraintChangeset, schema.CheckConstraint):
-    """Construct CheckConstraint
-
-    Migrate's additional parameters:
-
-    :param sqltext: Plain SQL text to check condition
-    :param columns: If no name is supplied, you must supply this kw\
-    to autoname the constraint
-    :param table: If columns are passed as strings, this kw is required
-    :type table: Table instance
-    :type columns: list of Columns instances
-    :type sqltext: string
-    """
-
-    __migrate_visit_name__ = 'migrate_check_constraint'
-
-    def __init__(self, sqltext, *args, **kwargs):
-        cols = kwargs.pop('columns', [])
-        if not cols and not kwargs.get('name', False):
-            raise InvalidConstraintError('You must either set the "name" '
-                'parameter or "columns" to autogenerate it.')
-        colnames, table = self._normalize_columns(cols)
-        table = kwargs.pop('table', table)
-        schema.CheckConstraint.__init__(self, sqltext, *args, **kwargs)
-
if table is not None: - self._set_parent(table) - self.colnames = colnames - - def autoname(self): - return "%(table)s_%(cols)s_check" % \ - dict(table=self.table.name, cols="_".join(self.colnames)) - - -class UniqueConstraint(ConstraintChangeset, schema.UniqueConstraint): - """Construct UniqueConstraint - - Migrate's additional parameters: - - :param cols: Columns in constraint. - :param table: If columns are passed as strings, this kw is required - :type table: Table instance - :type cols: strings or Column instances - - .. versionadded:: 0.6.0 - """ - - __migrate_visit_name__ = 'migrate_unique_constraint' - - def __init__(self, *cols, **kwargs): - self.colnames, table = self._normalize_columns(cols) - table = kwargs.pop('table', table) - super(UniqueConstraint, self).__init__(*self.colnames, **kwargs) - if table is not None: - self._set_parent(table) - - def autoname(self): - """Mimic the database's automatic constraint names""" - return "%s_%s_key" % (self.table.name, self.colnames[0]) diff --git a/libs/migrate/changeset/databases/__init__.py b/libs/migrate/changeset/databases/__init__.py deleted file mode 100644 index 85469183..00000000 --- a/libs/migrate/changeset/databases/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -""" - This module contains database dialect specific changeset - implementations. -""" -__all__ = [ - 'postgres', - 'sqlite', - 'mysql', - 'oracle', -] diff --git a/libs/migrate/changeset/databases/firebird.py b/libs/migrate/changeset/databases/firebird.py deleted file mode 100644 index 226728b9..00000000 --- a/libs/migrate/changeset/databases/firebird.py +++ /dev/null @@ -1,93 +0,0 @@ -""" - Firebird database specific implementations of changeset classes. -""" -from sqlalchemy.databases import firebird as sa_base -from sqlalchemy.schema import PrimaryKeyConstraint -from migrate import exceptions -from migrate.changeset import ansisql - - -FBSchemaGenerator = sa_base.FBDDLCompiler - -class FBColumnGenerator(FBSchemaGenerator, ansisql.ANSIColumnGenerator): - """Firebird column generator implementation.""" - - -class FBColumnDropper(ansisql.ANSIColumnDropper): - """Firebird column dropper implementation.""" - - def visit_column(self, column): - """Firebird supports 'DROP col' instead of 'DROP COLUMN col' syntax - - Drop primary key and unique constraints if dropped column is referencing it.""" - if column.primary_key: - if column.table.primary_key.columns.contains_column(column): - column.table.primary_key.drop() - # TODO: recreate primary key if it references more than this column - - for index in column.table.indexes: - # "column in index.columns" causes problems as all - # column objects compare equal and return a SQL expression - if column.name in [col.name for col in index.columns]: - index.drop() - # TODO: recreate index if it references more than this column - - for cons in column.table.constraints: - if isinstance(cons,PrimaryKeyConstraint): - # will be deleted only when the column its on - # is deleted! 
-                continue
-
-            should_drop = column.name in cons.columns
-            if should_drop:
-                self.start_alter_table(column)
-                self.append("DROP CONSTRAINT ")
-                self.append(self.preparer.format_constraint(cons))
-                self.execute()
-            # TODO: recreate unique constraint if it references more than this column
-
-        self.start_alter_table(column)
-        self.append('DROP %s' % self.preparer.format_column(column))
-        self.execute()
-
-
-class FBSchemaChanger(ansisql.ANSISchemaChanger):
-    """Firebird schema changer implementation."""
-
-    def visit_table(self, table):
-        """Rename table not supported"""
-        raise exceptions.NotSupportedError(
-            "Firebird does not support renaming tables.")
-
-    def _visit_column_name(self, table, column, delta):
-        self.start_alter_table(table)
-        col_name = self.preparer.quote(delta.current_name, table.quote)
-        new_name = self.preparer.format_column(delta.result_column)
-        self.append('ALTER COLUMN %s TO %s' % (col_name, new_name))
-
-    def _visit_column_nullable(self, table, column, delta):
-        """Changing NULL is not supported"""
-        # TODO: http://www.firebirdfaq.org/faq103/
-        raise exceptions.NotSupportedError(
-            "Firebird does not support altering NULL behavior.")
-
-
-class FBConstraintGenerator(ansisql.ANSIConstraintGenerator):
-    """Firebird constraint generator implementation."""
-
-
-class FBConstraintDropper(ansisql.ANSIConstraintDropper):
-    """Firebird constraint dropper implementation."""
-
-    def cascade_constraint(self, constraint):
-        """Cascading constraints is not supported"""
-        raise exceptions.NotSupportedError(
-            "Firebird does not support cascading constraints")
-
-
-class FBDialect(ansisql.ANSIDialect):
-    columngenerator = FBColumnGenerator
-    columndropper = FBColumnDropper
-    schemachanger = FBSchemaChanger
-    constraintgenerator = FBConstraintGenerator
-    constraintdropper = FBConstraintDropper
diff --git a/libs/migrate/changeset/databases/mysql.py b/libs/migrate/changeset/databases/mysql.py
deleted file mode 100644
index 6987b4bb..00000000
--- a/libs/migrate/changeset/databases/mysql.py
+++ /dev/null
@@ -1,65 +0,0 @@
-"""
-   MySQL database specific implementations of changeset classes.
-""" - -from sqlalchemy.databases import mysql as sa_base -from sqlalchemy import types as sqltypes - -from migrate import exceptions -from migrate.changeset import ansisql - - -MySQLSchemaGenerator = sa_base.MySQLDDLCompiler - -class MySQLColumnGenerator(MySQLSchemaGenerator, ansisql.ANSIColumnGenerator): - pass - - -class MySQLColumnDropper(ansisql.ANSIColumnDropper): - pass - - -class MySQLSchemaChanger(MySQLSchemaGenerator, ansisql.ANSISchemaChanger): - - def visit_column(self, delta): - table = delta.table - colspec = self.get_column_specification(delta.result_column) - if delta.result_column.autoincrement: - primary_keys = [c for c in table.primary_key.columns - if (c.autoincrement and - isinstance(c.type, sqltypes.Integer) and - not c.foreign_keys)] - - if primary_keys: - first = primary_keys.pop(0) - if first.name == delta.current_name: - colspec += " AUTO_INCREMENT" - old_col_name = self.preparer.quote(delta.current_name, table.quote) - - self.start_alter_table(table) - - self.append("CHANGE COLUMN %s " % old_col_name) - self.append(colspec) - self.execute() - - def visit_index(self, param): - # If MySQL can do this, I can't find how - raise exceptions.NotSupportedError("MySQL cannot rename indexes") - - -class MySQLConstraintGenerator(ansisql.ANSIConstraintGenerator): - pass - - -class MySQLConstraintDropper(MySQLSchemaGenerator, ansisql.ANSIConstraintDropper): - def visit_migrate_check_constraint(self, *p, **k): - raise exceptions.NotSupportedError("MySQL does not support CHECK" - " constraints, use triggers instead.") - - -class MySQLDialect(ansisql.ANSIDialect): - columngenerator = MySQLColumnGenerator - columndropper = MySQLColumnDropper - schemachanger = MySQLSchemaChanger - constraintgenerator = MySQLConstraintGenerator - constraintdropper = MySQLConstraintDropper diff --git a/libs/migrate/changeset/databases/oracle.py b/libs/migrate/changeset/databases/oracle.py deleted file mode 100644 index 2f16b5b5..00000000 --- a/libs/migrate/changeset/databases/oracle.py +++ /dev/null @@ -1,108 +0,0 @@ -""" - Oracle database specific implementations of changeset classes. -""" -import sqlalchemy as sa -from sqlalchemy.databases import oracle as sa_base - -from migrate import exceptions -from migrate.changeset import ansisql - - -OracleSchemaGenerator = sa_base.OracleDDLCompiler - - -class OracleColumnGenerator(OracleSchemaGenerator, ansisql.ANSIColumnGenerator): - pass - - -class OracleColumnDropper(ansisql.ANSIColumnDropper): - pass - - -class OracleSchemaChanger(OracleSchemaGenerator, ansisql.ANSISchemaChanger): - - def get_column_specification(self, column, **kwargs): - # Ignore the NOT NULL generated - override_nullable = kwargs.pop('override_nullable', None) - if override_nullable: - orig = column.nullable - column.nullable = True - ret = super(OracleSchemaChanger, self).get_column_specification( - column, **kwargs) - if override_nullable: - column.nullable = orig - return ret - - def visit_column(self, delta): - keys = delta.keys() - - if 'name' in keys: - self._run_subvisit(delta, - self._visit_column_name, - start_alter=False) - - if len(set(('type', 'nullable', 'server_default')).intersection(keys)): - self._run_subvisit(delta, - self._visit_column_change, - start_alter=False) - - def _visit_column_change(self, table, column, delta): - # Oracle cannot drop a default once created, but it can set it - # to null. 
We'll do that if default=None - # http://forums.oracle.com/forums/message.jspa?messageID=1273234#1273234 - dropdefault_hack = (column.server_default is None \ - and 'server_default' in delta.keys()) - # Oracle apparently doesn't like it when we say "not null" if - # the column's already not null. Fudge it, so we don't need a - # new function - notnull_hack = ((not column.nullable) \ - and ('nullable' not in delta.keys())) - # We need to specify NULL if we're removing a NOT NULL - # constraint - null_hack = (column.nullable and ('nullable' in delta.keys())) - - if dropdefault_hack: - column.server_default = sa.PassiveDefault(sa.sql.null()) - if notnull_hack: - column.nullable = True - colspec = self.get_column_specification(column, - override_nullable=null_hack) - if null_hack: - colspec += ' NULL' - if notnull_hack: - column.nullable = False - if dropdefault_hack: - column.server_default = None - - self.start_alter_table(table) - self.append("MODIFY (") - self.append(colspec) - self.append(")") - - -class OracleConstraintCommon(object): - - def get_constraint_name(self, cons): - # Oracle constraints can't guess their name like other DBs - if not cons.name: - raise exceptions.NotSupportedError( - "Oracle constraint names must be explicitly stated") - return cons.name - - -class OracleConstraintGenerator(OracleConstraintCommon, - ansisql.ANSIConstraintGenerator): - pass - - -class OracleConstraintDropper(OracleConstraintCommon, - ansisql.ANSIConstraintDropper): - pass - - -class OracleDialect(ansisql.ANSIDialect): - columngenerator = OracleColumnGenerator - columndropper = OracleColumnDropper - schemachanger = OracleSchemaChanger - constraintgenerator = OracleConstraintGenerator - constraintdropper = OracleConstraintDropper diff --git a/libs/migrate/changeset/databases/postgres.py b/libs/migrate/changeset/databases/postgres.py deleted file mode 100644 index 10ea094c..00000000 --- a/libs/migrate/changeset/databases/postgres.py +++ /dev/null @@ -1,42 +0,0 @@ -""" - `PostgreSQL`_ database specific implementations of changeset classes. - - .. _`PostgreSQL`: http://www.postgresql.org/ -""" -from migrate.changeset import ansisql - -from sqlalchemy.databases import postgresql as sa_base -PGSchemaGenerator = sa_base.PGDDLCompiler - - -class PGColumnGenerator(PGSchemaGenerator, ansisql.ANSIColumnGenerator): - """PostgreSQL column generator implementation.""" - pass - - -class PGColumnDropper(ansisql.ANSIColumnDropper): - """PostgreSQL column dropper implementation.""" - pass - - -class PGSchemaChanger(ansisql.ANSISchemaChanger): - """PostgreSQL schema changer implementation.""" - pass - - -class PGConstraintGenerator(ansisql.ANSIConstraintGenerator): - """PostgreSQL constraint generator implementation.""" - pass - - -class PGConstraintDropper(ansisql.ANSIConstraintDropper): - """PostgreSQL constaint dropper implementation.""" - pass - - -class PGDialect(ansisql.ANSIDialect): - columngenerator = PGColumnGenerator - columndropper = PGColumnDropper - schemachanger = PGSchemaChanger - constraintgenerator = PGConstraintGenerator - constraintdropper = PGConstraintDropper diff --git a/libs/migrate/changeset/databases/sqlite.py b/libs/migrate/changeset/databases/sqlite.py deleted file mode 100644 index 5ddd3f17..00000000 --- a/libs/migrate/changeset/databases/sqlite.py +++ /dev/null @@ -1,153 +0,0 @@ -""" - `SQLite`_ database specific implementations of changeset classes. - - .. 
_`SQLite`: http://www.sqlite.org/ -""" -from UserDict import DictMixin -from copy import copy - -from sqlalchemy.databases import sqlite as sa_base - -from migrate import exceptions -from migrate.changeset import ansisql - - -SQLiteSchemaGenerator = sa_base.SQLiteDDLCompiler - - -class SQLiteCommon(object): - - def _not_supported(self, op): - raise exceptions.NotSupportedError("SQLite does not support " - "%s; see http://www.sqlite.org/lang_altertable.html" % op) - - -class SQLiteHelper(SQLiteCommon): - - def recreate_table(self,table,column=None,delta=None): - table_name = self.preparer.format_table(table) - - # we remove all indexes so as not to have - # problems during copy and re-create - for index in table.indexes: - index.drop() - - self.append('ALTER TABLE %s RENAME TO migration_tmp' % table_name) - self.execute() - - insertion_string = self._modify_table(table, column, delta) - - table.create(bind=self.connection) - self.append(insertion_string % {'table_name': table_name}) - self.execute() - self.append('DROP TABLE migration_tmp') - self.execute() - - def visit_column(self, delta): - if isinstance(delta, DictMixin): - column = delta.result_column - table = self._to_table(delta.table) - else: - column = delta - table = self._to_table(column.table) - self.recreate_table(table,column,delta) - -class SQLiteColumnGenerator(SQLiteSchemaGenerator, - ansisql.ANSIColumnGenerator, - # at the end so we get the normal - # visit_column by default - SQLiteHelper, - SQLiteCommon - ): - """SQLite ColumnGenerator""" - - def _modify_table(self, table, column, delta): - columns = ' ,'.join(map( - self.preparer.format_column, - [c for c in table.columns if c.name!=column.name])) - return ('INSERT INTO %%(table_name)s (%(cols)s) ' - 'SELECT %(cols)s from migration_tmp')%{'cols':columns} - - def visit_column(self,column): - if column.foreign_keys: - SQLiteHelper.visit_column(self,column) - else: - super(SQLiteColumnGenerator,self).visit_column(column) - -class SQLiteColumnDropper(SQLiteHelper, ansisql.ANSIColumnDropper): - """SQLite ColumnDropper""" - - def _modify_table(self, table, column, delta): - - columns = ' ,'.join(map(self.preparer.format_column, table.columns)) - return 'INSERT INTO %(table_name)s SELECT ' + columns + \ - ' from migration_tmp' - - def visit_column(self,column): - # For SQLite, we *have* to remove the column here so the table - # is re-created properly. 
- column.remove_from_table(column.table,unset_table=False) - super(SQLiteColumnDropper,self).visit_column(column) - - -class SQLiteSchemaChanger(SQLiteHelper, ansisql.ANSISchemaChanger): - """SQLite SchemaChanger""" - - def _modify_table(self, table, column, delta): - return 'INSERT INTO %(table_name)s SELECT * from migration_tmp' - - def visit_index(self, index): - """Does not support ALTER INDEX""" - self._not_supported('ALTER INDEX') - - -class SQLiteConstraintGenerator(ansisql.ANSIConstraintGenerator, SQLiteHelper, SQLiteCommon): - - def visit_migrate_primary_key_constraint(self, constraint): - tmpl = "CREATE UNIQUE INDEX %s ON %s ( %s )" - cols = ', '.join(map(self.preparer.format_column, constraint.columns)) - tname = self.preparer.format_table(constraint.table) - name = self.get_constraint_name(constraint) - msg = tmpl % (name, tname, cols) - self.append(msg) - self.execute() - - def _modify_table(self, table, column, delta): - return 'INSERT INTO %(table_name)s SELECT * from migration_tmp' - - def visit_migrate_foreign_key_constraint(self, *p, **k): - self.recreate_table(p[0].table) - - def visit_migrate_unique_constraint(self, *p, **k): - self.recreate_table(p[0].table) - - -class SQLiteConstraintDropper(ansisql.ANSIColumnDropper, - SQLiteCommon, - ansisql.ANSIConstraintCommon): - - def visit_migrate_primary_key_constraint(self, constraint): - tmpl = "DROP INDEX %s " - name = self.get_constraint_name(constraint) - msg = tmpl % (name) - self.append(msg) - self.execute() - - def visit_migrate_foreign_key_constraint(self, *p, **k): - self._not_supported('ALTER TABLE DROP CONSTRAINT') - - def visit_migrate_check_constraint(self, *p, **k): - self._not_supported('ALTER TABLE DROP CONSTRAINT') - - def visit_migrate_unique_constraint(self, *p, **k): - self._not_supported('ALTER TABLE DROP CONSTRAINT') - - -# TODO: technically primary key is a NOT NULL + UNIQUE constraint, should add NOT NULL to index - -class SQLiteDialect(ansisql.ANSIDialect): - columngenerator = SQLiteColumnGenerator - columndropper = SQLiteColumnDropper - schemachanger = SQLiteSchemaChanger - constraintgenerator = SQLiteConstraintGenerator - constraintdropper = SQLiteConstraintDropper diff --git a/libs/migrate/changeset/databases/visitor.py b/libs/migrate/changeset/databases/visitor.py deleted file mode 100644 index 228b4d3f..00000000 --- a/libs/migrate/changeset/databases/visitor.py +++ /dev/null @@ -1,78 +0,0 @@ -""" - Module for visitor class mapping. -""" -import sqlalchemy as sa - -from migrate.changeset import ansisql -from migrate.changeset.databases import (sqlite, - postgres, - mysql, - oracle, - firebird) - - -# Map SA dialects to the corresponding Migrate extensions -DIALECTS = { - "default": ansisql.ANSIDialect, - "sqlite": sqlite.SQLiteDialect, - "postgres": postgres.PGDialect, - "postgresql": postgres.PGDialect, - "mysql": mysql.MySQLDialect, - "oracle": oracle.OracleDialect, - "firebird": firebird.FBDialect, -} - - -def get_engine_visitor(engine, name): - """ - Get the visitor implementation for the given database engine. - - :param engine: SQLAlchemy Engine - :param name: Name of the visitor - :type name: string - :type engine: Engine - :returns: visitor - """ - # TODO: link to supported visitors - return get_dialect_visitor(engine.dialect, name) - - -def get_dialect_visitor(sa_dialect, name): - """ - Get the visitor implementation for the given dialect. - - Finds the visitor implementation based on the dialect class and - returns and instance initialized with the given name. 
-
-    Binds dialect specific preparer to visitor.
-    """
-
-    # map sa dialect to migrate dialect and return visitor
-    sa_dialect_name = getattr(sa_dialect, 'name', 'default')
-    migrate_dialect_cls = DIALECTS[sa_dialect_name]
-    visitor = getattr(migrate_dialect_cls, name)
-
-    # bind preparer
-    visitor.preparer = sa_dialect.preparer(sa_dialect)
-
-    return visitor
-
-def run_single_visitor(engine, visitorcallable, element,
-    connection=None, **kwargs):
-    """Taken from :meth:`sqlalchemy.engine.base.Engine._run_single_visitor`
-    with support for migrate visitors.
-    """
-    if connection is None:
-        conn = engine.contextual_connect(close_with_result=False)
-    else:
-        conn = connection
-    visitor = visitorcallable(engine.dialect, conn)
-    try:
-        if hasattr(element, '__migrate_visit_name__'):
-            fn = getattr(visitor, 'visit_' + element.__migrate_visit_name__)
-        else:
-            fn = getattr(visitor, 'visit_' + element.__visit_name__)
-        fn(element, **kwargs)
-    finally:
-        if connection is None:
-            conn.close()
diff --git a/libs/migrate/changeset/schema.py b/libs/migrate/changeset/schema.py
deleted file mode 100644
index c467cc53..00000000
--- a/libs/migrate/changeset/schema.py
+++ /dev/null
@@ -1,655 +0,0 @@
-"""
-   Schema module providing common schema operations.
-"""
-import warnings
-
-from UserDict import DictMixin
-
-import sqlalchemy
-
-from sqlalchemy.schema import ForeignKeyConstraint
-from sqlalchemy.schema import UniqueConstraint
-
-from migrate.exceptions import *
-from migrate.changeset import SQLA_07
-from migrate.changeset.databases.visitor import (get_engine_visitor,
-                                                 run_single_visitor)
-
-
-__all__ = [
-    'create_column',
-    'drop_column',
-    'alter_column',
-    'rename_table',
-    'rename_index',
-    'ChangesetTable',
-    'ChangesetColumn',
-    'ChangesetIndex',
-    'ChangesetDefaultClause',
-    'ColumnDelta',
-]
-
-def create_column(column, table=None, *p, **kw):
-    """Create a column, given the table.
-
-    API to :meth:`ChangesetColumn.create`.
-    """
-    if table is not None:
-        return table.create_column(column, *p, **kw)
-    return column.create(*p, **kw)
-
-
-def drop_column(column, table=None, *p, **kw):
-    """Drop a column, given the table.
-
-    API to :meth:`ChangesetColumn.drop`.
-    """
-    if table is not None:
-        return table.drop_column(column, *p, **kw)
-    return column.drop(*p, **kw)
-
-
-def rename_table(table, name, engine=None, **kw):
-    """Rename a table.
-
-    If a Table instance is given, engine is not used.
-
-    API to :meth:`ChangesetTable.rename`.
-
-    :param table: Table to be renamed.
-    :param name: New name for Table.
-    :param engine: Engine instance.
-    :type table: string or Table instance
-    :type name: string
-    :type engine: obj
-    """
-    table = _to_table(table, engine)
-    table.rename(name, **kw)
-
-
-def rename_index(index, name, table=None, engine=None, **kw):
-    """Rename an index.
-
-    If an Index instance is given,
-    table and engine are not used.
-
-    API to :meth:`ChangesetIndex.rename`.
-
-    :param index: Index to be renamed.
-    :param name: New name for index.
-    :param table: Table to which the Index refers.
-    :param engine: Engine instance.
-    :type index: string or Index instance
-    :type name: string
-    :type table: string or Table instance
-    :type engine: obj
-    """
-    index = _to_index(index, table, engine)
-    index.rename(name, **kw)
-
-
-def alter_column(*p, **k):
-    """Alter a column.
-
-    This is a helper function that creates a :class:`ColumnDelta` and
-    runs it.
-
-    :argument column:
-        The name of the column to be altered or a
-        :class:`ChangesetColumn` column representing it.
-
-    :param table:
-      A :class:`~sqlalchemy.schema.Table` or table name for
-      the table where the column will be changed.
-
-    :param engine:
-      The :class:`~sqlalchemy.engine.base.Engine` to use for table
-      reflection and schema alterations.
-
-    :returns: A :class:`ColumnDelta` instance representing the change.
-
-
-    """
-
-    if 'table' not in k and isinstance(p[0], sqlalchemy.Column):
-        k['table'] = p[0].table
-    if 'engine' not in k:
-        k['engine'] = k['table'].bind
-
-    # deprecation
-    if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
-        warnings.warn(
-            "Passing a Column object to alter_column is deprecated."
-            " Just pass in keyword parameters instead.",
-            MigrateDeprecationWarning
-            )
-    engine = k['engine']
-
-    # enough tests seem to break when metadata is always altered
-    # that this crutch has to be left in until they can be sorted
-    # out
-    k['alter_metadata']=True
-
-    delta = ColumnDelta(*p, **k)
-
-    visitorcallable = get_engine_visitor(engine, 'schemachanger')
-    engine._run_visitor(visitorcallable, delta)
-
-    return delta
-
-
-def _to_table(table, engine=None):
-    """Return if instance of Table, else construct new with metadata"""
-    if isinstance(table, sqlalchemy.Table):
-        return table
-
-    # Given: table name, maybe an engine
-    meta = sqlalchemy.MetaData()
-    if engine is not None:
-        meta.bind = engine
-    return sqlalchemy.Table(table, meta)
-
-
-def _to_index(index, table=None, engine=None):
-    """Return if instance of Index, else construct new with metadata"""
-    if isinstance(index, sqlalchemy.Index):
-        return index
-
-    # Given: index name; table name required
-    table = _to_table(table, engine)
-    ret = sqlalchemy.Index(index)
-    ret.table = table
-    return ret
-
-
-class ColumnDelta(DictMixin, sqlalchemy.schema.SchemaItem):
-    """Extracts the differences between two columns/column-parameters
-
-    May receive parameters arranged in several different ways:
-
-    * **current_column, new_column, \*p, \*\*kw**
-        Additional parameters can be specified to override column
-        differences.
-
-    * **current_column, \*p, \*\*kw**
-        Additional parameters alter current_column. Table name is extracted
-        from the current_column object.
-        Name is changed to current_column.name from current_name,
-        if current_name is specified.
-
-    * **current_col_name, \*p, \*\*kw**
-        The table kw must be specified.
-
-    :param table: Table to which the current Column should be bound.\
-    If a table name is given, reflection will be used.
-    :type table: string or Table instance
-
-    :param metadata: A :class:`MetaData` instance to store
-    reflected table names
-
-    :param engine: When reflecting tables, either engine or metadata must \
-    be specified to acquire engine object.
-    :type engine: :class:`Engine` instance
-    :returns: A :class:`ColumnDelta` instance exposing the altered attributes of \
-    `result_column` through a :func:`dict`-like object.
-
-    * :class:`ColumnDelta`.result_column is the altered column with new attributes
-
-    * :class:`ColumnDelta`.current_name is the current name of the column in the db
-
-
-    """
-
-    # Column attributes that can be altered
-    diff_keys = ('name', 'type', 'primary_key', 'nullable',
-        'server_onupdate', 'server_default', 'autoincrement')
-    diffs = dict()
-    __visit_name__ = 'column'
-
-    def __init__(self, *p, **kw):
-        # 'alter_metadata' is not a public api. It exists purely
-        # as a crutch until the tests that fail when 'alter_metadata'
-        # behaviour always happens can be sorted out
-        self.alter_metadata = kw.pop("alter_metadata", False)
-
-        self.meta = kw.pop("metadata", None)
-        self.engine = kw.pop("engine", None)
-
-        # Things are initialized differently depending on how many column
-        # parameters are given. Figure out how many and call the appropriate
-        # method.
-        if len(p) >= 1 and isinstance(p[0], sqlalchemy.Column):
-            # At least one column specified
-            if len(p) >= 2 and isinstance(p[1], sqlalchemy.Column):
-                # Two columns specified
-                diffs = self.compare_2_columns(*p, **kw)
-            else:
-                # Exactly one column specified
-                diffs = self.compare_1_column(*p, **kw)
-        else:
-            # Zero columns specified
-            if not len(p) or not isinstance(p[0], basestring):
-                raise ValueError("First argument must be column name")
-            diffs = self.compare_parameters(*p, **kw)
-
-        self.apply_diffs(diffs)
-
-    def __repr__(self):
-        return '<ColumnDelta altermetadata=%r, %s>' % (
-            self.alter_metadata,
-            super(ColumnDelta, self).__repr__()
-            )
-
-    def __getitem__(self, key):
-        if key not in self.keys():
-            raise KeyError("No such diff key, available: %s" % self.diffs )
-        return getattr(self.result_column, key)
-
-    def __setitem__(self, key, value):
-        if key not in self.keys():
-            raise KeyError("No such diff key, available: %s" % self.diffs )
-        setattr(self.result_column, key, value)
-
-    def __delitem__(self, key):
-        raise NotImplementedError
-
-    def keys(self):
-        return self.diffs.keys()
-
-    def compare_parameters(self, current_name, *p, **k):
-        """Compares Column objects with reflection"""
-        self.table = k.pop('table')
-        self.result_column = self._table.c.get(current_name)
-        if len(p):
-            k = self._extract_parameters(p, k, self.result_column)
-        return k
-
-    def compare_1_column(self, col, *p, **k):
-        """Compares one Column object"""
-        self.table = k.pop('table', None)
-        if self.table is None:
-            self.table = col.table
-        self.result_column = col
-        if len(p):
-            k = self._extract_parameters(p, k, self.result_column)
-        return k
-
-    def compare_2_columns(self, old_col, new_col, *p, **k):
-        """Compares two Column objects"""
-        self.process_column(new_col)
-        self.table = k.pop('table', None)
-        # we cannot use bool() on table in SA06
-        if self.table is None:
-            self.table = old_col.table
-        if self.table is None:
-            self.table = new_col.table
-        self.result_column = old_col
-
-        # set differences
-        # leave out some stuff for later comp
-        for key in (set(self.diff_keys) - set(('type',))):
-            val = getattr(new_col, key, None)
-            if getattr(self.result_column, key, None) != val:
-                k.setdefault(key, val)
-
-        # inspect types
-        if not self.are_column_types_eq(self.result_column.type, new_col.type):
-            k.setdefault('type', new_col.type)
-
-        if len(p):
-            k = self._extract_parameters(p, k, self.result_column)
-        return k
-
-    def apply_diffs(self, diffs):
-        """Populate dict and column object with new values"""
-        self.diffs = diffs
-        for key in self.diff_keys:
-            if key in diffs:
-                setattr(self.result_column, key, diffs[key])
-
-        self.process_column(self.result_column)
-
-        # create an instance of class type if not yet
-        if 'type' in diffs and callable(self.result_column.type):
-            self.result_column.type = self.result_column.type()
-
-        # add column to the table
-        if self.table is not None and self.alter_metadata:
-            self.result_column.add_to_table(self.table)
-
-    def are_column_types_eq(self, old_type, new_type):
-        """Compares two types to be equal"""
-        ret = old_type.__class__ == new_type.__class__
-
-        # String length is a special case
-        if ret and isinstance(new_type, sqlalchemy.types.String):
-            ret = (getattr(old_type, 'length', None) == \
-                       getattr(new_type, 'length', None))
-        return ret
-
-    def _extract_parameters(self, p, k, column):
-        """Extracts data from p and modifies diffs"""
-        p = list(p)
-        while len(p):
-            if isinstance(p[0], basestring):
-                k.setdefault('name', p.pop(0))
-            elif isinstance(p[0], sqlalchemy.types.AbstractType):
-                k.setdefault('type', p.pop(0))
-            elif callable(p[0]):
-                p[0] = p[0]()
-            else:
-                break
-
-        if len(p):
-            new_col = column.copy_fixed()
-            new_col._init_items(*p)
-            k = self.compare_2_columns(column, new_col, **k)
-        return k
-
-    def process_column(self, column):
-        """Processes default values for column"""
-        # XXX: this is a snippet from SA processing of positional parameters
-        toinit = list()
-
-        if column.server_default is not None:
-            if isinstance(column.server_default, sqlalchemy.FetchedValue):
-                toinit.append(column.server_default)
-            else:
-                toinit.append(sqlalchemy.DefaultClause(column.server_default))
-        if column.server_onupdate is not None:
-            if isinstance(column.server_onupdate, sqlalchemy.FetchedValue):
-                toinit.append(column.server_onupdate)
-            else:
-                toinit.append(sqlalchemy.DefaultClause(column.server_onupdate,
-                                            for_update=True))
-        if toinit:
-            column._init_items(*toinit)
-
-    def _get_table(self):
-        return getattr(self, '_table', None)
-
-    def _set_table(self, table):
-        if isinstance(table, basestring):
-            if self.alter_metadata:
-                if not self.meta:
-                    raise ValueError("metadata must be specified for table"
-                        " reflection when using alter_metadata")
-                meta = self.meta
-                if self.engine:
-                    meta.bind = self.engine
-            else:
-                if not self.engine and not self.meta:
-                    raise ValueError("engine or metadata must be specified"
-                        " to reflect tables")
-                if not self.engine:
-                    self.engine = self.meta.bind
-                meta = sqlalchemy.MetaData(bind=self.engine)
-            self._table = sqlalchemy.Table(table, meta, autoload=True)
-        elif isinstance(table, sqlalchemy.Table):
-            self._table = table
-            if not self.alter_metadata:
-                self._table.meta = sqlalchemy.MetaData(bind=self._table.bind)
-
-    def _get_result_column(self):
-        return getattr(self, '_result_column', None)
-
-    def _set_result_column(self, column):
-        """Set Column to Table based on alter_metadata evaluation."""
-        self.process_column(column)
-        if not hasattr(self, 'current_name'):
-            self.current_name = column.name
-        if self.alter_metadata:
-            self._result_column = column
-        else:
-            self._result_column = column.copy_fixed()
-
-    table = property(_get_table, _set_table)
-    result_column = property(_get_result_column, _set_result_column)
-
-
-class ChangesetTable(object):
-    """Changeset extensions to SQLAlchemy tables."""
-
-    def create_column(self, column, *p, **kw):
-        """Creates a column.
-
-        The column parameter may be a column definition or the name of
-        a column in this table.
-
-        API to :meth:`ChangesetColumn.create`
-
-        :param column: Column to be created
-        :type column: Column instance or string
-        """
-        if not isinstance(column, sqlalchemy.Column):
-            # It's a column name
-            column = getattr(self.c, str(column))
-        column.create(table=self, *p, **kw)
-
-    def drop_column(self, column, *p, **kw):
-        """Drop a column, given its name or definition.
-
-        API to :meth:`ChangesetColumn.drop`
-
-        :param column: Column to be dropped
-        :type column: Column instance or string
-        """
-        if not isinstance(column, sqlalchemy.Column):
-            # It's a column name
-            try:
-                column = getattr(self.c, str(column))
-            except AttributeError:
-                # That column isn't part of the table. We don't need
-                # its entire definition to drop the column, just its
-                # name, so create a dummy column with the same name.
-                column = sqlalchemy.Column(str(column), sqlalchemy.Integer())
-        column.drop(table=self, *p, **kw)
-
-    def rename(self, name, connection=None, **kwargs):
-        """Rename this table.
-
-        :param name: New name of the table.
-        :type name: string
-        :param connection: reuse connection instead of creating new one.
-        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
-        """
-        engine = self.bind
-        self.new_name = name
-        visitorcallable = get_engine_visitor(engine, 'schemachanger')
-        run_single_visitor(engine, visitorcallable, self, connection, **kwargs)
-
-        # Fix metadata registration
-        self.name = name
-        self.deregister()
-        self._set_parent(self.metadata)
-
-    def _meta_key(self):
-        """Get the meta key for this table."""
-        return sqlalchemy.schema._get_table_key(self.name, self.schema)
-
-    def deregister(self):
-        """Remove this table from its metadata"""
-        if SQLA_07:
-            self.metadata._remove_table(self.name, self.schema)
-        else:
-            key = self._meta_key()
-            meta = self.metadata
-            if key in meta.tables:
-                del meta.tables[key]
-
-
-class ChangesetColumn(object):
-    """Changeset extensions to SQLAlchemy columns."""
-
-    def alter(self, *p, **k):
-        """Makes a call to :func:`alter_column` for the column this
-        method is called on.
-        """
-        if 'table' not in k:
-            k['table'] = self.table
-        if 'engine' not in k:
-            k['engine'] = k['table'].bind
-        return alter_column(self, *p, **k)
-
-    def create(self, table=None, index_name=None, unique_name=None,
-               primary_key_name=None, populate_default=True, connection=None, **kwargs):
-        """Create this column in the database.
-
-        Assumes the given table exists. ``ALTER TABLE ADD COLUMN``,
-        for most databases.
-
-        :param table: Table instance to create on.
-        :param index_name: Creates :class:`ChangesetIndex` on this column.
-        :param unique_name: Creates :class:\
-`~migrate.changeset.constraint.UniqueConstraint` on this column.
-        :param primary_key_name: Creates :class:\
-`~migrate.changeset.constraint.PrimaryKeyConstraint` on this column.
-        :param populate_default: If True, created column will be \
-populated with defaults
-        :param connection: reuse connection instead of creating new one.
-        :type table: Table instance
-        :type index_name: string
-        :type unique_name: string
-        :type primary_key_name: string
-        :type populate_default: bool
-        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
-
-        :returns: self
-        """
-        self.populate_default = populate_default
-        self.index_name = index_name
-        self.unique_name = unique_name
-        self.primary_key_name = primary_key_name
-        for cons in ('index_name', 'unique_name', 'primary_key_name'):
-            self._check_sanity_constraints(cons)
-
-        self.add_to_table(table)
-        engine = self.table.bind
-        visitorcallable = get_engine_visitor(engine, 'columngenerator')
-        engine._run_visitor(visitorcallable, self, connection, **kwargs)
-
-        # TODO: reuse existing connection
-        if self.populate_default and self.default is not None:
-            stmt = table.update().values({self: engine._execute_default(self.default)})
-            engine.execute(stmt)
-
-        return self
-
-    def drop(self, table=None, connection=None, **kwargs):
-        """Drop this column from the database, leaving its table intact.
-
-        ``ALTER TABLE DROP COLUMN``, for most databases.
-
-        :param connection: reuse connection instead of creating new one.
-        :type connection: :class:`sqlalchemy.engine.base.Connection` instance
-        """
-        if table is not None:
-            self.table = table
-        engine = self.table.bind
-        visitorcallable = get_engine_visitor(engine, 'columndropper')
-        engine._run_visitor(visitorcallable, self, connection, **kwargs)
-        self.remove_from_table(self.table, unset_table=False)
-        self.table = None
-        return self
-
-    def add_to_table(self, table):
-        if table is not None and self.table is None:
-            if SQLA_07:
-                table.append_column(self)
-            else:
-                self._set_parent(table)
-
-    def _col_name_in_constraint(self,cons,name):
-        return False
-
-    def remove_from_table(self, table, unset_table=True):
-        # TODO: remove primary keys, constraints, etc
-        if unset_table:
-            self.table = None
-
-        to_drop = set()
-        for index in table.indexes:
-            columns = []
-            for col in index.columns:
-                if col.name!=self.name:
-                    columns.append(col)
-            if columns:
-                index.columns=columns
-            else:
-                to_drop.add(index)
-        table.indexes = table.indexes - to_drop
-
-        to_drop = set()
-        for cons in table.constraints:
-            # TODO: deal with other types of constraint
-            if isinstance(cons,(ForeignKeyConstraint,
-                                UniqueConstraint)):
-                for col_name in cons.columns:
-                    if not isinstance(col_name,basestring):
-                        col_name = col_name.name
-                    if self.name==col_name:
-                        to_drop.add(cons)
-        table.constraints = table.constraints - to_drop
-
-        if table.c.contains_column(self):
-            if SQLA_07:
-                table._columns.remove(self)
-            else:
-                table.c.remove(self)
-
-    # TODO: this is fixed in 0.6
-    def copy_fixed(self, **kw):
-        """Create a copy of this ``Column``, with all attributes."""
-        return sqlalchemy.Column(self.name, self.type, self.default,
-            key=self.key,
-            primary_key=self.primary_key,
-            nullable=self.nullable,
-            quote=self.quote,
-            index=self.index,
-            unique=self.unique,
-            onupdate=self.onupdate,
-            autoincrement=self.autoincrement,
-            server_default=self.server_default,
-            server_onupdate=self.server_onupdate,
-            *[c.copy(**kw) for c in self.constraints])
-
-    def _check_sanity_constraints(self, name):
-        """Check if constraint names are correct"""
-        obj = getattr(self, name)
-        if (getattr(self, name[:-5]) and not obj):
-            raise InvalidConstraintError("Column.create() accepts index_name,"
-                " primary_key_name and unique_name to generate constraints")
-        if not isinstance(obj, basestring) and obj is not None:
-            raise InvalidConstraintError(
-                "%s argument for column must be constraint name" % name)
-
-
-class ChangesetIndex(object):
-    """Changeset extensions to SQLAlchemy Indexes."""
-
-    __visit_name__ = 'index'
-
-    def rename(self, name, connection=None, **kwargs):
-        """Change the name of an index.
-
-        :param name: New name of the Index.
-        :type name: string
-        :param connection: reuse connection instead of creating new one.
- :type connection: :class:`sqlalchemy.engine.base.Connection` instance - """ - engine = self.table.bind - self.new_name = name - visitorcallable = get_engine_visitor(engine, 'schemachanger') - engine._run_visitor(visitorcallable, self, connection, **kwargs) - self.name = name - - -class ChangesetDefaultClause(object): - """Implements comparison between :class:`DefaultClause` instances""" - - def __eq__(self, other): - if isinstance(other, self.__class__): - if self.arg == other.arg: - return True - - def __ne__(self, other): - return not self.__eq__(other) diff --git a/libs/migrate/exceptions.py b/libs/migrate/exceptions.py deleted file mode 100644 index cb8c4094..00000000 --- a/libs/migrate/exceptions.py +++ /dev/null @@ -1,87 +0,0 @@ -""" - Provide exception classes for :mod:`migrate` -""" - - -class Error(Exception): - """Error base class.""" - - -class ApiError(Error): - """Base class for API errors.""" - - -class KnownError(ApiError): - """A known error condition.""" - - -class UsageError(ApiError): - """A known error condition where help should be displayed.""" - - -class ControlledSchemaError(Error): - """Base class for controlled schema errors.""" - - -class InvalidVersionError(ControlledSchemaError): - """Invalid version number.""" - - -class DatabaseNotControlledError(ControlledSchemaError): - """Database should be under version control, but it's not.""" - - -class DatabaseAlreadyControlledError(ControlledSchemaError): - """Database shouldn't be under version control, but it is""" - - -class WrongRepositoryError(ControlledSchemaError): - """This database is under version control by another repository.""" - - -class NoSuchTableError(ControlledSchemaError): - """The table does not exist.""" - - -class PathError(Error): - """Base class for path errors.""" - - -class PathNotFoundError(PathError): - """A path with no file was required; found a file.""" - - -class PathFoundError(PathError): - """A path with a file was required; found no file.""" - - -class RepositoryError(Error): - """Base class for repository errors.""" - - -class InvalidRepositoryError(RepositoryError): - """Invalid repository error.""" - - -class ScriptError(Error): - """Base class for script errors.""" - - -class InvalidScriptError(ScriptError): - """Invalid script error.""" - - -class InvalidVersionError(Error): - """Invalid version error.""" - -# migrate.changeset - -class NotSupportedError(Error): - """Not supported error""" - - -class InvalidConstraintError(Error): - """Invalid constraint error""" - -class MigrateDeprecationWarning(DeprecationWarning): - """Warning for deprecated features in Migrate""" diff --git a/libs/migrate/versioning/__init__.py b/libs/migrate/versioning/__init__.py deleted file mode 100644 index 8b5a7363..00000000 --- a/libs/migrate/versioning/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -""" - This package provides functionality to create and manage - repositories of database schema changesets and to apply these - changesets to databases. -""" diff --git a/libs/migrate/versioning/api.py b/libs/migrate/versioning/api.py deleted file mode 100644 index 570dc086..00000000 --- a/libs/migrate/versioning/api.py +++ /dev/null @@ -1,384 +0,0 @@ -""" - This module provides an external API to the versioning system. - - .. versionchanged:: 0.6.0 - :func:`migrate.versioning.api.test` and schema diff functions - changed order of positional arguments so all accept `url` and `repository` - as first arguments. - - .. versionchanged:: 0.5.4 - ``--preview_sql`` displays source file when using SQL scripts. 
- If Python script is used, it runs the action with mocked engine and - returns captured SQL statements. - - .. versionchanged:: 0.5.4 - Deprecated ``--echo`` parameter in favour of new - :func:`migrate.versioning.util.construct_engine` behavior. -""" - -# Dear migrate developers, -# -# please do not comment this module using sphinx syntax because its -# docstrings are presented as user help and most users cannot -# interpret sphinx annotated ReStructuredText. -# -# Thanks, -# Jan Dittberner - -import sys -import inspect -import logging - -from migrate import exceptions -from migrate.versioning import (repository, schema, version, - script as script_) # command name conflict -from migrate.versioning.util import catch_known_errors, with_engine - - -log = logging.getLogger(__name__) -command_desc = { - 'help': 'displays help on a given command', - 'create': 'create an empty repository at the specified path', - 'script': 'create an empty change Python script', - 'script_sql': 'create empty change SQL scripts for given database', - 'version': 'display the latest version available in a repository', - 'db_version': 'show the current version of the repository under version control', - 'source': 'display the Python code for a particular version in this repository', - 'version_control': 'mark a database as under this repository\'s version control', - 'upgrade': 'upgrade a database to a later version', - 'downgrade': 'downgrade a database to an earlier version', - 'drop_version_control': 'removes version control from a database', - 'manage': 'creates a Python script that runs Migrate with a set of default values', - 'test': 'performs the upgrade and downgrade command on the given database', - 'compare_model_to_db': 'compare MetaData against the current database state', - 'create_model': 'dump the current database as a Python model to stdout', - 'make_update_script_for_model': 'create a script changing the old MetaData to the new (current) MetaData', - 'update_db_from_model': 'modify the database to match the structure of the current MetaData', -} -__all__ = command_desc.keys() - -Repository = repository.Repository -ControlledSchema = schema.ControlledSchema -VerNum = version.VerNum -PythonScript = script_.PythonScript -SqlScript = script_.SqlScript - - -# deprecated -def help(cmd=None, **opts): - """%prog help COMMAND - - Displays help on a given command. - """ - if cmd is None: - raise exceptions.UsageError(None) - try: - func = globals()[cmd] - except: - raise exceptions.UsageError( - "'%s' isn't a valid command. Try 'help COMMAND'" % cmd) - ret = func.__doc__ - if sys.argv[0]: - ret = ret.replace('%prog', sys.argv[0]) - return ret - -@catch_known_errors -def create(repository, name, **opts): - """%prog create REPOSITORY_PATH NAME [--table=TABLE] - - Create an empty repository at the specified path. - - You can specify the version_table to be used; by default, it is - 'migrate_version'. This table is created in all version-controlled - databases. - """ - repo_path = Repository.create(repository, name, **opts) - - -@catch_known_errors -def script(description, repository, **opts): - """%prog script DESCRIPTION REPOSITORY_PATH - - Create an empty change script using the next unused version number - appended with the given description. 
-
-    For instance, manage.py script "Add initial tables" creates:
-    repository/versions/001_Add_initial_tables.py
-    """
-    repo = Repository(repository)
-    repo.create_script(description, **opts)
-
-
-@catch_known_errors
-def script_sql(database, description, repository, **opts):
-    """%prog script_sql DATABASE DESCRIPTION REPOSITORY_PATH
-
-    Create empty change SQL scripts for given DATABASE, where DATABASE
-    is either specific ('postgresql', 'mysql', 'oracle', 'sqlite', etc.)
-    or generic ('default').
-
-    For instance, manage.py script_sql postgresql description creates:
-    repository/versions/001_description_postgresql_upgrade.sql and
-    repository/versions/001_description_postgresql_downgrade.sql
-    """
-    repo = Repository(repository)
-    repo.create_script_sql(database, description, **opts)
-
-
-def version(repository, **opts):
-    """%prog version REPOSITORY_PATH
-
-    Display the latest version available in a repository.
-    """
-    repo = Repository(repository)
-    return repo.latest
-
-
-@with_engine
-def db_version(url, repository, **opts):
-    """%prog db_version URL REPOSITORY_PATH
-
-    Show the current version of the repository with the given
-    connection string, under version control of the specified
-    repository.
-
-    The url should be any valid SQLAlchemy connection string.
-    """
-    engine = opts.pop('engine')
-    schema = ControlledSchema(engine, repository)
-    return schema.version
-
-
-def source(version, dest=None, repository=None, **opts):
-    """%prog source VERSION [DESTINATION] --repository=REPOSITORY_PATH
-
-    Display the Python code for a particular version in this
-    repository. Save it to the file at DESTINATION or, if omitted,
-    send to stdout.
-    """
-    if repository is None:
-        raise exceptions.UsageError("A repository must be specified")
-    repo = Repository(repository)
-    ret = repo.version(version).script().source()
-    if dest is not None:
-        dest = open(dest, 'w')
-        dest.write(ret)
-        dest.close()
-        ret = None
-    return ret
-
-
-def upgrade(url, repository, version=None, **opts):
-    """%prog upgrade URL REPOSITORY_PATH [VERSION] [--preview_py|--preview_sql]
-
-    Upgrade a database to a later version.
-
-    This runs the upgrade() function defined in your change scripts.
-
-    By default, the database is updated to the latest available
-    version. You may specify a version instead, if you wish.
-
-    You may preview the Python or SQL code to be executed, rather than
-    actually executing it, using the appropriate 'preview' option.
-    """
-    err = "Cannot upgrade a database of version %s to version %s. "\
-        "Try 'downgrade' instead."
-    return _migrate(url, repository, version, upgrade=True, err=err, **opts)
-
-
-def downgrade(url, repository, version, **opts):
-    """%prog downgrade URL REPOSITORY_PATH VERSION [--preview_py|--preview_sql]
-
-    Downgrade a database to an earlier version.
-
-    This is the reverse of upgrade; this runs the downgrade() function
-    defined in your change scripts.
-
-    You may preview the Python or SQL code to be executed, rather than
-    actually executing it, using the appropriate 'preview' option.
-    """
-    err = "Cannot downgrade a database of version %s to version %s. "\
-        "Try 'upgrade' instead."
-    return _migrate(url, repository, version, upgrade=False, err=err, **opts)
-
-@with_engine
-def test(url, repository, **opts):
-    """%prog test URL REPOSITORY_PATH [VERSION]
-
-    Performs the upgrade and downgrade commands on the given
-    database. This is not a real test and may leave the database in a
-    bad state. You should therefore run the test on a copy of
-    your database.
- """ - engine = opts.pop('engine') - repos = Repository(repository) - - # Upgrade - log.info("Upgrading...") - script = repos.version(None).script(engine.name, 'upgrade') - script.run(engine, 1) - log.info("done") - - log.info("Downgrading...") - script = repos.version(None).script(engine.name, 'downgrade') - script.run(engine, -1) - log.info("done") - log.info("Success") - - -@with_engine -def version_control(url, repository, version=None, **opts): - """%prog version_control URL REPOSITORY_PATH [VERSION] - - Mark a database as under this repository's version control. - - Once a database is under version control, schema changes should - only be done via change scripts in this repository. - - This creates the table version_table in the database. - - The url should be any valid SQLAlchemy connection string. - - By default, the database begins at version 0 and is assumed to be - empty. If the database is not empty, you may specify a version at - which to begin instead. No attempt is made to verify this - version's correctness - the database schema is expected to be - identical to what it would be if the database were created from - scratch. - """ - engine = opts.pop('engine') - ControlledSchema.create(engine, repository, version) - - -@with_engine -def drop_version_control(url, repository, **opts): - """%prog drop_version_control URL REPOSITORY_PATH - - Removes version control from a database. - """ - engine = opts.pop('engine') - schema = ControlledSchema(engine, repository) - schema.drop() - - -def manage(file, **opts): - """%prog manage FILENAME [VARIABLES...] - - Creates a script that runs Migrate with a set of default values. - - For example:: - - %prog manage manage.py --repository=/path/to/repository \ ---url=sqlite:///project.db - - would create the script manage.py. The following two commands - would then have exactly the same results:: - - python manage.py version - %prog version --repository=/path/to/repository - """ - Repository.create_manage_file(file, **opts) - - -@with_engine -def compare_model_to_db(url, repository, model, **opts): - """%prog compare_model_to_db URL REPOSITORY_PATH MODEL - - Compare the current model (assumed to be a module level variable - of type sqlalchemy.MetaData) against the current database. - - NOTE: This is EXPERIMENTAL. - """ # TODO: get rid of EXPERIMENTAL label - engine = opts.pop('engine') - return ControlledSchema.compare_model_to_db(engine, model, repository) - - -@with_engine -def create_model(url, repository, **opts): - """%prog create_model URL REPOSITORY_PATH [DECLERATIVE=True] - - Dump the current database as a Python model to stdout. - - NOTE: This is EXPERIMENTAL. - """ # TODO: get rid of EXPERIMENTAL label - engine = opts.pop('engine') - declarative = opts.get('declarative', False) - return ControlledSchema.create_model(engine, repository, declarative) - - -@catch_known_errors -@with_engine -def make_update_script_for_model(url, repository, oldmodel, model, **opts): - """%prog make_update_script_for_model URL OLDMODEL MODEL REPOSITORY_PATH - - Create a script changing the old Python model to the new (current) - Python model, sending to stdout. - - NOTE: This is EXPERIMENTAL. 
- """ # TODO: get rid of EXPERIMENTAL label - engine = opts.pop('engine') - return PythonScript.make_update_script_for_model( - engine, oldmodel, model, repository, **opts) - - -@with_engine -def update_db_from_model(url, repository, model, **opts): - """%prog update_db_from_model URL REPOSITORY_PATH MODEL - - Modify the database to match the structure of the current Python - model. This also sets the db_version number to the latest in the - repository. - - NOTE: This is EXPERIMENTAL. - """ # TODO: get rid of EXPERIMENTAL label - engine = opts.pop('engine') - schema = ControlledSchema(engine, repository) - schema.update_db_from_model(model) - -@with_engine -def _migrate(url, repository, version, upgrade, err, **opts): - engine = opts.pop('engine') - url = str(engine.url) - schema = ControlledSchema(engine, repository) - version = _migrate_version(schema, version, upgrade, err) - - changeset = schema.changeset(version) - for ver, change in changeset: - nextver = ver + changeset.step - log.info('%s -> %s... ', ver, nextver) - - if opts.get('preview_sql'): - if isinstance(change, PythonScript): - log.info(change.preview_sql(url, changeset.step, **opts)) - elif isinstance(change, SqlScript): - log.info(change.source()) - - elif opts.get('preview_py'): - if not isinstance(change, PythonScript): - raise exceptions.UsageError("Python source can be only displayed" - " for python migration files") - source_ver = max(ver, nextver) - module = schema.repository.version(source_ver).script().module - funcname = upgrade and "upgrade" or "downgrade" - func = getattr(module, funcname) - log.info(inspect.getsource(func)) - else: - schema.runchange(ver, change, changeset.step) - log.info('done') - - -def _migrate_version(schema, version, upgrade, err): - if version is None: - return version - # Version is specified: ensure we're upgrading in the right direction - # (current version < target version for upgrading; reverse for down) - version = VerNum(version) - cur = schema.version - if upgrade is not None: - if upgrade: - direction = cur <= version - else: - direction = cur >= version - if not direction: - raise exceptions.KnownError(err % (cur, version)) - return version diff --git a/libs/migrate/versioning/cfgparse.py b/libs/migrate/versioning/cfgparse.py deleted file mode 100644 index ff27d672..00000000 --- a/libs/migrate/versioning/cfgparse.py +++ /dev/null @@ -1,27 +0,0 @@ -""" - Configuration parser module. 
-""" - -from ConfigParser import ConfigParser - -from migrate.versioning.config import * -from migrate.versioning import pathed - - -class Parser(ConfigParser): - """A project configuration file.""" - - def to_dict(self, sections=None): - """It's easier to access config values like dictionaries""" - return self._sections - - -class Config(pathed.Pathed, Parser): - """Configuration class.""" - - def __init__(self, path, *p, **k): - """Confirm the config file exists; read it.""" - self.require_found(path) - pathed.Pathed.__init__(self, path) - Parser.__init__(self, *p, **k) - self.read(path) diff --git a/libs/migrate/versioning/config.py b/libs/migrate/versioning/config.py deleted file mode 100644 index 2429fd8b..00000000 --- a/libs/migrate/versioning/config.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- - -from sqlalchemy.util import OrderedDict - - -__all__ = ['databases', 'operations'] - -databases = ('sqlite', 'postgres', 'mysql', 'oracle', 'mssql', 'firebird') - -# Map operation names to function names -operations = OrderedDict() -operations['upgrade'] = 'upgrade' -operations['downgrade'] = 'downgrade' diff --git a/libs/migrate/versioning/genmodel.py b/libs/migrate/versioning/genmodel.py deleted file mode 100644 index 85df6276..00000000 --- a/libs/migrate/versioning/genmodel.py +++ /dev/null @@ -1,285 +0,0 @@ -""" -Code to generate a Python model from a database or differences -between a model and database. - -Some of this is borrowed heavily from the AutoCode project at: -http://code.google.com/p/sqlautocode/ -""" - -import sys -import logging - -import sqlalchemy - -import migrate -import migrate.changeset - - -log = logging.getLogger(__name__) -HEADER = """ -## File autogenerated by genmodel.py - -from sqlalchemy import * -meta = MetaData() -""" - -DECLARATIVE_HEADER = """ -## File autogenerated by genmodel.py - -from sqlalchemy import * -from sqlalchemy.ext import declarative - -Base = declarative.declarative_base() -""" - - -class ModelGenerator(object): - """Various transformations from an A, B diff. - - In the implementation, A tends to be called the model and B - the database (although this is not true of all diffs). - The diff is directionless, but transformations apply the diff - in a particular direction, described in the method name. - """ - - def __init__(self, diff, engine, declarative=False): - self.diff = diff - self.engine = engine - self.declarative = declarative - - def column_repr(self, col): - kwarg = [] - if col.key != col.name: - kwarg.append('key') - if col.primary_key: - col.primary_key = True # otherwise it dumps it as 1 - kwarg.append('primary_key') - if not col.nullable: - kwarg.append('nullable') - if col.onupdate: - kwarg.append('onupdate') - if col.default: - if col.primary_key: - # I found that PostgreSQL automatically creates a - # default value for the sequence, but let's not show - # that. 
- pass - else: - kwarg.append('default') - args = ['%s=%r' % (k, getattr(col, k)) for k in kwarg] - - # crs: not sure if this is good idea, but it gets rid of extra - # u'' - name = col.name.encode('utf8') - - type_ = col.type - for cls in col.type.__class__.__mro__: - if cls.__module__ == 'sqlalchemy.types' and \ - not cls.__name__.isupper(): - if cls is not type_.__class__: - type_ = cls() - break - - type_repr = repr(type_) - if type_repr.endswith('()'): - type_repr = type_repr[:-2] - - constraints = [repr(cn) for cn in col.constraints] - - data = { - 'name': name, - 'commonStuff': ', '.join([type_repr] + constraints + args), - } - - if self.declarative: - return """%(name)s = Column(%(commonStuff)s)""" % data - else: - return """Column(%(name)r, %(commonStuff)s)""" % data - - def _getTableDefn(self, table, metaName='meta'): - out = [] - tableName = table.name - if self.declarative: - out.append("class %(table)s(Base):" % {'table': tableName}) - out.append(" __tablename__ = '%(table)s'\n" % - {'table': tableName}) - for col in table.columns: - out.append(" %s" % self.column_repr(col)) - out.append('\n') - else: - out.append("%(table)s = Table('%(table)s', %(meta)s," % - {'table': tableName, 'meta': metaName}) - for col in table.columns: - out.append(" %s," % self.column_repr(col)) - out.append(")\n") - return out - - def _get_tables(self,missingA=False,missingB=False,modified=False): - to_process = [] - for bool_,names,metadata in ( - (missingA,self.diff.tables_missing_from_A,self.diff.metadataB), - (missingB,self.diff.tables_missing_from_B,self.diff.metadataA), - (modified,self.diff.tables_different,self.diff.metadataA), - ): - if bool_: - for name in names: - yield metadata.tables.get(name) - - def genBDefinition(self): - """Generates the source code for a definition of B. - - Assumes a diff where A is empty. - - Was: toPython. Assume database (B) is current and model (A) is empty. - """ - - out = [] - if self.declarative: - out.append(DECLARATIVE_HEADER) - else: - out.append(HEADER) - out.append("") - for table in self._get_tables(missingA=True): - out.extend(self._getTableDefn(table)) - return '\n'.join(out) - - def genB2AMigration(self, indent=' '): - '''Generate a migration from B to A. - - Was: toUpgradeDowngradePython - Assume model (A) is most current and database (B) is out-of-date. 
- ''' - - decls = ['from migrate.changeset import schema', - 'pre_meta = MetaData()', - 'post_meta = MetaData()', - ] - upgradeCommands = ['pre_meta.bind = migrate_engine', - 'post_meta.bind = migrate_engine'] - downgradeCommands = list(upgradeCommands) - - for tn in self.diff.tables_missing_from_A: - pre_table = self.diff.metadataB.tables[tn] - decls.extend(self._getTableDefn(pre_table, metaName='pre_meta')) - upgradeCommands.append( - "pre_meta.tables[%(table)r].drop()" % {'table': tn}) - downgradeCommands.append( - "pre_meta.tables[%(table)r].create()" % {'table': tn}) - - for tn in self.diff.tables_missing_from_B: - post_table = self.diff.metadataA.tables[tn] - decls.extend(self._getTableDefn(post_table, metaName='post_meta')) - upgradeCommands.append( - "post_meta.tables[%(table)r].create()" % {'table': tn}) - downgradeCommands.append( - "post_meta.tables[%(table)r].drop()" % {'table': tn}) - - for (tn, td) in self.diff.tables_different.iteritems(): - if td.columns_missing_from_A or td.columns_different: - pre_table = self.diff.metadataB.tables[tn] - decls.extend(self._getTableDefn( - pre_table, metaName='pre_meta')) - if td.columns_missing_from_B or td.columns_different: - post_table = self.diff.metadataA.tables[tn] - decls.extend(self._getTableDefn( - post_table, metaName='post_meta')) - - for col in td.columns_missing_from_A: - upgradeCommands.append( - 'pre_meta.tables[%r].columns[%r].drop()' % (tn, col)) - downgradeCommands.append( - 'pre_meta.tables[%r].columns[%r].create()' % (tn, col)) - for col in td.columns_missing_from_B: - upgradeCommands.append( - 'post_meta.tables[%r].columns[%r].create()' % (tn, col)) - downgradeCommands.append( - 'post_meta.tables[%r].columns[%r].drop()' % (tn, col)) - for modelCol, databaseCol, modelDecl, databaseDecl in td.columns_different: - upgradeCommands.append( - 'assert False, "Can\'t alter columns: %s:%s=>%s"' % ( - tn, modelCol.name, databaseCol.name)) - downgradeCommands.append( - 'assert False, "Can\'t alter columns: %s:%s=>%s"' % ( - tn, modelCol.name, databaseCol.name)) - - return ( - '\n'.join(decls), - '\n'.join('%s%s' % (indent, line) for line in upgradeCommands), - '\n'.join('%s%s' % (indent, line) for line in downgradeCommands)) - - def _db_can_handle_this_change(self,td): - """Check if the database can handle going from B to A.""" - - if (td.columns_missing_from_B - and not td.columns_missing_from_A - and not td.columns_different): - # Even sqlite can handle column additions. - return True - else: - return not self.engine.url.drivername.startswith('sqlite') - - def runB2A(self): - """Goes from B to A. - - Was: applyModel. Apply model (A) to current database (B). - """ - - meta = sqlalchemy.MetaData(self.engine) - - for table in self._get_tables(missingA=True): - table = table.tometadata(meta) - table.drop() - for table in self._get_tables(missingB=True): - table = table.tometadata(meta) - table.create() - for modelTable in self._get_tables(modified=True): - tableName = modelTable.name - modelTable = modelTable.tometadata(meta) - dbTable = self.diff.metadataB.tables[tableName] - - td = self.diff.tables_different[tableName] - - if self._db_can_handle_this_change(td): - - for col in td.columns_missing_from_B: - modelTable.columns[col].create() - for col in td.columns_missing_from_A: - dbTable.columns[col].drop() - # XXX handle column changes here. - else: - # Sqlite doesn't support drop column, so you have to - # do more: create temp table, copy data to it, drop - # old table, create new table, copy data back. 
- # - # I wonder if this is guaranteed to be unique? - tempName = '_temp_%s' % modelTable.name - - def getCopyStatement(): - preparer = self.engine.dialect.preparer - commonCols = [] - for modelCol in modelTable.columns: - if modelCol.name in dbTable.columns: - commonCols.append(modelCol.name) - commonColsStr = ', '.join(commonCols) - return 'INSERT INTO %s (%s) SELECT %s FROM %s' % \ - (tableName, commonColsStr, commonColsStr, tempName) - - # Move the data in one transaction, so that we don't - # leave the database in a nasty state. - connection = self.engine.connect() - trans = connection.begin() - try: - connection.execute( - 'CREATE TEMPORARY TABLE %s as SELECT * from %s' % \ - (tempName, modelTable.name)) - # make sure the drop takes place inside our - # transaction with the bind parameter - modelTable.drop(bind=connection) - modelTable.create(bind=connection) - connection.execute(getCopyStatement()) - connection.execute('DROP TABLE %s' % tempName) - trans.commit() - except: - trans.rollback() - raise - diff --git a/libs/migrate/versioning/migrate_repository.py b/libs/migrate/versioning/migrate_repository.py deleted file mode 100644 index 53833bbc..00000000 --- a/libs/migrate/versioning/migrate_repository.py +++ /dev/null @@ -1,100 +0,0 @@ -""" - Script to migrate repository from sqlalchemy <= 0.4.4 to the new - repository schema. This shouldn't use any other migrate modules, so - that it can work in any version. -""" - -import os -import sys -import logging - -log = logging.getLogger(__name__) - - -def usage(): - """Gives usage information.""" - print """Usage: %(prog)s repository-to-migrate - - Upgrade your repository to the new flat format. - - NOTE: You should probably make a backup before running this. - """ % {'prog': sys.argv[0]} - - sys.exit(1) - - -def delete_file(filepath): - """Deletes a file and prints a message.""" - log.info('Deleting file: %s' % filepath) - os.remove(filepath) - - -def move_file(src, tgt): - """Moves a file and prints a message.""" - log.info('Moving file %s to %s' % (src, tgt)) - if os.path.exists(tgt): - raise Exception( - 'Cannot move file %s because target %s already exists' % \ - (src, tgt)) - os.rename(src, tgt) - - -def delete_directory(dirpath): - """Delete a directory and print a message.""" - log.info('Deleting directory: %s' % dirpath) - os.rmdir(dirpath) - - -def migrate_repository(repos): - """Does the actual migration to the new repository format.""" - log.info('Migrating repository at: %s to new format' % repos) - versions = '%s/versions' % repos - dirs = os.listdir(versions) - # Only use int's in list. - numdirs = [int(dirname) for dirname in dirs if dirname.isdigit()] - numdirs.sort() # Sort list. - for dirname in numdirs: - origdir = '%s/%s' % (versions, dirname) - log.info('Working on directory: %s' % origdir) - files = os.listdir(origdir) - files.sort() - for filename in files: - # Delete compiled Python files. - if filename.endswith('.pyc') or filename.endswith('.pyo'): - delete_file('%s/%s' % (origdir, filename)) - - # Delete empty __init__.py files. - origfile = '%s/__init__.py' % origdir - if os.path.exists(origfile) and len(open(origfile).read()) == 0: - delete_file(origfile) - - # Move sql upgrade scripts. 
- if filename.endswith('.sql'): - version, dbms, operation = filename.split('.', 3)[0:3] - origfile = '%s/%s' % (origdir, filename) - # For instance: 2.postgres.upgrade.sql -> - # 002_postgres_upgrade.sql - tgtfile = '%s/%03d_%s_%s.sql' % ( - versions, int(version), dbms, operation) - move_file(origfile, tgtfile) - - # Move Python upgrade script. - pyfile = '%s.py' % dirname - pyfilepath = '%s/%s' % (origdir, pyfile) - if os.path.exists(pyfilepath): - tgtfile = '%s/%03d.py' % (versions, int(dirname)) - move_file(pyfilepath, tgtfile) - - # Try to remove directory. Will fail if it's not empty. - delete_directory(origdir) - - -def main(): - """Main function to be called when using this script.""" - if len(sys.argv) != 2: - usage() - migrate_repository(sys.argv[1]) - - -if __name__ == '__main__': - main() diff --git a/libs/migrate/versioning/pathed.py b/libs/migrate/versioning/pathed.py deleted file mode 100644 index fbee0e46..00000000 --- a/libs/migrate/versioning/pathed.py +++ /dev/null @@ -1,75 +0,0 @@ -""" - A path/directory class. -""" - -import os -import shutil -import logging - -from migrate import exceptions -from migrate.versioning.config import * -from migrate.versioning.util import KeyedInstance - - -log = logging.getLogger(__name__) - -class Pathed(KeyedInstance): - """ - A class associated with a path/directory tree. - - Only one instance of this class may exist for a particular file; - __new__ will return an existing instance if possible - """ - parent = None - - @classmethod - def _key(cls, path): - return str(path) - - def __init__(self, path): - self.path = path - if self.__class__.parent is not None: - self._init_parent(path) - - def _init_parent(self, path): - """Try to initialize this object's parent, if it has one""" - parent_path = self.__class__._parent_path(path) - self.parent = self.__class__.parent(parent_path) - log.debug("Getting parent %r:%r" % (self.__class__.parent, parent_path)) - self.parent._init_child(path, self) - - def _init_child(self, child, path): - """Run when a child of this object is initialized. - - Parameters: the child object; the path to this object (its - parent) - """ - - @classmethod - def _parent_path(cls, path): - """ - Fetch the path of this object's parent from this object's path. - """ - # os.path.dirname(), but strip directories like files (like - # unix basename) - # - # Treat directories like files... - if path[-1] == '/': - path = path[:-1] - ret = os.path.dirname(path) - return ret - - @classmethod - def require_notfound(cls, path): - """Ensures a given path does not already exist""" - if os.path.exists(path): - raise exceptions.PathFoundError(path) - - @classmethod - def require_found(cls, path): - """Ensures a given path already exists""" - if not os.path.exists(path): - raise exceptions.PathNotFoundError(path) - - def __str__(self): - return self.path diff --git a/libs/migrate/versioning/repository.py b/libs/migrate/versioning/repository.py deleted file mode 100644 index 6e2f678f..00000000 --- a/libs/migrate/versioning/repository.py +++ /dev/null @@ -1,242 +0,0 @@ -""" - SQLAlchemy migrate repository management. 
-""" -import os -import shutil -import string -import logging - -from pkg_resources import resource_filename -from tempita import Template as TempitaTemplate - -from migrate import exceptions -from migrate.versioning import version, pathed, cfgparse -from migrate.versioning.template import Template -from migrate.versioning.config import * - - -log = logging.getLogger(__name__) - -class Changeset(dict): - """A collection of changes to be applied to a database. - - Changesets are bound to a repository and manage a set of - scripts from that repository. - - Behaves like a dict, for the most part. Keys are ordered based on step value. - """ - - def __init__(self, start, *changes, **k): - """ - Give a start version; step must be explicitly stated. - """ - self.step = k.pop('step', 1) - self.start = version.VerNum(start) - self.end = self.start - for change in changes: - self.add(change) - - def __iter__(self): - return iter(self.items()) - - def keys(self): - """ - In a series of upgrades x -> y, keys are version x. Sorted. - """ - ret = super(Changeset, self).keys() - # Reverse order if downgrading - ret.sort(reverse=(self.step < 1)) - return ret - - def values(self): - return [self[k] for k in self.keys()] - - def items(self): - return zip(self.keys(), self.values()) - - def add(self, change): - """Add new change to changeset""" - key = self.end - self.end += self.step - self[key] = change - - def run(self, *p, **k): - """Run the changeset scripts""" - for version, script in self: - script.run(*p, **k) - - -class Repository(pathed.Pathed): - """A project's change script repository""" - - _config = 'migrate.cfg' - _versions = 'versions' - - def __init__(self, path): - log.debug('Loading repository %s...' % path) - self.verify(path) - super(Repository, self).__init__(path) - self.config = cfgparse.Config(os.path.join(self.path, self._config)) - self.versions = version.Collection(os.path.join(self.path, - self._versions)) - log.debug('Repository %s loaded successfully' % path) - log.debug('Config: %r' % self.config.to_dict()) - - @classmethod - def verify(cls, path): - """ - Ensure the target path is a valid repository. - - :raises: :exc:`InvalidRepositoryError ` - """ - # Ensure the existence of required files - try: - cls.require_found(path) - cls.require_found(os.path.join(path, cls._config)) - cls.require_found(os.path.join(path, cls._versions)) - except exceptions.PathNotFoundError, e: - raise exceptions.InvalidRepositoryError(path) - - @classmethod - def prepare_config(cls, tmpl_dir, name, options=None): - """ - Prepare a project configuration file for a new project. 
- - :param tmpl_dir: Path to Repository template - :param config_file: Name of the config file in Repository template - :param name: Repository name - :type tmpl_dir: string - :type config_file: string - :type name: string - :returns: Populated config file - """ - if options is None: - options = {} - options.setdefault('version_table', 'migrate_version') - options.setdefault('repository_id', name) - options.setdefault('required_dbs', []) - options.setdefault('use_timestamp_numbering', False) - - tmpl = open(os.path.join(tmpl_dir, cls._config)).read() - ret = TempitaTemplate(tmpl).substitute(options) - - # cleanup - del options['__template_name__'] - - return ret - - @classmethod - def create(cls, path, name, **opts): - """Create a repository at a specified path""" - cls.require_notfound(path) - theme = opts.pop('templates_theme', None) - t_path = opts.pop('templates_path', None) - - # Create repository - tmpl_dir = Template(t_path).get_repository(theme=theme) - shutil.copytree(tmpl_dir, path) - - # Edit config defaults - config_text = cls.prepare_config(tmpl_dir, name, options=opts) - fd = open(os.path.join(path, cls._config), 'w') - fd.write(config_text) - fd.close() - - opts['repository_name'] = name - - # Create a management script - manager = os.path.join(path, 'manage.py') - Repository.create_manage_file(manager, templates_theme=theme, - templates_path=t_path, **opts) - - return cls(path) - - def create_script(self, description, **k): - """API to :meth:`migrate.versioning.version.Collection.create_new_python_version`""" - - k['use_timestamp_numbering'] = self.use_timestamp_numbering - self.versions.create_new_python_version(description, **k) - - def create_script_sql(self, database, description, **k): - """API to :meth:`migrate.versioning.version.Collection.create_new_sql_version`""" - k['use_timestamp_numbering'] = self.use_timestamp_numbering - self.versions.create_new_sql_version(database, description, **k) - - @property - def latest(self): - """API to :attr:`migrate.versioning.version.Collection.latest`""" - return self.versions.latest - - @property - def version_table(self): - """Returns version_table name specified in config""" - return self.config.get('db_settings', 'version_table') - - @property - def id(self): - """Returns repository id specified in config""" - return self.config.get('db_settings', 'repository_id') - - @property - def use_timestamp_numbering(self): - """Returns use_timestamp_numbering specified in config""" - if self.config.has_option('db_settings', 'use_timestamp_numbering'): - return self.config.getboolean('db_settings', 'use_timestamp_numbering') - return False - - def version(self, *p, **k): - """API to :attr:`migrate.versioning.version.Collection.version`""" - return self.versions.version(*p, **k) - - @classmethod - def clear(cls): - # TODO: deletes repo - super(Repository, cls).clear() - version.Collection.clear() - - def changeset(self, database, start, end=None): - """Create a changeset to migrate this database from ver. start to end/latest. 
- - :param database: name of database to generate changeset - :param start: version to start at - :param end: version to end at (latest if None given) - :type database: string - :type start: int - :type end: int - :returns: :class:`Changeset instance ` - """ - start = version.VerNum(start) - - if end is None: - end = self.latest - else: - end = version.VerNum(end) - - if start <= end: - step = 1 - range_mod = 1 - op = 'upgrade' - else: - step = -1 - range_mod = 0 - op = 'downgrade' - - versions = range(start + range_mod, end + range_mod, step) - changes = [self.version(v).script(database, op) for v in versions] - ret = Changeset(start, step=step, *changes) - return ret - - @classmethod - def create_manage_file(cls, file_, **opts): - """Create a project management script (manage.py) - - :param file_: Destination file to be written - :param opts: Options that are passed to :func:`migrate.versioning.shell.main` - """ - mng_file = Template(opts.pop('templates_path', None))\ - .get_manage(theme=opts.pop('templates_theme', None)) - - tmpl = open(mng_file).read() - fd = open(file_, 'w') - fd.write(TempitaTemplate(tmpl).substitute(opts)) - fd.close() diff --git a/libs/migrate/versioning/schema.py b/libs/migrate/versioning/schema.py deleted file mode 100644 index e4d93653..00000000 --- a/libs/migrate/versioning/schema.py +++ /dev/null @@ -1,220 +0,0 @@ -""" - Database schema version management. -""" -import sys -import logging - -from sqlalchemy import (Table, Column, MetaData, String, Text, Integer, - create_engine) -from sqlalchemy.sql import and_ -from sqlalchemy import exceptions as sa_exceptions -from sqlalchemy.sql import bindparam - -from migrate import exceptions -from migrate.changeset import SQLA_07 -from migrate.versioning import genmodel, schemadiff -from migrate.versioning.repository import Repository -from migrate.versioning.util import load_model -from migrate.versioning.version import VerNum - - -log = logging.getLogger(__name__) - -class ControlledSchema(object): - """A database under version control""" - - def __init__(self, engine, repository): - if isinstance(repository, basestring): - repository = Repository(repository) - self.engine = engine - self.repository = repository - self.meta = MetaData(engine) - self.load() - - def __eq__(self, other): - """Compare two schemas by repositories and versions""" - return (self.repository is other.repository \ - and self.version == other.version) - - def load(self): - """Load controlled schema version info from DB""" - tname = self.repository.version_table - try: - if not hasattr(self, 'table') or self.table is None: - self.table = Table(tname, self.meta, autoload=True) - - result = self.engine.execute(self.table.select( - self.table.c.repository_id == str(self.repository.id))) - - data = list(result)[0] - except: - cls, exc, tb = sys.exc_info() - raise exceptions.DatabaseNotControlledError, exc.__str__(), tb - - self.version = data['version'] - return data - - def drop(self): - """ - Remove version control from a database. - """ - if SQLA_07: - try: - self.table.drop() - except sa_exceptions.DatabaseError: - raise exceptions.DatabaseNotControlledError(str(self.table)) - else: - try: - self.table.drop() - except (sa_exceptions.SQLError): - raise exceptions.DatabaseNotControlledError(str(self.table)) - - def changeset(self, version=None): - """API to Changeset creation. - - Uses self.version for start version and engine.name - to get database name. 
- """ - database = self.engine.name - start_ver = self.version - changeset = self.repository.changeset(database, start_ver, version) - return changeset - - def runchange(self, ver, change, step): - startver = ver - endver = ver + step - # Current database version must be correct! Don't run if corrupt! - if self.version != startver: - raise exceptions.InvalidVersionError("%s is not %s" % \ - (self.version, startver)) - # Run the change - change.run(self.engine, step) - - # Update/refresh database version - self.update_repository_table(startver, endver) - self.load() - - def update_repository_table(self, startver, endver): - """Update version_table with new information""" - update = self.table.update(and_(self.table.c.version == int(startver), - self.table.c.repository_id == str(self.repository.id))) - self.engine.execute(update, version=int(endver)) - - def upgrade(self, version=None): - """ - Upgrade (or downgrade) to a specified version, or latest version. - """ - changeset = self.changeset(version) - for ver, change in changeset: - self.runchange(ver, change, changeset.step) - - def update_db_from_model(self, model): - """ - Modify the database to match the structure of the current Python model. - """ - model = load_model(model) - - diff = schemadiff.getDiffOfModelAgainstDatabase( - model, self.engine, excludeTables=[self.repository.version_table] - ) - genmodel.ModelGenerator(diff,self.engine).runB2A() - - self.update_repository_table(self.version, int(self.repository.latest)) - - self.load() - - @classmethod - def create(cls, engine, repository, version=None): - """ - Declare a database to be under a repository's version control. - - :raises: :exc:`DatabaseAlreadyControlledError` - :returns: :class:`ControlledSchema` - """ - # Confirm that the version # is valid: positive, integer, - # exists in repos - if isinstance(repository, basestring): - repository = Repository(repository) - version = cls._validate_version(repository, version) - table = cls._create_table_version(engine, repository, version) - # TODO: history table - # Load repository information and return - return cls(engine, repository) - - @classmethod - def _validate_version(cls, repository, version): - """ - Ensures this is a valid version number for this repository. - - :raises: :exc:`InvalidVersionError` if invalid - :return: valid version number - """ - if version is None: - version = 0 - try: - version = VerNum(version) # raises valueerror - if version < 0 or version > repository.latest: - raise ValueError() - except ValueError: - raise exceptions.InvalidVersionError(version) - return version - - @classmethod - def _create_table_version(cls, engine, repository, version): - """ - Creates the versioning table in a database. 
- - :raises: :exc:`DatabaseAlreadyControlledError` - """ - # Create tables - tname = repository.version_table - meta = MetaData(engine) - - table = Table( - tname, meta, - Column('repository_id', String(250), primary_key=True), - Column('repository_path', Text), - Column('version', Integer), ) - - # there can be multiple repositories/schemas in the same db - if not table.exists(): - table.create() - - # test for existing repository_id - s = table.select(table.c.repository_id == bindparam("repository_id")) - result = engine.execute(s, repository_id=repository.id) - if result.fetchone(): - raise exceptions.DatabaseAlreadyControlledError - - # Insert data - engine.execute(table.insert().values( - repository_id=repository.id, - repository_path=repository.path, - version=int(version))) - return table - - @classmethod - def compare_model_to_db(cls, engine, model, repository): - """ - Compare the current model against the current database. - """ - if isinstance(repository, basestring): - repository = Repository(repository) - model = load_model(model) - - diff = schemadiff.getDiffOfModelAgainstDatabase( - model, engine, excludeTables=[repository.version_table]) - return diff - - @classmethod - def create_model(cls, engine, repository, declarative=False): - """ - Dump the current database as a Python model. - """ - if isinstance(repository, basestring): - repository = Repository(repository) - - diff = schemadiff.getDiffOfModelAgainstDatabase( - MetaData(), engine, excludeTables=[repository.version_table] - ) - return genmodel.ModelGenerator(diff, engine, declarative).genBDefinition() diff --git a/libs/migrate/versioning/schemadiff.py b/libs/migrate/versioning/schemadiff.py deleted file mode 100644 index 04cf83e6..00000000 --- a/libs/migrate/versioning/schemadiff.py +++ /dev/null @@ -1,292 +0,0 @@ -""" - Schema differencing support. -""" - -import logging -import sqlalchemy - -from sqlalchemy.types import Float - -log = logging.getLogger(__name__) - -def getDiffOfModelAgainstDatabase(metadata, engine, excludeTables=None): - """ - Return differences of model against database. - - :return: object which will evaluate to :keyword:`True` if there \ - are differences else :keyword:`False`. - """ - db_metadata = sqlalchemy.MetaData(engine, reflect=True) - - # sqlite will include a dynamically generated 'sqlite_sequence' table if - # there are autoincrement sequences in the database; this should not be - # compared. - if engine.dialect.name == 'sqlite': - if 'sqlite_sequence' in db_metadata.tables: - db_metadata.remove(db_metadata.tables['sqlite_sequence']) - - return SchemaDiff(metadata, db_metadata, - labelA='model', - labelB='database', - excludeTables=excludeTables) - - -def getDiffOfModelAgainstModel(metadataA, metadataB, excludeTables=None): - """ - Return differences of model against another model. - - :return: object which will evaluate to :keyword:`True` if there \ - are differences else :keyword:`False`. - """ - return SchemaDiff(metadataA, metadataB, excludeTables) - - -class ColDiff(object): - """ - Container for differences in one :class:`~sqlalchemy.schema.Column` - between two :class:`~sqlalchemy.schema.Table` instances, ``A`` - and ``B``. - - .. attribute:: col_A - - The :class:`~sqlalchemy.schema.Column` object for A. - - .. attribute:: col_B - - The :class:`~sqlalchemy.schema.Column` object for B. - - .. attribute:: type_A - - The most generic type of the :class:`~sqlalchemy.schema.Column` - object in A. - - .. 
attribute:: type_B
-
-       The most generic type of the :class:`~sqlalchemy.schema.Column`
-       object in B.
-
-    """
-
-    diff = False
-
-    def __init__(self,col_A,col_B):
-        self.col_A = col_A
-        self.col_B = col_B
-
-        self.type_A = col_A.type
-        self.type_B = col_B.type
-
-        self.affinity_A = self.type_A._type_affinity
-        self.affinity_B = self.type_B._type_affinity
-
-        if self.affinity_A is not self.affinity_B:
-            self.diff = True
-            return
-
-        if isinstance(self.type_A,Float) or isinstance(self.type_B,Float):
-            if not (isinstance(self.type_A,Float) and isinstance(self.type_B,Float)):
-                self.diff=True
-                return
-
-        for attr in ('precision','scale','length'):
-            A = getattr(self.type_A,attr,None)
-            B = getattr(self.type_B,attr,None)
-            if not (A is None or B is None) and A!=B:
-                self.diff=True
-                return
-
-    def __nonzero__(self):
-        return self.diff
-
-class TableDiff(object):
-    """
-    Container for differences in one :class:`~sqlalchemy.schema.Table`
-    between two :class:`~sqlalchemy.schema.MetaData` instances, ``A``
-    and ``B``.
-
-    .. attribute:: columns_missing_from_A
-
-       A sequence of column names that were found in B but weren't in
-       A.
-
-    .. attribute:: columns_missing_from_B
-
-       A sequence of column names that were found in A but weren't in
-       B.
-
-    .. attribute:: columns_different
-
-       A dictionary containing information about columns that were
-       found to be different.
-       It maps column names to :class:`ColDiff` objects describing the
-       differences found.
-    """
-    __slots__ = (
-        'columns_missing_from_A',
-        'columns_missing_from_B',
-        'columns_different',
-        )
-
-    def __nonzero__(self):
-        return bool(
-            self.columns_missing_from_A or
-            self.columns_missing_from_B or
-            self.columns_different
-            )
-
-class SchemaDiff(object):
-    """
-    Compute the difference between two :class:`~sqlalchemy.schema.MetaData`
-    objects.
-
-    The string representation of a :class:`SchemaDiff` will summarise
-    the changes found between the two
-    :class:`~sqlalchemy.schema.MetaData` objects.
-
-    The length of a :class:`SchemaDiff` will give the number of
-    changes found, enabling it to be used much like a boolean in
-    expressions.
-
-    :param metadataA:
-      First :class:`~sqlalchemy.schema.MetaData` to compare.
-
-    :param metadataB:
-      Second :class:`~sqlalchemy.schema.MetaData` to compare.
-
-    :param labelA:
-      The label to use in messages about the first
-      :class:`~sqlalchemy.schema.MetaData`.
-
-    :param labelB:
-      The label to use in messages about the second
-      :class:`~sqlalchemy.schema.MetaData`.
-
-    :param excludeTables:
-      A sequence of table names to exclude.
-
-    .. attribute:: tables_missing_from_A
-
-       A sequence of table names that were found in B but weren't in
-       A.
-
-    .. attribute:: tables_missing_from_B
-
-       A sequence of table names that were found in A but weren't in
-       B.
-
-    .. attribute:: tables_different
-
-       A dictionary containing information about tables that were found
-       to be different.
-       It maps table names to :class:`TableDiff` objects describing the
-       differences found.
- """ - - def __init__(self, - metadataA, metadataB, - labelA='metadataA', - labelB='metadataB', - excludeTables=None): - - self.metadataA, self.metadataB = metadataA, metadataB - self.labelA, self.labelB = labelA, labelB - self.label_width = max(len(labelA),len(labelB)) - excludeTables = set(excludeTables or []) - - A_table_names = set(metadataA.tables.keys()) - B_table_names = set(metadataB.tables.keys()) - - self.tables_missing_from_A = sorted( - B_table_names - A_table_names - excludeTables - ) - self.tables_missing_from_B = sorted( - A_table_names - B_table_names - excludeTables - ) - - self.tables_different = {} - for table_name in A_table_names.intersection(B_table_names): - - td = TableDiff() - - A_table = metadataA.tables[table_name] - B_table = metadataB.tables[table_name] - - A_column_names = set(A_table.columns.keys()) - B_column_names = set(B_table.columns.keys()) - - td.columns_missing_from_A = sorted( - B_column_names - A_column_names - ) - - td.columns_missing_from_B = sorted( - A_column_names - B_column_names - ) - - td.columns_different = {} - - for col_name in A_column_names.intersection(B_column_names): - - cd = ColDiff( - A_table.columns.get(col_name), - B_table.columns.get(col_name) - ) - - if cd: - td.columns_different[col_name]=cd - - # XXX - index and constraint differences should - # be checked for here - - if td: - self.tables_different[table_name]=td - - def __str__(self): - ''' Summarize differences. ''' - out = [] - column_template =' %%%is: %%r' % self.label_width - - for names,label in ( - (self.tables_missing_from_A,self.labelA), - (self.tables_missing_from_B,self.labelB), - ): - if names: - out.append( - ' tables missing from %s: %s' % ( - label,', '.join(sorted(names)) - ) - ) - - for name,td in sorted(self.tables_different.items()): - out.append( - ' table with differences: %s' % name - ) - for names,label in ( - (td.columns_missing_from_A,self.labelA), - (td.columns_missing_from_B,self.labelB), - ): - if names: - out.append( - ' %s missing these columns: %s' % ( - label,', '.join(sorted(names)) - ) - ) - for name,cd in td.columns_different.items(): - out.append(' column with differences: %s' % name) - out.append(column_template % (self.labelA,cd.col_A)) - out.append(column_template % (self.labelB,cd.col_B)) - - if out: - out.insert(0, 'Schema diffs:') - return '\n'.join(out) - else: - return 'No schema diffs' - - def __len__(self): - """ - Used in bool evaluation, return of 0 means no diffs. 
- """ - return ( - len(self.tables_missing_from_A) + - len(self.tables_missing_from_B) + - len(self.tables_different) - ) diff --git a/libs/migrate/versioning/script/__init__.py b/libs/migrate/versioning/script/__init__.py deleted file mode 100644 index c788edaa..00000000 --- a/libs/migrate/versioning/script/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -from migrate.versioning.script.base import BaseScript -from migrate.versioning.script.py import PythonScript -from migrate.versioning.script.sql import SqlScript diff --git a/libs/migrate/versioning/script/base.py b/libs/migrate/versioning/script/base.py deleted file mode 100644 index 42872352..00000000 --- a/libs/migrate/versioning/script/base.py +++ /dev/null @@ -1,57 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -import logging - -from migrate import exceptions -from migrate.versioning.config import operations -from migrate.versioning import pathed - - -log = logging.getLogger(__name__) - -class BaseScript(pathed.Pathed): - """Base class for other types of scripts. - All scripts have the following properties: - - source (script.source()) - The source code of the script - version (script.version()) - The version number of the script - operations (script.operations()) - The operations defined by the script: upgrade(), downgrade() or both. - Returns a tuple of operations. - Can also check for an operation with ex. script.operation(Script.ops.up) - """ # TODO: sphinxfy this and implement it correctly - - def __init__(self, path): - log.debug('Loading script %s...' % path) - self.verify(path) - super(BaseScript, self).__init__(path) - log.debug('Script %s loaded successfully' % path) - - @classmethod - def verify(cls, path): - """Ensure this is a valid script - This version simply ensures the script file's existence - - :raises: :exc:`InvalidScriptError ` - """ - try: - cls.require_found(path) - except: - raise exceptions.InvalidScriptError(path) - - def source(self): - """:returns: source code of the script. - :rtype: string - """ - fd = open(self.path) - ret = fd.read() - fd.close() - return ret - - def run(self, engine): - """Core of each BaseScript subclass. - This method executes the script. 
- """ - raise NotImplementedError() diff --git a/libs/migrate/versioning/script/py.py b/libs/migrate/versioning/script/py.py deleted file mode 100644 index 3a090d49..00000000 --- a/libs/migrate/versioning/script/py.py +++ /dev/null @@ -1,160 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import shutil -import warnings -import logging -import inspect -from StringIO import StringIO - -import migrate -from migrate.versioning import genmodel, schemadiff -from migrate.versioning.config import operations -from migrate.versioning.template import Template -from migrate.versioning.script import base -from migrate.versioning.util import import_path, load_model, with_engine -from migrate.exceptions import MigrateDeprecationWarning, InvalidScriptError, ScriptError - -log = logging.getLogger(__name__) -__all__ = ['PythonScript'] - - -class PythonScript(base.BaseScript): - """Base for Python scripts""" - - @classmethod - def create(cls, path, **opts): - """Create an empty migration script at specified path - - :returns: :class:`PythonScript instance `""" - cls.require_notfound(path) - - src = Template(opts.pop('templates_path', None)).get_script(theme=opts.pop('templates_theme', None)) - shutil.copy(src, path) - - return cls(path) - - @classmethod - def make_update_script_for_model(cls, engine, oldmodel, - model, repository, **opts): - """Create a migration script based on difference between two SA models. - - :param repository: path to migrate repository - :param oldmodel: dotted.module.name:SAClass or SAClass object - :param model: dotted.module.name:SAClass or SAClass object - :param engine: SQLAlchemy engine - :type repository: string or :class:`Repository instance ` - :type oldmodel: string or Class - :type model: string or Class - :type engine: Engine instance - :returns: Upgrade / Downgrade script - :rtype: string - """ - - if isinstance(repository, basestring): - # oh dear, an import cycle! - from migrate.versioning.repository import Repository - repository = Repository(repository) - - oldmodel = load_model(oldmodel) - model = load_model(model) - - # Compute differences. - diff = schemadiff.getDiffOfModelAgainstModel( - model, - oldmodel, - excludeTables=[repository.version_table]) - # TODO: diff can be False (there is no difference?) - decls, upgradeCommands, downgradeCommands = \ - genmodel.ModelGenerator(diff,engine).genB2AMigration() - - # Store differences into file. 
-        src = Template(opts.pop('templates_path', None)).get_script(opts.pop('templates_theme', None))
-        f = open(src)
-        contents = f.read()
-        f.close()
-
-        # generate source
-        search = 'def upgrade(migrate_engine):'
-        contents = contents.replace(search, '\n\n'.join((decls, search)), 1)
-        if upgradeCommands:
-            contents = contents.replace('    pass', upgradeCommands, 1)
-        if downgradeCommands:
-            contents = contents.replace('    pass', downgradeCommands, 1)
-        return contents
-
-    @classmethod
-    def verify_module(cls, path):
-        """Ensure path is a valid script
-
-        :param path: Script location
-        :type path: string
-        :raises: :exc:`InvalidScriptError `
-        :returns: Python module
-        """
-        # Try to import and get the upgrade() func
-        module = import_path(path)
-        try:
-            assert callable(module.upgrade)
-        except Exception, e:
-            raise InvalidScriptError(path + ': %s' % str(e))
-        return module
-
-    def preview_sql(self, url, step, **args):
-        """Mocks SQLAlchemy Engine to store all executed calls in a string
-        and runs :meth:`PythonScript.run `
-
-        :returns: SQL file
-        """
-        buf = StringIO()
-        args['engine_arg_strategy'] = 'mock'
-        args['engine_arg_executor'] = lambda s, p = '': buf.write(str(s) + p)
-
-        @with_engine
-        def go(url, step, **kw):
-            engine = kw.pop('engine')
-            self.run(engine, step)
-            return buf.getvalue()
-
-        return go(url, step, **args)
-
-    def run(self, engine, step):
-        """Core method of Script file.
-        Executes :func:`upgrade` or :func:`downgrade` functions
-
-        :param engine: SQLAlchemy Engine
-        :param step: Operation to run
-        :type engine: string
-        :type step: int
-        """
-        if step > 0:
-            op = 'upgrade'
-        elif step < 0:
-            op = 'downgrade'
-        else:
-            raise ScriptError("%d is not a valid step" % step)
-
-        funcname = base.operations[op]
-        script_func = self._func(funcname)
-
-        # check for old way of using engine
-        if not inspect.getargspec(script_func)[0]:
-            raise TypeError("upgrade/downgrade functions must accept engine"
-                " parameter (since version 0.5.4)")
-
-        script_func(engine)
-
-    @property
-    def module(self):
-        """Calls :meth:`migrate.versioning.script.py.verify_module`
-        and returns it.
-        """
-        if not hasattr(self, '_module'):
-            self._module = self.verify_module(self.path)
-        return self._module
-
-    def _func(self, funcname):
-        if not hasattr(self.module, funcname):
-            msg = "Function '%s' is not defined in this script"
-            raise ScriptError(msg % funcname)
-        return getattr(self.module, funcname)
diff --git a/libs/migrate/versioning/script/sql.py b/libs/migrate/versioning/script/sql.py
deleted file mode 100644
index ed807641..00000000
--- a/libs/migrate/versioning/script/sql.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-import logging
-import shutil
-
-from migrate.versioning.script import base
-from migrate.versioning.template import Template
-
-
-log = logging.getLogger(__name__)
-
-class SqlScript(base.BaseScript):
-    """A file containing plain SQL statements."""
-
-    @classmethod
-    def create(cls, path, **opts):
-        """Create an empty migration script at specified path
-
-        :returns: :class:`SqlScript instance `"""
-        cls.require_notfound(path)
-
-        src = Template(opts.pop('templates_path', None)).get_sql_script(theme=opts.pop('templates_theme', None))
-        shutil.copy(src, path)
-        return cls(path)
-
-    # TODO: why is step parameter even here?
-    def run(self, engine, step=None, executemany=True):
-        """Runs SQL script through raw dbapi execute call"""
-        text = self.source()
-        # Don't rely on SA's autocommit here
-        # (SA uses .startswith to check if a commit is needed.
What if script - # starts with a comment?) - conn = engine.connect() - try: - trans = conn.begin() - try: - # HACK: SQLite doesn't allow multiple statements through - # its execute() method, but it provides executescript() instead - dbapi = conn.engine.raw_connection() - if executemany and getattr(dbapi, 'executescript', None): - dbapi.executescript(text) - else: - conn.execute(text) - trans.commit() - except: - trans.rollback() - raise - finally: - conn.close() diff --git a/libs/migrate/versioning/shell.py b/libs/migrate/versioning/shell.py deleted file mode 100644 index ad7b6798..00000000 --- a/libs/migrate/versioning/shell.py +++ /dev/null @@ -1,214 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -"""The migrate command-line tool.""" - -import sys -import inspect -import logging -from optparse import OptionParser, BadOptionError - -from migrate import exceptions -from migrate.versioning import api -from migrate.versioning.config import * -from migrate.versioning.util import asbool - - -alias = dict( - s=api.script, - vc=api.version_control, - dbv=api.db_version, - v=api.version, -) - -def alias_setup(): - global alias - for key, val in alias.iteritems(): - setattr(api, key, val) -alias_setup() - - -class PassiveOptionParser(OptionParser): - - def _process_args(self, largs, rargs, values): - """little hack to support all --some_option=value parameters""" - - while rargs: - arg = rargs[0] - if arg == "--": - del rargs[0] - return - elif arg[0:2] == "--": - # if parser does not know about the option - # pass it along (make it anonymous) - try: - opt = arg.split('=', 1)[0] - self._match_long_opt(opt) - except BadOptionError: - largs.append(arg) - del rargs[0] - else: - self._process_long_opt(rargs, values) - elif arg[:1] == "-" and len(arg) > 1: - self._process_short_opts(rargs, values) - elif self.allow_interspersed_args: - largs.append(arg) - del rargs[0] - -def main(argv=None, **kwargs): - """Shell interface to :mod:`migrate.versioning.api`. - - kwargs are default options that can be overriden with passing - --some_option as command line option - - :param disable_logging: Let migrate configure logging - :type disable_logging: bool - """ - if argv is not None: - argv = argv - else: - argv = list(sys.argv[1:]) - commands = list(api.__all__) - commands.sort() - - usage = """%%prog COMMAND ... - - Available commands: - %s - - Enter "%%prog help COMMAND" for information on a particular command. 
- """ % '\n\t'.join(["%s - %s" % (command.ljust(28), api.command_desc.get(command)) for command in commands]) - - parser = PassiveOptionParser(usage=usage) - parser.add_option("-d", "--debug", - action="store_true", - dest="debug", - default=False, - help="Shortcut to turn on DEBUG mode for logging") - parser.add_option("-q", "--disable_logging", - action="store_true", - dest="disable_logging", - default=False, - help="Use this option to disable logging configuration") - help_commands = ['help', '-h', '--help'] - HELP = False - - try: - command = argv.pop(0) - if command in help_commands: - HELP = True - command = argv.pop(0) - except IndexError: - parser.print_help() - return - - command_func = getattr(api, command, None) - if command_func is None or command.startswith('_'): - parser.error("Invalid command %s" % command) - - parser.set_usage(inspect.getdoc(command_func)) - f_args, f_varargs, f_kwargs, f_defaults = inspect.getargspec(command_func) - for arg in f_args: - parser.add_option( - "--%s" % arg, - dest=arg, - action='store', - type="string") - - # display help of the current command - if HELP: - parser.print_help() - return - - options, args = parser.parse_args(argv) - - # override kwargs with anonymous parameters - override_kwargs = dict() - for arg in list(args): - if arg.startswith('--'): - args.remove(arg) - if '=' in arg: - opt, value = arg[2:].split('=', 1) - else: - opt = arg[2:] - value = True - override_kwargs[opt] = value - - # override kwargs with options if user is overwriting - for key, value in options.__dict__.iteritems(): - if value is not None: - override_kwargs[key] = value - - # arguments that function accepts without passed kwargs - f_required = list(f_args) - candidates = dict(kwargs) - candidates.update(override_kwargs) - for key, value in candidates.iteritems(): - if key in f_args: - f_required.remove(key) - - # map function arguments to parsed arguments - for arg in args: - try: - kw = f_required.pop(0) - except IndexError: - parser.error("Too many arguments for command %s: %s" % (command, - arg)) - kwargs[kw] = arg - - # apply overrides - kwargs.update(override_kwargs) - - # configure options - for key, value in options.__dict__.iteritems(): - kwargs.setdefault(key, value) - - # configure logging - if not asbool(kwargs.pop('disable_logging', False)): - # filter to log =< INFO into stdout and rest to stderr - class SingleLevelFilter(logging.Filter): - def __init__(self, min=None, max=None): - self.min = min or 0 - self.max = max or 100 - - def filter(self, record): - return self.min <= record.levelno <= self.max - - logger = logging.getLogger() - h1 = logging.StreamHandler(sys.stdout) - f1 = SingleLevelFilter(max=logging.INFO) - h1.addFilter(f1) - h2 = logging.StreamHandler(sys.stderr) - f2 = SingleLevelFilter(min=logging.WARN) - h2.addFilter(f2) - logger.addHandler(h1) - logger.addHandler(h2) - - if options.debug: - logger.setLevel(logging.DEBUG) - else: - logger.setLevel(logging.INFO) - - log = logging.getLogger(__name__) - - # check if all args are given - try: - num_defaults = len(f_defaults) - except TypeError: - num_defaults = 0 - f_args_default = f_args[len(f_args) - num_defaults:] - required = list(set(f_required) - set(f_args_default)) - if required: - parser.error("Not enough arguments for command %s: %s not specified" \ - % (command, ', '.join(required))) - - # handle command - try: - ret = command_func(**kwargs) - if ret is not None: - log.info(ret) - except (exceptions.UsageError, exceptions.KnownError), e: - parser.error(e.args[0]) - -if 
__name__ == "__main__": - main() diff --git a/libs/migrate/versioning/template.py b/libs/migrate/versioning/template.py deleted file mode 100644 index 182898a6..00000000 --- a/libs/migrate/versioning/template.py +++ /dev/null @@ -1,93 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -import os -import shutil -import sys - -from pkg_resources import resource_filename - -from migrate.versioning.config import * -from migrate.versioning import pathed - - -class Collection(pathed.Pathed): - """A collection of templates of a specific type""" - _mask = None - - def get_path(self, file): - return os.path.join(self.path, str(file)) - - -class RepositoryCollection(Collection): - _mask = '%s' - -class ScriptCollection(Collection): - _mask = '%s.py_tmpl' - -class ManageCollection(Collection): - _mask = '%s.py_tmpl' - -class SQLScriptCollection(Collection): - _mask = '%s.py_tmpl' - -class Template(pathed.Pathed): - """Finds the paths/packages of various Migrate templates. - - :param path: Templates are loaded from migrate package - if `path` is not provided. - """ - pkg = 'migrate.versioning.templates' - - def __new__(cls, path=None): - if path is None: - path = cls._find_path(cls.pkg) - return super(Template, cls).__new__(cls, path) - - def __init__(self, path=None): - if path is None: - path = Template._find_path(self.pkg) - super(Template, self).__init__(path) - self.repository = RepositoryCollection(os.path.join(path, 'repository')) - self.script = ScriptCollection(os.path.join(path, 'script')) - self.manage = ManageCollection(os.path.join(path, 'manage')) - self.sql_script = SQLScriptCollection(os.path.join(path, 'sql_script')) - - @classmethod - def _find_path(cls, pkg): - """Returns absolute path to dotted python package.""" - tmp_pkg = pkg.rsplit('.', 1) - - if len(tmp_pkg) != 1: - return resource_filename(tmp_pkg[0], tmp_pkg[1]) - else: - return resource_filename(tmp_pkg[0], '') - - def _get_item(self, collection, theme=None): - """Locates and returns collection. 
- - :param collection: name of collection to locate - :param type_: type of subfolder in collection (defaults to "_default") - :returns: (package, source) - :rtype: str, str - """ - item = getattr(self, collection) - theme_mask = getattr(item, '_mask') - theme = theme_mask % (theme or 'default') - return item.get_path(theme) - - def get_repository(self, *a, **kw): - """Calls self._get_item('repository', *a, **kw)""" - return self._get_item('repository', *a, **kw) - - def get_script(self, *a, **kw): - """Calls self._get_item('script', *a, **kw)""" - return self._get_item('script', *a, **kw) - - def get_sql_script(self, *a, **kw): - """Calls self._get_item('sql_script', *a, **kw)""" - return self._get_item('sql_script', *a, **kw) - - def get_manage(self, *a, **kw): - """Calls self._get_item('manage', *a, **kw)""" - return self._get_item('manage', *a, **kw) diff --git a/libs/migrate/versioning/templates/manage/default.py_tmpl b/libs/migrate/versioning/templates/manage/default.py_tmpl deleted file mode 100644 index f6d75c50..00000000 --- a/libs/migrate/versioning/templates/manage/default.py_tmpl +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python -from migrate.versioning.shell import main - -{{py: -_vars = locals().copy() -del _vars['__template_name__'] -_vars.pop('repository_name', None) -defaults = ", ".join(["%s='%s'" % var for var in _vars.iteritems()]) -}} - -if __name__ == '__main__': - main({{ defaults }}) diff --git a/libs/migrate/versioning/templates/manage/pylons.py_tmpl b/libs/migrate/versioning/templates/manage/pylons.py_tmpl deleted file mode 100644 index cc2f7885..00000000 --- a/libs/migrate/versioning/templates/manage/pylons.py_tmpl +++ /dev/null @@ -1,30 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -import sys - -from sqlalchemy import engine_from_config -from paste.deploy.loadwsgi import ConfigLoader - -from migrate.versioning.shell import main -from {{ locals().pop('repository_name') }}.model import migrations - - -if '-c' in sys.argv: - pos = sys.argv.index('-c') - conf_path = sys.argv[pos + 1] - del sys.argv[pos:pos + 2] -else: - conf_path = 'development.ini' - -{{py: -_vars = locals().copy() -del _vars['__template_name__'] -defaults = ", ".join(["%s='%s'" % var for var in _vars.iteritems()]) -}} - -conf_dict = ConfigLoader(conf_path).parser._sections['app:main'] - -# migrate supports passing url as an existing Engine instance (since 0.6.0) -# usage: migrate -c path/to/config.ini COMMANDS -if __name__ == '__main__': - main(url=engine_from_config(conf_dict), repository=migrations.__path__[0],{{ defaults }}) diff --git a/libs/migrate/versioning/templates/repository/default/README b/libs/migrate/versioning/templates/repository/default/README deleted file mode 100644 index 6218f8ca..00000000 --- a/libs/migrate/versioning/templates/repository/default/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at -http://code.google.com/p/sqlalchemy-migrate/ diff --git a/libs/migrate/versioning/templates/repository/default/migrate.cfg b/libs/migrate/versioning/templates/repository/default/migrate.cfg deleted file mode 100644 index dae06123..00000000 --- a/libs/migrate/versioning/templates/repository/default/migrate.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id={{ locals().pop('repository_id') }} - -# The name of the database table used to track the schema version. 
-# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table={{ locals().pop('version_table') }} - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs={{ locals().pop('required_dbs') }} - -# When creating new change scripts, Migrate will stamp the new script with -# a version number. By default this is latest_version + 1. You can set this -# to 'true' to tell Migrate to use the UTC timestamp instead. -use_timestamp_numbering={{ locals().pop('use_timestamp_numbering') }} diff --git a/libs/migrate/versioning/templates/repository/pylons/README b/libs/migrate/versioning/templates/repository/pylons/README deleted file mode 100644 index 6218f8ca..00000000 --- a/libs/migrate/versioning/templates/repository/pylons/README +++ /dev/null @@ -1,4 +0,0 @@ -This is a database migration repository. - -More information at -http://code.google.com/p/sqlalchemy-migrate/ diff --git a/libs/migrate/versioning/templates/repository/pylons/migrate.cfg b/libs/migrate/versioning/templates/repository/pylons/migrate.cfg deleted file mode 100644 index dae06123..00000000 --- a/libs/migrate/versioning/templates/repository/pylons/migrate.cfg +++ /dev/null @@ -1,25 +0,0 @@ -[db_settings] -# Used to identify which repository this database is versioned under. -# You can use the name of your project. -repository_id={{ locals().pop('repository_id') }} - -# The name of the database table used to track the schema version. -# This name shouldn't already be used by your project. -# If this is changed once a database is under version control, you'll need to -# change the table name in each database too. -version_table={{ locals().pop('version_table') }} - -# When committing a change script, Migrate will attempt to generate the -# sql for all supported databases; normally, if one of them fails - probably -# because you don't have that database installed - it is ignored and the -# commit continues, perhaps ending successfully. -# Databases in this list MUST compile successfully during a commit, or the -# entire commit will fail. List the databases your application will actually -# be using to ensure your updates to that database work properly. -# This must be a list; example: ['postgres','sqlite'] -required_dbs={{ locals().pop('required_dbs') }} - -# When creating new change scripts, Migrate will stamp the new script with -# a version number. By default this is latest_version + 1. You can set this -# to 'true' to tell Migrate to use the UTC timestamp instead. 
-use_timestamp_numbering={{ locals().pop('use_timestamp_numbering') }} diff --git a/libs/migrate/versioning/templates/script/default.py_tmpl b/libs/migrate/versioning/templates/script/default.py_tmpl deleted file mode 100644 index 58d874bf..00000000 --- a/libs/migrate/versioning/templates/script/default.py_tmpl +++ /dev/null @@ -1,13 +0,0 @@ -from sqlalchemy import * -from migrate import * - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; bind - # migrate_engine to your metadata - pass - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - pass diff --git a/libs/migrate/versioning/templates/script/pylons.py_tmpl b/libs/migrate/versioning/templates/script/pylons.py_tmpl deleted file mode 100644 index 58d874bf..00000000 --- a/libs/migrate/versioning/templates/script/pylons.py_tmpl +++ /dev/null @@ -1,13 +0,0 @@ -from sqlalchemy import * -from migrate import * - - -def upgrade(migrate_engine): - # Upgrade operations go here. Don't create your own engine; bind - # migrate_engine to your metadata - pass - - -def downgrade(migrate_engine): - # Operations to reverse the above upgrade go here. - pass diff --git a/libs/migrate/versioning/templates/sql_script/default.py_tmpl b/libs/migrate/versioning/templates/sql_script/default.py_tmpl deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/migrate/versioning/templates/sql_script/pylons.py_tmpl b/libs/migrate/versioning/templates/sql_script/pylons.py_tmpl deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/migrate/versioning/util/__init__.py b/libs/migrate/versioning/util/__init__.py deleted file mode 100644 index 9b79f409..00000000 --- a/libs/migrate/versioning/util/__init__.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -""".. currentmodule:: migrate.versioning.util""" - -import warnings -import logging -from decorator import decorator -from pkg_resources import EntryPoint - -from sqlalchemy import create_engine -from sqlalchemy.engine import Engine -from sqlalchemy.pool import StaticPool - -from migrate import exceptions -from migrate.versioning.util.keyedinstance import KeyedInstance -from migrate.versioning.util.importpath import import_path - - -log = logging.getLogger(__name__) - -def load_model(dotted_name): - """Import module and use module-level variable". - - :param dotted_name: path to model in form of string: ``some.python.module:Class`` - - .. versionchanged:: 0.5.4 - - """ - if isinstance(dotted_name, basestring): - if ':' not in dotted_name: - # backwards compatibility - warnings.warn('model should be in form of module.model:User ' - 'and not module.model.User', exceptions.MigrateDeprecationWarning) - dotted_name = ':'.join(dotted_name.rsplit('.', 1)) - return EntryPoint.parse('x=%s' % dotted_name).load(False) - else: - # Assume it's already loaded. - return dotted_name - -def asbool(obj): - """Do everything to use object as bool""" - if isinstance(obj, basestring): - obj = obj.strip().lower() - if obj in ['true', 'yes', 'on', 'y', 't', '1']: - return True - elif obj in ['false', 'no', 'off', 'n', 'f', '0']: - return False - else: - raise ValueError("String is not true/false: %r" % obj) - if obj in (True, False): - return bool(obj) - else: - raise ValueError("String is not true/false: %r" % obj) - -def guess_obj_type(obj): - """Do everything to guess object type from string - - Tries to convert to `int`, `bool` and finally returns if not succeded. - - .. 
versionadded: 0.5.4 - """ - - result = None - - try: - result = int(obj) - except: - pass - - if result is None: - try: - result = asbool(obj) - except: - pass - - if result is not None: - return result - else: - return obj - -@decorator -def catch_known_errors(f, *a, **kw): - """Decorator that catches known api errors - - .. versionadded: 0.5.4 - """ - - try: - return f(*a, **kw) - except exceptions.PathFoundError, e: - raise exceptions.KnownError("The path %s already exists" % e.args[0]) - -def construct_engine(engine, **opts): - """.. versionadded:: 0.5.4 - - Constructs and returns SQLAlchemy engine. - - Currently, there are 2 ways to pass create_engine options to :mod:`migrate.versioning.api` functions: - - :param engine: connection string or a existing engine - :param engine_dict: python dictionary of options to pass to `create_engine` - :param engine_arg_*: keyword parameters to pass to `create_engine` (evaluated with :func:`migrate.versioning.util.guess_obj_type`) - :type engine_dict: dict - :type engine: string or Engine instance - :type engine_arg_*: string - :returns: SQLAlchemy Engine - - .. note:: - - keyword parameters override ``engine_dict`` values. - - """ - if isinstance(engine, Engine): - return engine - elif not isinstance(engine, basestring): - raise ValueError("you need to pass either an existing engine or a database uri") - - # get options for create_engine - if opts.get('engine_dict') and isinstance(opts['engine_dict'], dict): - kwargs = opts['engine_dict'] - else: - kwargs = dict() - - # DEPRECATED: handle echo the old way - echo = asbool(opts.get('echo', False)) - if echo: - warnings.warn('echo=True parameter is deprecated, pass ' - 'engine_arg_echo=True or engine_dict={"echo": True}', - exceptions.MigrateDeprecationWarning) - kwargs['echo'] = echo - - # parse keyword arguments - for key, value in opts.iteritems(): - if key.startswith('engine_arg_'): - kwargs[key[11:]] = guess_obj_type(value) - - log.debug('Constructing engine') - # TODO: return create_engine(engine, poolclass=StaticPool, **kwargs) - # seems like 0.5.x branch does not work with engine.dispose and staticpool - return create_engine(engine, **kwargs) - -@decorator -def with_engine(f, *a, **kw): - """Decorator for :mod:`migrate.versioning.api` functions - to safely close resources after function usage. - - Passes engine parameters to :func:`construct_engine` and - resulting parameter is available as kw['engine']. - - Engine is disposed after wrapped function is executed. - - .. versionadded: 0.6.0 - """ - url = a[0] - engine = construct_engine(url, **kw) - - try: - kw['engine'] = engine - return f(*a, **kw) - finally: - if isinstance(engine, Engine): - log.debug('Disposing SQLAlchemy engine %s', engine) - engine.dispose() - - -class Memoize: - """Memoize(fn) - an instance which acts like fn but memoizes its arguments - Will only work on functions with non-mutable arguments - - ActiveState Code 52201 - """ - def __init__(self, fn): - self.fn = fn - self.memo = {} - - def __call__(self, *args): - if not self.memo.has_key(args): - self.memo[args] = self.fn(*args) - return self.memo[args] diff --git a/libs/migrate/versioning/util/importpath.py b/libs/migrate/versioning/util/importpath.py deleted file mode 100644 index 59b57f14..00000000 --- a/libs/migrate/versioning/util/importpath.py +++ /dev/null @@ -1,16 +0,0 @@ -import os -import sys - -def import_path(fullpath): - """ Import a file with full path specification. Allows one to - import from anywhere, something __import__ does not do. 
-    """
-    # http://zephyrfalcon.org/weblog/arch_d7_2002_08_31.html
-    path, filename = os.path.split(fullpath)
-    filename, ext = os.path.splitext(filename)
-    sys.path.append(path)
-    module = __import__(filename)
-    reload(module) # Might be out of date during tests
-    del sys.path[-1]
-    return module
-
diff --git a/libs/migrate/versioning/util/keyedinstance.py b/libs/migrate/versioning/util/keyedinstance.py
deleted file mode 100644
index 3f6cb916..00000000
--- a/libs/migrate/versioning/util/keyedinstance.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-class KeyedInstance(object):
-    """A class whose instances have a unique identifier of some sort
-    No two instances with the same unique ID should exist - if we try to create
-    a second instance, the first should be returned.
-    """
-
-    _instances = dict()
-
-    def __new__(cls, *p, **k):
-        instances = cls._instances
-        clskey = str(cls)
-        if clskey not in instances:
-            instances[clskey] = dict()
-        instances = instances[clskey]
-
-        key = cls._key(*p, **k)
-        if key not in instances:
-            instances[key] = super(KeyedInstance, cls).__new__(cls)
-        return instances[key]
-
-    @classmethod
-    def _key(cls, *p, **k):
-        """Given a unique identifier, return a dictionary key
-        This should be overridden by child classes, to specify which parameters
-        should determine an object's uniqueness
-        """
-        raise NotImplementedError()
-
-    @classmethod
-    def clear(cls):
-        # Allow cls.clear() as well as uniqueInstance.clear(cls)
-        if str(cls) in cls._instances:
-            del cls._instances[str(cls)]
diff --git a/libs/migrate/versioning/version.py b/libs/migrate/versioning/version.py
deleted file mode 100644
index d5a5be98..00000000
--- a/libs/migrate/versioning/version.py
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-import os
-import re
-import shutil
-import logging
-
-from migrate import exceptions
-from migrate.versioning import pathed, script
-from datetime import datetime
-
-
-log = logging.getLogger(__name__)
-
-class VerNum(object):
-    """A version number that behaves like a string and int at the same time"""
-
-    _instances = dict()
-
-    def __new__(cls, value):
-        val = str(value)
-        if val not in cls._instances:
-            cls._instances[val] = super(VerNum, cls).__new__(cls)
-        ret = cls._instances[val]
-        return ret
-
-    def __init__(self,value):
-        self.value = str(int(value))
-        if self < 0:
-            raise ValueError("Version number cannot be negative")
-
-    def __add__(self, value):
-        ret = int(self) + int(value)
-        return VerNum(ret)
-
-    def __sub__(self, value):
-        return self + (int(value) * -1)
-
-    def __cmp__(self, value):
-        return int(self) - int(value)
-
-    def __repr__(self):
-        return "<VerNum(%s)>" % self.value
-
-    def __str__(self):
-        return str(self.value)
-
-    def __int__(self):
-        return int(self.value)
-
-
-class Collection(pathed.Pathed):
-    """A collection of versioning scripts in a repository"""
-
-    FILENAME_WITH_VERSION = re.compile(r'^(\d{3,}).*')
-
-    def __init__(self, path):
-        """Collect current version scripts in repository
-        and store them in self.versions
-        """
-        super(Collection, self).__init__(path)
-
-        # Create temporary list of files, allowing skipped version numbers.
-        files = os.listdir(path)
-        if '1' in files:
-            # deprecation
-            raise Exception('It looks like you have a repository in the old '
-                'format (with directories for each version). 
' - 'Please convert repository before proceeding.') - - tempVersions = dict() - for filename in files: - match = self.FILENAME_WITH_VERSION.match(filename) - if match: - num = int(match.group(1)) - tempVersions.setdefault(num, []).append(filename) - else: - pass # Must be a helper file or something, let's ignore it. - - # Create the versions member where the keys - # are VerNum's and the values are Version's. - self.versions = dict() - for num, files in tempVersions.items(): - self.versions[VerNum(num)] = Version(num, path, files) - - @property - def latest(self): - """:returns: Latest version in Collection""" - return max([VerNum(0)] + self.versions.keys()) - - def _next_ver_num(self, use_timestamp_numbering): - if use_timestamp_numbering == True: - return VerNum(int(datetime.utcnow().strftime('%Y%m%d%H%M%S'))) - else: - return self.latest + 1 - - def create_new_python_version(self, description, **k): - """Create Python files for new version""" - ver = self._next_ver_num(k.pop('use_timestamp_numbering', False)) - extra = str_to_filename(description) - - if extra: - if extra == '_': - extra = '' - elif not extra.startswith('_'): - extra = '_%s' % extra - - filename = '%03d%s.py' % (ver, extra) - filepath = self._version_path(filename) - - script.PythonScript.create(filepath, **k) - self.versions[ver] = Version(ver, self.path, [filename]) - - def create_new_sql_version(self, database, description, **k): - """Create SQL files for new version""" - ver = self._next_ver_num(k.pop('use_timestamp_numbering', False)) - self.versions[ver] = Version(ver, self.path, []) - - extra = str_to_filename(description) - - if extra: - if extra == '_': - extra = '' - elif not extra.startswith('_'): - extra = '_%s' % extra - - # Create new files. - for op in ('upgrade', 'downgrade'): - filename = '%03d%s_%s_%s.sql' % (ver, extra, database, op) - filepath = self._version_path(filename) - script.SqlScript.create(filepath, **k) - self.versions[ver].add_script(filepath) - - def version(self, vernum=None): - """Returns latest Version if vernum is not given. - Otherwise, returns wanted version""" - if vernum is None: - vernum = self.latest - return self.versions[VerNum(vernum)] - - @classmethod - def clear(cls): - super(Collection, cls).clear() - - def _version_path(self, ver): - """Returns path of file in versions repository""" - return os.path.join(self.path, str(ver)) - - -class Version(object): - """A single version in a collection - :param vernum: Version Number - :param path: Path to script files - :param filelist: List of scripts - :type vernum: int, VerNum - :type path: string - :type filelist: list - """ - - def __init__(self, vernum, path, filelist): - self.version = VerNum(vernum) - - # Collect scripts in this folder - self.sql = dict() - self.python = None - - for script in filelist: - self.add_script(os.path.join(path, script)) - - def script(self, database=None, operation=None): - """Returns SQL or Python Script""" - for db in (database, 'default'): - # Try to return a .sql script first - try: - return self.sql[db][operation] - except KeyError: - continue # No .sql script exists - - # TODO: maybe add force Python parameter? 
- ret = self.python - - assert ret is not None, \ - "There is no script for %d version" % self.version - return ret - - def add_script(self, path): - """Add script to Collection/Version""" - if path.endswith(Extensions.py): - self._add_script_py(path) - elif path.endswith(Extensions.sql): - self._add_script_sql(path) - - SQL_FILENAME = re.compile(r'^.*\.sql') - - def _add_script_sql(self, path): - basename = os.path.basename(path) - match = self.SQL_FILENAME.match(basename) - - if match: - basename = basename.replace('.sql', '') - parts = basename.split('_') - if len(parts) < 3: - raise exceptions.ScriptError( - "Invalid SQL script name %s " % basename + \ - "(needs to be ###_description_database_operation.sql)") - version = parts[0] - op = parts[-1] - dbms = parts[-2] - else: - raise exceptions.ScriptError( - "Invalid SQL script name %s " % basename + \ - "(needs to be ###_description_database_operation.sql)") - - # File the script into a dictionary - self.sql.setdefault(dbms, {})[op] = script.SqlScript(path) - - def _add_script_py(self, path): - if self.python is not None: - raise exceptions.ScriptError('You can only have one Python script ' - 'per version, but you have: %s and %s' % (self.python, path)) - self.python = script.PythonScript(path) - - -class Extensions: - """A namespace for file extensions""" - py = 'py' - sql = 'sql' - -def str_to_filename(s): - """Replaces spaces, (double and single) quotes - and double underscores to underscores - """ - - s = s.replace(' ', '_').replace('"', '_').replace("'", '_').replace(".", "_") - while '__' in s: - s = s.replace('__', '_') - return s diff --git a/libs/qbittorrent/__init__.py b/libs/qbittorrent/__init__.py new file mode 100644 index 00000000..5e3048b2 --- /dev/null +++ b/libs/qbittorrent/__init__.py @@ -0,0 +1 @@ +__version__ = '0.1' \ No newline at end of file diff --git a/libs/qbittorrent/base.py b/libs/qbittorrent/base.py new file mode 100644 index 00000000..328e008a --- /dev/null +++ b/libs/qbittorrent/base.py @@ -0,0 +1,62 @@ +from urlparse import urljoin +import logging + +log = logging.getLogger(__name__) + + +class Base(object): + properties = {} + + def __init__(self, url, session, client=None): + self._client = client + self._url = url + self._session = session + + @staticmethod + def _convert(response, response_type): + if response_type == 'json': + try: + return response.json() + except ValueError: + pass + + return response + + def _get(self, path='', response_type='json', **kwargs): + r = self._session.get(urljoin(self._url, path), **kwargs) + return self._convert(r, response_type) + + def _post(self, path='', response_type='json', data=None, **kwargs): + r = self._session.post(urljoin(self._url, path), data, **kwargs) + return self._convert(r, response_type) + + def _fill(self, data): + for key, value in data.items(): + if self.set_property(self, key, value): + continue + + log.debug('%s is missing item with key "%s" and value %s', self.__class__, key, repr(value)) + + @classmethod + def parse(cls, client, data): + obj = cls(client._url, client._session, client) + obj._fill(data) + + return obj + + @classmethod + def set_property(cls, obj, key, value): + prop = cls.properties.get(key, {}) + + if prop.get('key'): + key = prop['key'] + + if not hasattr(obj, key): + return False + + + if prop.get('parse'): + value = prop['parse'](value) + + setattr(obj, key, value) + return True diff --git a/libs/qbittorrent/client.py b/libs/qbittorrent/client.py new file mode 100644 index 00000000..bc59cd0e --- /dev/null +++ 
b/libs/qbittorrent/client.py @@ -0,0 +1,72 @@ +from qbittorrent.base import Base +from qbittorrent.torrent import Torrent +from requests import Session +from requests.auth import HTTPDigestAuth +import time + + +class QBittorrentClient(Base): + def __init__(self, url, username=None, password=None): + super(QBittorrentClient, self).__init__(url, Session()) + + if username and password: + self._session.auth = HTTPDigestAuth(username, password) + + def test_connection(self): + r = self._get(response_type='response') + + return r.status_code == 200 + + def add_file(self, file): + self._post('command/upload', files={'torrent': file}) + + def add_url(self, urls): + if type(urls) is not list: + urls = [urls] + + urls = '%0A'.join(urls) + + self._post('command/download', data={'urls': urls}) + + def get_torrents(self): + """Fetch all torrents + + :return: list of Torrent + """ + r = self._get('json/torrents') + + return [Torrent.parse(self, x) for x in r] + + def get_torrent(self, hash, include_general=True, max_retries=5): + """Fetch details for a torrent by its info hash. + + :param hash: Torrent info hash + :param include_general: Include general torrent properties + :param max_retries: Maximum number of retries to wait for torrent to appear in client + + :rtype: Torrent or None + """ + + torrent = None + retries = 0 + + # Try to find the torrent in the client + while retries < max_retries: + # TODO this wouldn't be very efficient with large numbers of torrents on the client + torrents = dict([(t.hash, t) for t in self.get_torrents()]) + + if hash in torrents: + torrent = torrents[hash] + break + + retries += 1 + time.sleep(1) + + if torrent is None: + return None + + # Fetch general properties for torrent + if include_general: + torrent.update_general() + + return torrent diff --git a/libs/qbittorrent/file.py b/libs/qbittorrent/file.py new file mode 100644 index 00000000..29ba04e5 --- /dev/null +++ b/libs/qbittorrent/file.py @@ -0,0 +1,15 @@ +from qbittorrent.base import Base + + +class File(Base): + def __init__(self, url, session, client=None): + super(File, self).__init__(url, session, client) + + self.name = None + + self.progress = None + self.priority = None + + self.is_seed = None + + self.size = None diff --git a/libs/qbittorrent/helpers.py b/libs/qbittorrent/helpers.py new file mode 100644 index 00000000..253f03e8 --- /dev/null +++ b/libs/qbittorrent/helpers.py @@ -0,0 +1,7 @@ +def try_convert(value, to_type, default=None): + try: + return to_type(value) + except ValueError: + return default + except TypeError: + return default diff --git a/libs/qbittorrent/torrent.py b/libs/qbittorrent/torrent.py new file mode 100644 index 00000000..68ec2ce0 --- /dev/null +++ b/libs/qbittorrent/torrent.py @@ -0,0 +1,96 @@ +from qbittorrent.base import Base +from qbittorrent.file import File +from qbittorrent.helpers import try_convert + + +class Torrent(Base): + properties = { + 'num_seeds': { + 'key': 'seeds', + 'parse': lambda value: try_convert(value, int) + }, + 'num_leechs': { + 'key': 'leechs', + 'parse': lambda value: try_convert(value, int) + }, + 'ratio': { + 'parse': lambda value: try_convert(value, float) + } + } + + def __init__(self, url, session, client=None): + super(Torrent, self).__init__(url, session, client) + + self.hash = None + self.name = None + + self.state = None + self.ratio = None + self.progress = None + self.priority = None + + self.seeds = None + self.leechs = None + + # General properties + self.comment = None + self.save_path = None + + self.eta = None + self.size = None + 
self.dlspeed = None + self.upspeed = None + self.nb_connections = None + self.share_ratio = None + self.piece_size = None + self.total_wasted = None + self.total_downloaded = None + self.total_uploaded = None + self.creation_date = None + self.time_elapsed = None + self.up_limit = None + self.dl_limit = None + + # + # Commands + # + + def pause(self): + self._post('command/pause', data={'hash': self.hash}) + + def resume(self): + self._post('command/resume', data={'hash': self.hash}) + + def remove(self): + self._post('command/delete', data={'hashes': self.hash}) + + def delete(self): + self._post('command/deletePerm', data={'hashes': self.hash}) + + def recheck(self): + self._post('command/recheck', data={'hash': self.hash}) + + # + # Fetch details + # + + def get_files(self): + r = self._get('json/propertiesFiles/%s' % self.hash) + + return [File.parse(self._client, x) for x in r] + + def get_trackers(self): + pass + + # + # Update torrent details + # + + def update_general(self): + r = self._get('json/propertiesGeneral/%s' % self.hash) + + if r: + self._fill(r) + return True + + return False diff --git a/libs/requests/__init__.py b/libs/requests/__init__.py index d5d258e8..bba19002 100644 --- a/libs/requests/__init__.py +++ b/libs/requests/__init__.py @@ -36,17 +36,17 @@ usage: The other HTTP methods are supported - see `requests.api`. Full documentation is at <http://python-requests.org>. -:copyright: (c) 2013 by Kenneth Reitz. +:copyright: (c) 2014 by Kenneth Reitz. :license: Apache 2.0, see LICENSE for more details. """ __title__ = 'requests' -__version__ = '2.1.0' -__build__ = 0x020100 +__version__ = '2.3.0' +__build__ = 0x020300 __author__ = 'Kenneth Reitz' __license__ = 'Apache 2.0' -__copyright__ = 'Copyright 2013 Kenneth Reitz' +__copyright__ = 'Copyright 2014 Kenneth Reitz' # Attempt to enable urllib3's SNI support, if possible try: diff --git a/libs/requests/adapters.py b/libs/requests/adapters.py index b62f64c8..eb7a2d28 100644 --- a/libs/requests/adapters.py +++ b/libs/requests/adapters.py @@ -16,7 +16,7 @@ from .packages.urllib3.response import HTTPResponse from .packages.urllib3.util import Timeout as TimeoutSauce from .compat import urlparse, basestring, urldefrag, unquote from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers, - except_on_missing_scheme, get_auth_from_url) + prepend_scheme_if_needed, get_auth_from_url) from .structures import CaseInsensitiveDict from .packages.urllib3.exceptions import MaxRetryError from .packages.urllib3.exceptions import TimeoutError @@ -55,14 +55,16 @@ class HTTPAdapter(BaseAdapter): :param pool_connections: The number of urllib3 connection pools to cache. :param pool_maxsize: The maximum number of connections to save in the pool. - :param max_retries: The maximum number of retries each connection should attempt. + :param int max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed connections and + timeouts, never to requests where the server returns a response. :param pool_block: Whether the connection pool should block for connections. 
Usage:: >>> import requests >>> s = requests.Session() - >>> a = requests.adapters.HTTPAdapter() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) >>> s.mount('http://', a) """ __attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize', @@ -88,6 +90,11 @@ class HTTPAdapter(BaseAdapter): self.__attrs__) def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + for attr, value in state.items(): setattr(self, attr, value) @@ -196,13 +203,16 @@ class HTTPAdapter(BaseAdapter): proxy = proxies.get(urlparse(url.lower()).scheme) if proxy: - except_on_missing_scheme(proxy) + proxy = prepend_scheme_if_needed(proxy, 'http') proxy_headers = self.proxy_headers(proxy) if not proxy in self.proxy_manager: self.proxy_manager[proxy] = proxy_from_url( proxy, - proxy_headers=proxy_headers) + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block) conn = self.proxy_manager[proxy].connection_from_url(url) else: @@ -276,10 +286,6 @@ class HTTPAdapter(BaseAdapter): username, password = get_auth_from_url(proxy) if username and password: - # Proxy auth usernames and passwords will be urlencoded, we need - # to decode them. - username = unquote(username) - password = unquote(password) headers['Proxy-Authorization'] = _basic_auth_str(username, password) @@ -304,10 +310,7 @@ class HTTPAdapter(BaseAdapter): chunked = not (request.body is None or 'Content-Length' in request.headers) - if stream: - timeout = TimeoutSauce(connect=timeout) - else: - timeout = TimeoutSauce(connect=timeout, read=timeout) + timeout = TimeoutSauce(connect=timeout, read=timeout) try: if not chunked: @@ -366,25 +369,20 @@ class HTTPAdapter(BaseAdapter): conn._put_conn(low_conn) except socket.error as sockerr: - raise ConnectionError(sockerr) + raise ConnectionError(sockerr, request=request) except MaxRetryError as e: - raise ConnectionError(e) + raise ConnectionError(e, request=request) except _ProxyError as e: raise ProxyError(e) except (_SSLError, _HTTPError) as e: if isinstance(e, _SSLError): - raise SSLError(e) + raise SSLError(e, request=request) elif isinstance(e, TimeoutError): - raise Timeout(e) + raise Timeout(e, request=request) else: raise - r = self.build_response(request, resp) - - if not stream: - r.content - - return r + return self.build_response(request, resp) diff --git a/libs/requests/api.py b/libs/requests/api.py index baf43dd6..01d853d5 100644 --- a/libs/requests/api.py +++ b/libs/requests/api.py @@ -26,7 +26,7 @@ def request(method, url, **kwargs): :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. :param files: (optional) Dictionary of 'name': file-like-objects (or {'name': ('filename', fileobj)}) for multipart encoding upload. :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. - :param timeout: (optional) Float describing the timeout of the request. + :param timeout: (optional) Float describing the timeout of the request in seconds. :param allow_redirects: (optional) Boolean. Set to True if POST/PUT/DELETE redirect following is allowed. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. :param verify: (optional) if ``True``, the SSL cert will be verified. A CA_BUNDLE path can also be provided. 
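The adapter and API changes above are worth pausing on: `max_retries` now explicitly covers only failed connections and timeouts (never responses the server actually returned), `timeout` is documented as seconds, and connection-level failures now carry the originating request on the exception. A minimal sketch of how calling code can lean on this; the `http://localhost:5050/` endpoint is a hypothetical placeholder:

```python
import requests
from requests.adapters import HTTPAdapter

session = requests.Session()

# max_retries applies only to failed connections and timeouts,
# never to requests where the server returns a response.
session.mount('http://', HTTPAdapter(max_retries=3))
session.mount('https://', HTTPAdapter(max_retries=3))

try:
    # timeout is expressed in seconds
    response = session.get('http://localhost:5050/', timeout=5.0)
    response.raise_for_status()
except requests.exceptions.ConnectionError as err:
    # Per the RequestException changes in this upgrade, the failed
    # request rides along on the exception, so the URL stays recoverable.
    if err.request is not None:
        print('failed: %s' % err.request.url)
```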
diff --git a/libs/requests/auth.py b/libs/requests/auth.py index 6664cd80..9f831b7a 100644 --- a/libs/requests/auth.py +++ b/libs/requests/auth.py @@ -11,7 +11,6 @@ import os import re import time import hashlib -import logging from base64 import b64encode @@ -19,8 +18,6 @@ from .compat import urlparse, str from .cookies import extract_cookies_to_jar from .utils import parse_dict_header -log = logging.getLogger(__name__) - CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' diff --git a/libs/requests/compat.py b/libs/requests/compat.py index 0d61a572..84d703b6 100644 --- a/libs/requests/compat.py +++ b/libs/requests/compat.py @@ -4,7 +4,7 @@ pythoncompat """ -from .packages import charade as chardet +from .packages import chardet import sys @@ -75,7 +75,9 @@ is_solaris = ('solar==' in str(sys.platform).lower()) # Complete guess. try: import simplejson as json -except ImportError: +except (ImportError, SyntaxError): + # simplejson does not support Python 3.2, it throws a SyntaxError + # because of u'...' Unicode literals. import json # --------- diff --git a/libs/requests/cookies.py b/libs/requests/cookies.py index c465f552..831c49c6 100644 --- a/libs/requests/cookies.py +++ b/libs/requests/cookies.py @@ -198,30 +198,39 @@ class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping): self.set_cookie(c) return c + def iterkeys(self): + """Dict-like iterkeys() that returns an iterator of names of cookies from the jar. + See itervalues() and iteritems().""" + for cookie in iter(self): + yield cookie.name + def keys(self): """Dict-like keys() that returns a list of names of cookies from the jar. See values() and items().""" - keys = [] + return list(self.iterkeys()) + + def itervalues(self): + """Dict-like itervalues() that returns an iterator of values of cookies from the jar. + See iterkeys() and iteritems().""" for cookie in iter(self): - keys.append(cookie.name) - return keys + yield cookie.value def values(self): """Dict-like values() that returns a list of values of cookies from the jar. See keys() and items().""" - values = [] + return list(self.itervalues()) + + def iteritems(self): + """Dict-like iteritems() that returns an iterator of name-value tuples from the jar. + See iterkeys() and itervalues().""" for cookie in iter(self): - values.append(cookie.value) - return values + yield cookie.name, cookie.value def items(self): """Dict-like items() that returns a list of name-value tuples from the jar. See keys() and values(). 
Allows client-code to call "dict(RequestsCookieJar) and get a vanilla python dict of key value pairs.""" - items = [] - for cookie in iter(self): - items.append((cookie.name, cookie.value)) - return items + return list(self.iteritems()) def list_domains(self): """Utility method to list all the domains in the jar.""" @@ -378,29 +387,29 @@ def create_cookie(name, value, **kwargs): def morsel_to_cookie(morsel): """Convert a Morsel object into a Cookie containing the one k/v pair.""" + expires = None - if morsel["max-age"]: - expires = time.time() + morsel["max-age"] + if morsel['max-age']: + expires = time.time() + morsel['max-age'] elif morsel['expires']: - expires = morsel['expires'] - if type(expires) == type(""): - time_template = "%a, %d-%b-%Y %H:%M:%S GMT" - expires = time.mktime(time.strptime(expires, time_template)) - c = create_cookie( - name=morsel.key, - value=morsel.value, - version=morsel['version'] or 0, - port=None, - domain=morsel['domain'], - path=morsel['path'], - secure=bool(morsel['secure']), - expires=expires, - discard=False, + time_template = '%a, %d-%b-%Y %H:%M:%S GMT' + expires = time.mktime( + time.strptime(morsel['expires'], time_template)) - time.timezone + return create_cookie( comment=morsel['comment'], comment_url=bool(morsel['comment']), + discard=False, + domain=morsel['domain'], + expires=expires, + name=morsel.key, + path=morsel['path'], + port=None, rest={'HttpOnly': morsel['httponly']}, - rfc2109=False,) - return c + rfc2109=False, + secure=bool(morsel['secure']), + value=morsel.value, + version=morsel['version'] or 0, + ) def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True): diff --git a/libs/requests/exceptions.py b/libs/requests/exceptions.py index bc42b5ff..a4ee9d63 100644 --- a/libs/requests/exceptions.py +++ b/libs/requests/exceptions.py @@ -7,21 +7,29 @@ requests.exceptions This module contains the set of Requests' exceptions. """ +from .packages.urllib3.exceptions import HTTPError as BaseHTTPError class RequestException(IOError): """There was an ambiguous exception that occurred while handling your request.""" + def __init__(self, *args, **kwargs): + """ + Initialize RequestException with `request` and `response` objects. + """ + response = kwargs.pop('response', None) + self.response = response + self.request = kwargs.pop('request', None) + if (response is not None and not self.request and + hasattr(response, 'request')): + self.request = self.response.request + super(RequestException, self).__init__(*args, **kwargs) + class HTTPError(RequestException): """An HTTP error occurred.""" - def __init__(self, *args, **kwargs): - """ Initializes HTTPError with optional `response` object. """ - self.response = kwargs.pop('response', None) - super(HTTPError, self).__init__(*args, **kwargs) - class ConnectionError(RequestException): """A Connection error occurred.""" @@ -61,3 +69,7 @@ class InvalidURL(RequestException, ValueError): class ChunkedEncodingError(RequestException): """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content""" diff --git a/libs/requests/models.py b/libs/requests/models.py index 34dce181..5aad8ce4 100644 --- a/libs/requests/models.py +++ b/libs/requests/models.py @@ -8,7 +8,6 @@ This module contains the primary objects that power Requests. 
""" import collections -import logging import datetime from io import BytesIO, UnsupportedOperation @@ -20,9 +19,10 @@ from .cookies import cookiejar_from_dict, get_cookie_header from .packages.urllib3.fields import RequestField from .packages.urllib3.filepost import encode_multipart_formdata from .packages.urllib3.util import parse_url +from .packages.urllib3.exceptions import DecodeError from .exceptions import ( HTTPError, RequestException, MissingSchema, InvalidURL, - ChunkedEncodingError) + ChunkedEncodingError, ContentDecodingError) from .utils import ( guess_filename, get_auth_from_url, requote_uri, stream_decode_response_unicode, to_key_val_list, parse_header_links, @@ -30,12 +30,20 @@ from .utils import ( from .compat import ( cookielib, urlunparse, urlsplit, urlencode, str, bytes, StringIO, is_py2, chardet, json, builtin_str, basestring, IncompleteRead) +from .status_codes import codes +#: The set of HTTP status codes that indicate an automatically +#: processable redirect. +REDIRECT_STATI = ( + codes.moved, # 301 + codes.found, # 302 + codes.other, # 303 + codes.temporary_moved, # 307 +) +DEFAULT_REDIRECT_LIMIT = 30 CONTENT_CHUNK_SIZE = 10 * 1024 ITER_CHUNK_SIZE = 512 -log = logging.getLogger(__name__) - class RequestEncodingMixin(object): @property @@ -301,8 +309,8 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): p = PreparedRequest() p.method = self.method p.url = self.url - p.headers = self.headers.copy() - p._cookies = self._cookies.copy() + p.headers = self.headers.copy() if self.headers is not None else None + p._cookies = self._cookies.copy() if self._cookies is not None else None p.body = self.body p.hooks = self.hooks return p @@ -400,9 +408,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): is_stream = all([ hasattr(data, '__iter__'), - not isinstance(data, basestring), - not isinstance(data, list), - not isinstance(data, dict) + not isinstance(data, (basestring, list, tuple, dict)) ]) try: @@ -427,7 +433,7 @@ class PreparedRequest(RequestEncodingMixin, RequestHooksMixin): else: if data: body = self._encode_params(data) - if isinstance(data, str) or isinstance(data, builtin_str) or hasattr(data, 'read'): + if isinstance(data, basestring) or hasattr(data, 'read'): content_type = None else: content_type = 'application/x-www-form-urlencoded' @@ -516,7 +522,7 @@ class Response(object): self._content = False self._content_consumed = False - #: Integer Code of responded HTTP Status. + #: Integer Code of responded HTTP Status, e.g. 404 or 200. self.status_code = None #: Case-insensitive Dictionary of Response Headers. @@ -525,7 +531,7 @@ class Response(object): self.headers = CaseInsensitiveDict() #: File-like object representation of response (for advanced usage). - #: Requires that ``stream=True` on the request. + #: Use of ``raw`` requires that ``stream=True`` be set on the request. # This requirement does not apply for use internally to Requests. self.raw = None @@ -540,6 +546,7 @@ class Response(object): #: up here. The list is sorted from the oldest to the most recent request. self.history = [] + #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK". self.reason = None #: A CookieJar of Cookies the server sent back. @@ -549,6 +556,10 @@ class Response(object): #: and the arrival of the response (as a timedelta) self.elapsed = datetime.timedelta(0) + #: The :class:`PreparedRequest ` object to which this + #: is a response. 
+ self.request = None + + def __getstate__(self): # Consume everything; accessing the content attribute makes # sure the content has been fully read. @@ -566,6 +577,7 @@ # pickled objects do not have .raw setattr(self, '_content_consumed', True) + setattr(self, 'raw', None) def __repr__(self): return '<Response [%s]>' % (self.status_code) @@ -590,10 +602,16 @@ return False return True + @property + def is_redirect(self): + """True if this Response is a well-formed HTTP redirect that could have + been processed automatically (by :meth:`Session.resolve_redirects`). + """ + return ('location' in self.headers and self.status_code in REDIRECT_STATI) + @property def apparent_encoding(self): - """The apparent encoding, provided by the lovely Charade library - (Thanks, Ian!).""" + """The apparent encoding, provided by the chardet library""" return chardet.detect(self.content)['encoding'] def iter_content(self, chunk_size=1, decode_unicode=False): @@ -602,20 +620,20 @@ large responses. The chunk size is the number of bytes it should read into memory. This is not necessarily the length of each item returned as decoding can take place. - """ - if self._content_consumed: - # simulate reading small chunks of the content - return iter_slices(self._content, chunk_size) + If decode_unicode is True, content will be decoded using the best + available encoding based on the response. + """ def generate(): try: # Special case for urllib3. try: - for chunk in self.raw.stream(chunk_size, - decode_content=True): + for chunk in self.raw.stream(chunk_size, decode_content=True): yield chunk except IncompleteRead as e: raise ChunkedEncodingError(e) + except DecodeError as e: + raise ContentDecodingError(e) except AttributeError: # Standard file-like object. while True: @@ -626,12 +644,17 @@ self._content_consumed = True - gen = generate() + # simulate reading small chunks of the content + reused_chunks = iter_slices(self._content, chunk_size) + + stream_chunks = generate() + + chunks = reused_chunks if self._content_consumed else stream_chunks if decode_unicode: - gen = stream_decode_response_unicode(gen, self) + chunks = stream_decode_response_unicode(chunks, self) - return gen + return chunks def iter_lines(self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=None): """Iterates over the response data, one line at a time. When @@ -641,8 +664,7 @@ pending = None - for chunk in self.iter_content(chunk_size=chunk_size, - decode_unicode=decode_unicode): + for chunk in self.iter_content(chunk_size=chunk_size, decode_unicode=decode_unicode): if pending is not None: chunk = pending + chunk @@ -688,7 +710,12 @@ """Content of the response, in unicode. If Response.encoding is None, encoding will be guessed using - ``charade``. + ``chardet``. + + The encoding of the response content is determined based solely on HTTP + headers, following RFC 2616 to the letter. If you can take advantage of + non-HTTP knowledge to make a better guess at the encoding, you should + set ``r.encoding`` appropriately before accessing this property. """ # Try charset from content-type @@ -729,7 +756,14 @@ # a best guess). 
encoding = guess_json_utf(self.content) if encoding is not None: - return json.loads(self.content.decode(encoding), **kwargs) + try: + return json.loads(self.content.decode(encoding), **kwargs) + except UnicodeDecodeError: + # Wrong UTF codec detected; usually because it's not UTF-8 + # but some other 8-bit codec. This is an RFC violation, + # and the server didn't bother to tell us what codec *was* + # used. + pass return json.loads(self.text, **kwargs) @property @@ -765,8 +799,8 @@ raise HTTPError(http_error_msg, response=self) def close(self): - """Closes the underlying file descriptor and releases the connection - back to the pool. + """Releases the connection back to the pool. Once this method has been + called the underlying ``raw`` object must not be accessed again. *Note: Should not normally need to be called explicitly.* """ diff --git a/libs/requests/packages/README.rst b/libs/requests/packages/README.rst new file mode 100644 index 00000000..c42f376b --- /dev/null +++ b/libs/requests/packages/README.rst @@ -0,0 +1,8 @@ +If you are planning to submit a pull request to requests with any changes in +this library do not go any further. These are independent libraries which we +vendor into requests. Any changes necessary to these libraries must be made in +them and submitted as separate pull requests to those libraries. + +urllib3 pull requests go here: https://github.com/shazow/urllib3 + +chardet pull requests go here: https://github.com/chardet/chardet diff --git a/libs/requests/packages/charade/__main__.py b/libs/requests/packages/charade/__main__.py deleted file mode 100644 index 0fc37c34..00000000 --- a/libs/requests/packages/charade/__main__.py +++ /dev/null @@ -1,7 +0,0 @@ -''' -support ';python -m charade <file1> [file2] ...' 
package execution syntax (2.7+) -''' - -from charade import charade_cli - -charade_cli() diff --git a/libs/requests/packages/charade/__init__.py b/libs/requests/packages/chardet/__init__.py similarity index 57% rename from libs/requests/packages/charade/__init__.py rename to libs/requests/packages/chardet/__init__.py index 743c874d..e4f0799d 100644 --- a/libs/requests/packages/charade/__init__.py +++ b/libs/requests/packages/chardet/__init__.py @@ -15,7 +15,7 @@ # 02110-1301 USA ######################### END LICENSE BLOCK ######################### -__version__ = "1.0.3" +__version__ = "2.2.1" from sys import version_info @@ -30,37 +30,3 @@ def detect(aBuf): u.feed(aBuf) u.close() return u.result - -def _description_of(path): - """Return a string describing the probable encoding of a file.""" - from charade.universaldetector import UniversalDetector - - u = UniversalDetector() - for line in open(path, 'rb'): - u.feed(line) - u.close() - result = u.result - if result['encoding']: - return '%s: %s with confidence %s' % (path, - result['encoding'], - result['confidence']) - else: - return '%s: no result' % path - - -def charade_cli(): - """ - Script which takes one or more file paths and reports on their detected - encodings - - Example:: - - % chardetect.py somefile someotherfile - somefile: windows-1252 with confidence 0.5 - someotherfile: ascii with confidence 1.0 - - """ - from sys import argv - for path in argv[1:]: - print(_description_of(path)) - \ No newline at end of file diff --git a/libs/requests/packages/charade/big5freq.py b/libs/requests/packages/chardet/big5freq.py similarity index 100% rename from libs/requests/packages/charade/big5freq.py rename to libs/requests/packages/chardet/big5freq.py diff --git a/libs/requests/packages/charade/big5prober.py b/libs/requests/packages/chardet/big5prober.py similarity index 100% rename from libs/requests/packages/charade/big5prober.py rename to libs/requests/packages/chardet/big5prober.py diff --git a/libs/requests/packages/chardet/chardetect.py b/libs/requests/packages/chardet/chardetect.py new file mode 100644 index 00000000..ecd0163b --- /dev/null +++ b/libs/requests/packages/chardet/chardetect.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +""" +Script which takes one or more file paths and reports on their detected +encodings + +Example:: + + % chardetect somefile someotherfile + somefile: windows-1252 with confidence 0.5 + someotherfile: ascii with confidence 1.0 + +If no paths are provided, it takes its input from stdin. 
+ +""" +from io import open +from sys import argv, stdin + +from chardet.universaldetector import UniversalDetector + + +def description_of(file, name='stdin'): + """Return a string describing the probable encoding of a file.""" + u = UniversalDetector() + for line in file: + u.feed(line) + u.close() + result = u.result + if result['encoding']: + return '%s: %s with confidence %s' % (name, + result['encoding'], + result['confidence']) + else: + return '%s: no result' % name + + +def main(): + if len(argv) <= 1: + print(description_of(stdin)) + else: + for path in argv[1:]: + with open(path, 'rb') as f: + print(description_of(f, path)) + + +if __name__ == '__main__': + main() diff --git a/libs/requests/packages/charade/chardistribution.py b/libs/requests/packages/chardet/chardistribution.py similarity index 100% rename from libs/requests/packages/charade/chardistribution.py rename to libs/requests/packages/chardet/chardistribution.py diff --git a/libs/requests/packages/charade/charsetgroupprober.py b/libs/requests/packages/chardet/charsetgroupprober.py similarity index 100% rename from libs/requests/packages/charade/charsetgroupprober.py rename to libs/requests/packages/chardet/charsetgroupprober.py diff --git a/libs/requests/packages/charade/charsetprober.py b/libs/requests/packages/chardet/charsetprober.py similarity index 100% rename from libs/requests/packages/charade/charsetprober.py rename to libs/requests/packages/chardet/charsetprober.py diff --git a/libs/requests/packages/charade/codingstatemachine.py b/libs/requests/packages/chardet/codingstatemachine.py similarity index 100% rename from libs/requests/packages/charade/codingstatemachine.py rename to libs/requests/packages/chardet/codingstatemachine.py diff --git a/libs/requests/packages/charade/compat.py b/libs/requests/packages/chardet/compat.py similarity index 100% rename from libs/requests/packages/charade/compat.py rename to libs/requests/packages/chardet/compat.py diff --git a/libs/requests/packages/charade/constants.py b/libs/requests/packages/chardet/constants.py similarity index 100% rename from libs/requests/packages/charade/constants.py rename to libs/requests/packages/chardet/constants.py diff --git a/libs/requests/packages/charade/cp949prober.py b/libs/requests/packages/chardet/cp949prober.py similarity index 100% rename from libs/requests/packages/charade/cp949prober.py rename to libs/requests/packages/chardet/cp949prober.py diff --git a/libs/requests/packages/charade/escprober.py b/libs/requests/packages/chardet/escprober.py similarity index 100% rename from libs/requests/packages/charade/escprober.py rename to libs/requests/packages/chardet/escprober.py diff --git a/libs/requests/packages/charade/escsm.py b/libs/requests/packages/chardet/escsm.py similarity index 100% rename from libs/requests/packages/charade/escsm.py rename to libs/requests/packages/chardet/escsm.py diff --git a/libs/requests/packages/charade/eucjpprober.py b/libs/requests/packages/chardet/eucjpprober.py similarity index 100% rename from libs/requests/packages/charade/eucjpprober.py rename to libs/requests/packages/chardet/eucjpprober.py diff --git a/libs/requests/packages/charade/euckrfreq.py b/libs/requests/packages/chardet/euckrfreq.py similarity index 100% rename from libs/requests/packages/charade/euckrfreq.py rename to libs/requests/packages/chardet/euckrfreq.py diff --git a/libs/requests/packages/charade/euckrprober.py b/libs/requests/packages/chardet/euckrprober.py similarity index 100% rename from 
libs/requests/packages/charade/euckrprober.py rename to libs/requests/packages/chardet/euckrprober.py diff --git a/libs/requests/packages/charade/euctwfreq.py b/libs/requests/packages/chardet/euctwfreq.py similarity index 100% rename from libs/requests/packages/charade/euctwfreq.py rename to libs/requests/packages/chardet/euctwfreq.py diff --git a/libs/requests/packages/charade/euctwprober.py b/libs/requests/packages/chardet/euctwprober.py similarity index 100% rename from libs/requests/packages/charade/euctwprober.py rename to libs/requests/packages/chardet/euctwprober.py diff --git a/libs/requests/packages/charade/gb2312freq.py b/libs/requests/packages/chardet/gb2312freq.py similarity index 100% rename from libs/requests/packages/charade/gb2312freq.py rename to libs/requests/packages/chardet/gb2312freq.py diff --git a/libs/requests/packages/charade/gb2312prober.py b/libs/requests/packages/chardet/gb2312prober.py similarity index 100% rename from libs/requests/packages/charade/gb2312prober.py rename to libs/requests/packages/chardet/gb2312prober.py diff --git a/libs/requests/packages/charade/hebrewprober.py b/libs/requests/packages/chardet/hebrewprober.py similarity index 100% rename from libs/requests/packages/charade/hebrewprober.py rename to libs/requests/packages/chardet/hebrewprober.py diff --git a/libs/requests/packages/charade/jisfreq.py b/libs/requests/packages/chardet/jisfreq.py similarity index 100% rename from libs/requests/packages/charade/jisfreq.py rename to libs/requests/packages/chardet/jisfreq.py diff --git a/libs/requests/packages/charade/jpcntx.py b/libs/requests/packages/chardet/jpcntx.py similarity index 99% rename from libs/requests/packages/charade/jpcntx.py rename to libs/requests/packages/chardet/jpcntx.py index 04634422..f7f69ba4 100644 --- a/libs/requests/packages/charade/jpcntx.py +++ b/libs/requests/packages/chardet/jpcntx.py @@ -169,7 +169,7 @@ class JapaneseContextAnalysis: def get_confidence(self): # This is just one way to calculate confidence. It works well for me. 
if self._mTotalRel > MINIMUM_DATA_THRESHOLD: - return float(self._mTotalRel - self._mRelSample[0]) / self._mTotalRel + return (self._mTotalRel - self._mRelSample[0]) / self._mTotalRel else: return DONT_KNOW diff --git a/libs/requests/packages/charade/langbulgarianmodel.py b/libs/requests/packages/chardet/langbulgarianmodel.py similarity index 100% rename from libs/requests/packages/charade/langbulgarianmodel.py rename to libs/requests/packages/chardet/langbulgarianmodel.py diff --git a/libs/requests/packages/charade/langcyrillicmodel.py b/libs/requests/packages/chardet/langcyrillicmodel.py similarity index 100% rename from libs/requests/packages/charade/langcyrillicmodel.py rename to libs/requests/packages/chardet/langcyrillicmodel.py diff --git a/libs/requests/packages/charade/langgreekmodel.py b/libs/requests/packages/chardet/langgreekmodel.py similarity index 100% rename from libs/requests/packages/charade/langgreekmodel.py rename to libs/requests/packages/chardet/langgreekmodel.py diff --git a/libs/requests/packages/charade/langhebrewmodel.py b/libs/requests/packages/chardet/langhebrewmodel.py similarity index 100% rename from libs/requests/packages/charade/langhebrewmodel.py rename to libs/requests/packages/chardet/langhebrewmodel.py diff --git a/libs/requests/packages/charade/langhungarianmodel.py b/libs/requests/packages/chardet/langhungarianmodel.py similarity index 100% rename from libs/requests/packages/charade/langhungarianmodel.py rename to libs/requests/packages/chardet/langhungarianmodel.py diff --git a/libs/requests/packages/charade/langthaimodel.py b/libs/requests/packages/chardet/langthaimodel.py similarity index 100% rename from libs/requests/packages/charade/langthaimodel.py rename to libs/requests/packages/chardet/langthaimodel.py diff --git a/libs/requests/packages/charade/latin1prober.py b/libs/requests/packages/chardet/latin1prober.py similarity index 98% rename from libs/requests/packages/charade/latin1prober.py rename to libs/requests/packages/chardet/latin1prober.py index 5e2c9f90..ad695f57 100644 --- a/libs/requests/packages/charade/latin1prober.py +++ b/libs/requests/packages/chardet/latin1prober.py @@ -129,7 +129,7 @@ class Latin1Prober(CharSetProber): if total < 0.01: confidence = 0.0 else: - confidence = ((float(self._mFreqCounter[3]) / total) + confidence = ((self._mFreqCounter[3] / total) - (self._mFreqCounter[1] * 20.0 / total)) if confidence < 0.0: confidence = 0.0 diff --git a/libs/requests/packages/charade/mbcharsetprober.py b/libs/requests/packages/chardet/mbcharsetprober.py similarity index 100% rename from libs/requests/packages/charade/mbcharsetprober.py rename to libs/requests/packages/chardet/mbcharsetprober.py diff --git a/libs/requests/packages/charade/mbcsgroupprober.py b/libs/requests/packages/chardet/mbcsgroupprober.py similarity index 100% rename from libs/requests/packages/charade/mbcsgroupprober.py rename to libs/requests/packages/chardet/mbcsgroupprober.py diff --git a/libs/requests/packages/charade/mbcssm.py b/libs/requests/packages/chardet/mbcssm.py similarity index 100% rename from libs/requests/packages/charade/mbcssm.py rename to libs/requests/packages/chardet/mbcssm.py diff --git a/libs/requests/packages/charade/sbcharsetprober.py b/libs/requests/packages/chardet/sbcharsetprober.py similarity index 100% rename from libs/requests/packages/charade/sbcharsetprober.py rename to libs/requests/packages/chardet/sbcharsetprober.py diff --git a/libs/requests/packages/charade/sbcsgroupprober.py 
b/libs/requests/packages/chardet/sbcsgroupprober.py similarity index 100% rename from libs/requests/packages/charade/sbcsgroupprober.py rename to libs/requests/packages/chardet/sbcsgroupprober.py diff --git a/libs/requests/packages/charade/sjisprober.py b/libs/requests/packages/chardet/sjisprober.py similarity index 100% rename from libs/requests/packages/charade/sjisprober.py rename to libs/requests/packages/chardet/sjisprober.py diff --git a/libs/requests/packages/charade/universaldetector.py b/libs/requests/packages/chardet/universaldetector.py similarity index 90% rename from libs/requests/packages/charade/universaldetector.py rename to libs/requests/packages/chardet/universaldetector.py index 7ccea84f..9a03ad3d 100644 --- a/libs/requests/packages/charade/universaldetector.py +++ b/libs/requests/packages/chardet/universaldetector.py @@ -74,10 +74,12 @@ class UniversalDetector: if aBuf[:3] == codecs.BOM: # EF BB BF UTF-8 with BOM self.result = {'encoding': "UTF-8", 'confidence': 1.0} - elif aBuf[:4] in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + elif aBuf[:4] == codecs.BOM_UTF32_LE: # FF FE 00 00 UTF-32, little-endian BOM + self.result = {'encoding': "UTF-32LE", 'confidence': 1.0} + elif aBuf[:4] == codecs.BOM_UTF32_BE: # 00 00 FE FF UTF-32, big-endian BOM - self.result = {'encoding': "UTF-32", 'confidence': 1.0} + self.result = {'encoding': "UTF-32BE", 'confidence': 1.0} elif aBuf[:4] == b'\xFE\xFF\x00\x00': # FE FF 00 00 UCS-4, unusual octet order BOM (3412) self.result = { @@ -90,10 +92,12 @@ class UniversalDetector: 'encoding': "X-ISO-10646-UCS-4-2143", 'confidence': 1.0 } - elif aBuf[:2] == codecs.BOM_LE or aBuf[:2] == codecs.BOM_BE: + elif aBuf[:2] == codecs.BOM_LE: # FF FE UTF-16, little endian BOM + self.result = {'encoding': "UTF-16LE", 'confidence': 1.0} + elif aBuf[:2] == codecs.BOM_BE: # FE FF UTF-16, big endian BOM - self.result = {'encoding': "UTF-16", 'confidence': 1.0} + self.result = {'encoding': "UTF-16BE", 'confidence': 1.0} self._mGotData = True if self.result['encoding'] and (self.result['confidence'] > 0.0): @@ -113,10 +117,8 @@ class UniversalDetector: if not self._mEscCharSetProber: self._mEscCharSetProber = EscCharSetProber() if self._mEscCharSetProber.feed(aBuf) == constants.eFoundIt: - self.result = { - 'encoding': self._mEscCharSetProber.get_charset_name(), - 'confidence': self._mEscCharSetProber.get_confidence() - } + self.result = {'encoding': self._mEscCharSetProber.get_charset_name(), + 'confidence': self._mEscCharSetProber.get_confidence()} self.done = True elif self._mInputState == eHighbyte: if not self._mCharSetProbers: diff --git a/libs/requests/packages/charade/utf8prober.py b/libs/requests/packages/chardet/utf8prober.py similarity index 100% rename from libs/requests/packages/charade/utf8prober.py rename to libs/requests/packages/chardet/utf8prober.py diff --git a/libs/requests/packages/urllib3/_collections.py b/libs/requests/packages/urllib3/_collections.py index 5907b0dc..9cea3a44 100644 --- a/libs/requests/packages/urllib3/_collections.py +++ b/libs/requests/packages/urllib3/_collections.py @@ -4,7 +4,7 @@ # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php -from collections import MutableMapping +from collections import Mapping, MutableMapping try: from threading import RLock except ImportError: # Platform-specific: No threads available @@ -20,9 +20,10 @@ try: # Python 2.7+ from collections import OrderedDict except ImportError: from .packages.ordered_dict import 
OrderedDict +from .packages.six import itervalues -__all__ = ['RecentlyUsedContainer'] +__all__ = ['RecentlyUsedContainer', 'HTTPHeaderDict'] _Null = object() @@ -101,3 +102,104 @@ class RecentlyUsedContainer(MutableMapping): def keys(self): with self.lock: return self._container.keys() + + +class HTTPHeaderDict(MutableMapping): + """ + :param headers: + An iterable of field-value pairs. Must not contain multiple field names + when compared case-insensitively. + + :param kwargs: + Additional field-value pairs to pass in to ``dict.update``. + + A ``dict`` like container for storing HTTP Headers. + + Field names are stored and compared case-insensitively in compliance with + RFC 2616. Iteration provides the first case-sensitive key seen for each + case-insensitive pair. + + Using ``__setitem__`` syntax overwrites fields that compare equal + case-insensitively in order to maintain ``dict``'s api. For fields that + compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add`` + in a loop. + + If multiple fields that are equal case-insensitively are passed to the + constructor or ``.update``, the behavior is undefined and some will be + lost. + + >>> headers = HTTPHeaderDict() + >>> headers.add('Set-Cookie', 'foo=bar') + >>> headers.add('set-cookie', 'baz=quxx') + >>> headers['content-length'] = '7' + >>> headers['SET-cookie'] + 'foo=bar, baz=quxx' + >>> headers['Content-Length'] + '7' + + If you want to access the raw headers with their original casing + for debugging purposes you can access the private ``._data`` attribute + which is a normal python ``dict`` that maps the case-insensitive key to a + list of tuples stored as (case-sensitive-original-name, value). Using the + structure from above as our example: + + >>> headers._data + {'set-cookie': [('Set-Cookie', 'foo=bar'), ('set-cookie', 'baz=quxx')], + 'content-length': [('content-length', '7')]} + """ + + def __init__(self, headers=None, **kwargs): + self._data = {} + if headers is None: + headers = {} + self.update(headers, **kwargs) + + def add(self, key, value): + """Adds a (name, value) pair, doesn't overwrite the value if it already + exists. + + >>> headers = HTTPHeaderDict(foo='bar') + >>> headers.add('Foo', 'baz') + >>> headers['foo'] + 'bar, baz' + """ + self._data.setdefault(key.lower(), []).append((key, value)) + + def getlist(self, key): + """Returns a list of all the values for the named field. 
Returns an + empty list if the key doesn't exist.""" + return self[key].split(', ') if key in self else [] + + def copy(self): + h = HTTPHeaderDict() + for key in self._data: + for rawkey, value in self._data[key]: + h.add(rawkey, value) + return h + + def __eq__(self, other): + if not isinstance(other, Mapping): + return False + other = HTTPHeaderDict(other) + return dict((k1, self[k1]) for k1 in self._data) == \ + dict((k2, other[k2]) for k2 in other._data) + + def __getitem__(self, key): + values = self._data[key.lower()] + return ', '.join(value[1] for value in values) + + def __setitem__(self, key, value): + self._data[key.lower()] = [(key, value)] + + def __delitem__(self, key): + del self._data[key.lower()] + + def __len__(self): + return len(self._data) + + def __iter__(self): + for headers in itervalues(self._data): + yield headers[0][0] + + def __repr__(self): + return '%s(%r)' % (self.__class__.__name__, dict(self.items())) diff --git a/libs/requests/packages/urllib3/connection.py b/libs/requests/packages/urllib3/connection.py index e240786a..5feb3322 100644 --- a/libs/requests/packages/urllib3/connection.py +++ b/libs/requests/packages/urllib3/connection.py @@ -4,13 +4,14 @@ # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php +import sys import socket from socket import timeout as SocketTimeout try: # Python 3 - from http.client import HTTPConnection, HTTPException + from http.client import HTTPConnection as _HTTPConnection, HTTPException except ImportError: - from httplib import HTTPConnection, HTTPException + from httplib import HTTPConnection as _HTTPConnection, HTTPException class DummyConnection(object): "Used to detect a failed ConnectionCls import." @@ -24,9 +25,9 @@ try: # Compiled with SSL? pass try: # Python 3 - from http.client import HTTPSConnection + from http.client import HTTPSConnection as _HTTPSConnection except ImportError: - from httplib import HTTPSConnection + from httplib import HTTPSConnection as _HTTPSConnection import ssl BaseSSLError = ssl.SSLError @@ -38,6 +39,7 @@ from .exceptions import ( ConnectTimeoutError, ) from .packages.ssl_match_hostname import match_hostname +from .packages import six from .util import ( assert_fingerprint, resolve_cert_reqs, @@ -45,6 +47,88 @@ from .util import ( ssl_wrap_socket, ) + +port_by_scheme = { + 'http': 80, + 'https': 443, +} + + +class HTTPConnection(_HTTPConnection, object): + """ + Based on httplib.HTTPConnection but provides an extra constructor + backwards-compatibility layer between older and newer Pythons. + """ + + default_port = port_by_scheme['http'] + + # By default, disable Nagle's Algorithm. + tcp_nodelay = 1 + + def __init__(self, *args, **kw): + if six.PY3: # Python 3 + kw.pop('strict', None) + if sys.version_info < (2, 7): # Python 2.6 and older + kw.pop('source_address', None) + + # Pre-set source_address in case we have an older Python like 2.6. + self.source_address = kw.get('source_address') + + # Superclass also sets self.source_address in Python 2.7+. + _HTTPConnection.__init__(self, *args, **kw) + + def _new_conn(self): + """ Establish a socket connection and set nodelay settings on it. 
+ + :return: a new socket connection + """ + extra_args = [] + if self.source_address: # Python 2.7+ + extra_args.append(self.source_address) + + conn = socket.create_connection( + (self.host, self.port), self.timeout, *extra_args) + conn.setsockopt( + socket.IPPROTO_TCP, socket.TCP_NODELAY, self.tcp_nodelay) + + return conn + + def _prepare_conn(self, conn): + self.sock = conn + # the _tunnel_host attribute was added in python 2.6.3 (via + # http://hg.python.org/cpython/rev/0f57b30a152f) so pythons 2.6(0-2) do + # not have them. + if getattr(self, '_tunnel_host', None): + # TODO: Fix tunnel so it doesn't depend on self.sock state. + self._tunnel() + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + + +class HTTPSConnection(HTTPConnection): + default_port = port_by_scheme['https'] + + def __init__(self, host, port=None, key_file=None, cert_file=None, + strict=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT, **kw): + + HTTPConnection.__init__(self, host, port, strict=strict, + timeout=timeout, **kw) + + self.key_file = key_file + self.cert_file = cert_file + + # Required property for Google AppEngine 1.9.0 which otherwise causes + # HTTPS requests to go out as HTTP. (See Issue #356) + self._protocol = 'https' + + def connect(self): + conn = self._new_conn() + self._prepare_conn(conn) + self.sock = ssl.wrap_socket(conn, self.key_file, self.cert_file) + + class VerifiedHTTPSConnection(HTTPSConnection): """ Based on httplib.HTTPSConnection but wraps the socket with @@ -53,6 +137,7 @@ class VerifiedHTTPSConnection(HTTPSConnection): cert_reqs = None ca_certs = None ssl_version = None + conn_kw = {} def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, @@ -67,31 +152,41 @@ class VerifiedHTTPSConnection(HTTPSConnection): def connect(self): # Add certificate verification + try: sock = socket.create_connection( - address=(self.host, self.port), - timeout=self.timeout, - ) + address=(self.host, self.port), timeout=self.timeout, + **self.conn_kw) except SocketTimeout: - raise ConnectTimeoutError( - self, "Connection to %s timed out. (connect timeout=%s)" % - (self.host, self.timeout)) + raise ConnectTimeoutError( + self, "Connection to %s timed out. (connect timeout=%s)" % + (self.host, self.timeout)) + + sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, + self.tcp_nodelay) resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) - if self._tunnel_host: + hostname = self.host + if getattr(self, '_tunnel_host', None): + # _tunnel_host was added in Python 2.6.3 + # (See: http://hg.python.org/cpython/rev/0f57b30a152f) + self.sock = sock # Calls self._set_hostport(), so self.host is # self._tunnel_host below. self._tunnel() + # Override the host with the one we're requesting data from. + hostname = self._tunnel_host + # Wrap socket using verification with the root certs in # trusted_root_certs self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, - server_hostname=self.host, + server_hostname=hostname, ssl_version=resolved_ssl_version) if resolved_cert_reqs != ssl.CERT_NONE: @@ -100,8 +195,10 @@ class VerifiedHTTPSConnection(HTTPSConnection): self.assert_fingerprint) elif self.assert_hostname is not False: match_hostname(self.sock.getpeercert(), - self.assert_hostname or self.host) + self.assert_hostname or hostname) if ssl: + # Make a copy for testing. 
+ UnverifiedHTTPSConnection = HTTPSConnection HTTPSConnection = VerifiedHTTPSConnection diff --git a/libs/requests/packages/urllib3/connectionpool.py b/libs/requests/packages/urllib3/connectionpool.py index 72011b5a..95a53a7d 100644 --- a/libs/requests/packages/urllib3/connectionpool.py +++ b/libs/requests/packages/urllib3/connectionpool.py @@ -4,6 +4,7 @@ # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php +import sys import errno import logging @@ -19,9 +20,11 @@ except ImportError: from .exceptions import ( ClosedPoolError, + ConnectionError, ConnectTimeoutError, EmptyPoolError, HostChangedError, + LocationParseError, MaxRetryError, SSLError, TimeoutError, @@ -31,6 +34,7 @@ from .exceptions import ( from .packages.ssl_match_hostname import CertificateError from .packages import six from .connection import ( + port_by_scheme, DummyConnection, HTTPConnection, HTTPSConnection, VerifiedHTTPSConnection, HTTPException, BaseSSLError, @@ -38,7 +42,6 @@ from .connection import ( from .request import RequestMethods from .response import HTTPResponse from .util import ( - assert_fingerprint, get_host, is_connection_dropped, Timeout, @@ -51,12 +54,6 @@ log = logging.getLogger(__name__) _Default = object() -port_by_scheme = { - 'http': 80, - 'https': 443, -} - - ## Pool objects class ConnectionPool(object): @@ -69,6 +66,9 @@ class ConnectionPool(object): QueueCls = LifoQueue def __init__(self, host, port=None): + if host is None: + raise LocationParseError(host) + # httplib doesn't like it when we include brackets in ipv6 addresses host = host.strip('[]') @@ -140,7 +140,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): def __init__(self, host, port=None, strict=False, timeout=Timeout.DEFAULT_TIMEOUT, maxsize=1, block=False, - headers=None, _proxy=None, _proxy_headers=None): + headers=None, _proxy=None, _proxy_headers=None, **conn_kw): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) @@ -167,21 +167,26 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): self.num_connections = 0 self.num_requests = 0 + if sys.version_info < (2, 7): # Python 2.6 and older + conn_kw.pop('source_address', None) + self.conn_kw = conn_kw + def _new_conn(self): """ - Return a fresh :class:`httplib.HTTPConnection`. + Return a fresh :class:`HTTPConnection`. """ self.num_connections += 1 log.info("Starting new HTTP connection (%d): %s" % (self.num_connections, self.host)) - extra_params = {} - if not six.PY3: # Python 2 - extra_params['strict'] = self.strict - - return self.ConnectionCls(host=self.host, port=self.port, + conn = self.ConnectionCls(host=self.host, port=self.port, timeout=self.timeout.connect_timeout, - **extra_params) + strict=self.strict, **self.conn_kw) + if self.proxy is not None: + # Enable Nagle's algorithm for proxies, to avoid packet + # fragmentation. + conn.tcp_nodelay = 0 + return conn def _get_conn(self, timeout=None): """ @@ -238,8 +243,9 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): pass except Full: # This should never happen if self.block == True - log.warning("HttpConnectionPool is full, discarding connection: %s" - % self.host) + log.warning( + "Connection pool is full, discarding connection: %s" % + self.host) # Connection never got put back into the pool, close it. 
if conn: @@ -260,7 +266,7 @@ def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw): """ - Perform a request on a given httplib connection object taken from our + Perform a request on a given urllib connection object taken from our pool. :param conn: @@ -371,9 +377,11 @@ # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) + # Use explicit default port for comparison when none is given if self.port and not port: - # Use explicit default port for comparison when none is given. port = port_by_scheme.get(scheme) + elif not self.port and port == port_by_scheme.get(scheme): + port = None return (scheme, host, port) == (self.scheme, self.host, self.port) @@ -412,10 +420,13 @@ :param retries: Number of retries to allow before raising a MaxRetryError exception. + If `False`, then retries are disabled and any exception is raised + immediately. :param redirect: If True, automatically handle redirects (status codes 301, 302, - 303, 307, 308). Each redirect counts as a retry. + 303, 307, 308). Each redirect counts as a retry. Disabling retries + will disable redirect, too. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is @@ -449,7 +460,7 @@ if headers is None: headers = self.headers - if retries < 0: + if retries < 0 and retries is not False: raise MaxRetryError(self, url) if release_conn is None: @@ -468,6 +479,10 @@ headers = headers.copy() headers.update(self.proxy_headers) + # Must keep the exception bound to a separate variable or else Python 3 + # complains about UnboundLocalError. + err = None + try: # Request a connection from the queue conn = self._get_conn(timeout=pool_timeout) @@ -495,38 +510,41 @@ # ``response.read()``) except Empty: - # Timed out by queue. + # Timed out by queue. raise EmptyPoolError(self, "No pool connections are available.") - except BaseSSLError as e: + except (BaseSSLError, CertificateError) as e: + # Release connection unconditionally because there is no way to + # close it externally in case of exception. + release_conn = True raise SSLError(e) - except CertificateError as e: - # Name mismatch - raise SSLError(e) + except (TimeoutError, HTTPException, SocketError) as e: + if conn: + # Discard the connection for these exceptions. It will be + # replaced during the next _get_conn() call. + conn.close() + conn = None - except TimeoutError as e: - # Connection broken, discard. - conn = None - # Save the error off for retry logic. - err = e + if not retries: + if isinstance(e, TimeoutError): + # TimeoutError is exempt from MaxRetryError-wrapping. + # FIXME: ... Not sure why. Add a reason here. + raise - if retries == 0: - raise + # Wrap unexpected exceptions with the most appropriate + # module-level exception and re-raise. + if isinstance(e, SocketError) and self.proxy: + raise ProxyError('Cannot connect to proxy.', e) - except (HTTPException, SocketError) as e: - if isinstance(e, SocketError) and self.proxy is not None: - raise ProxyError('Cannot connect to proxy. ' - 'Socket error: %s.' % e) + if retries is False: + raise ConnectionError('Connection failed.', e) - # Connection broken, discard. 
It will be replaced next _get_conn(). - conn = None - # This is necessary so we can access e below - err = e - - if retries == 0: raise MaxRetryError(self, url, e) + # Keep track of the error for the retry warning. + err = e + finally: if release_conn: # Put the connection back to be reused. If the connection is @@ -536,8 +554,8 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): if not conn: # Try again - log.warn("Retrying (%d attempts remain) after connection " - "broken by '%r': %s" % (retries, err, url)) + log.warning("Retrying (%d attempts remain) after connection " + "broken by '%r': %s" % (retries, err, url)) return self.urlopen(method, url, body, headers, retries - 1, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, @@ -545,7 +563,7 @@ class HTTPConnectionPool(ConnectionPool, RequestMethods): # Handle redirect? redirect_location = redirect and response.get_redirect_location() - if redirect_location: + if redirect_location and retries is not False: if response.status == 303: method = 'GET' log.info("Redirecting %s -> %s" % (url, redirect_location)) @@ -563,7 +581,7 @@ class HTTPSConnectionPool(HTTPConnectionPool): When Python is compiled with the :mod:`ssl` module, then :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, - instead of :class:`httplib.HTTPSConnection`. + instead of :class:`.HTTPSConnection`. :class:`.VerifiedHTTPSConnection` uses one of ``assert_fingerprint``, ``assert_hostname`` and ``host`` in this order to verify connections. @@ -584,10 +602,14 @@ class HTTPSConnectionPool(HTTPConnectionPool): _proxy=None, _proxy_headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None, - assert_hostname=None, assert_fingerprint=None): + assert_hostname=None, assert_fingerprint=None, + **conn_kw): + + if sys.version_info < (2, 7): # Python 2.6 or older + conn_kw.pop('source_address', None) HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, - block, headers, _proxy, _proxy_headers) + block, headers, _proxy, _proxy_headers, **conn_kw) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs @@ -595,6 +617,7 @@ class HTTPSConnectionPool(HTTPConnectionPool): self.ssl_version = ssl_version self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint + self.conn_kw = conn_kw def _prepare_conn(self, conn): """ @@ -610,6 +633,7 @@ class HTTPSConnectionPool(HTTPConnectionPool): assert_hostname=self.assert_hostname, assert_fingerprint=self.assert_fingerprint) conn.ssl_version = self.ssl_version + conn.conn_kw = self.conn_kw if self.proxy is not None: # Python 2.7+ @@ -646,10 +670,15 @@ class HTTPSConnectionPool(HTTPConnectionPool): extra_params = {} if not six.PY3: # Python 2 extra_params['strict'] = self.strict + extra_params.update(self.conn_kw) conn = self.ConnectionCls(host=actual_host, port=actual_port, timeout=self.timeout.connect_timeout, **extra_params) + if self.proxy is not None: + # Enable Nagle's algorithm for proxies, to avoid packet + # fragmentation. + conn.tcp_nodelay = 0 return self._prepare_conn(conn) diff --git a/libs/requests/packages/urllib3/contrib/pyopenssl.py b/libs/requests/packages/urllib3/contrib/pyopenssl.py index f78e7170..21a12c68 100644 --- a/libs/requests/packages/urllib3/contrib/pyopenssl.py +++ b/libs/requests/packages/urllib3/contrib/pyopenssl.py @@ -1,4 +1,7 @@ -'''SSL with SNI-support for Python 2. +'''SSL with SNI_-support for Python 2. 
Follow these instructions if you would
+like to verify SSL certificates in Python 2. Note, the default libraries do
+*not* do certificate checking; you need to do additional work to validate
+certificates yourself.

 This needs the following packages installed:

@@ -6,9 +9,15 @@ This needs the following packages installed:
 * ndg-httpsclient (tested with 0.3.2)
 * pyasn1 (tested with 0.1.6)

-To activate it call :func:`~urllib3.contrib.pyopenssl.inject_into_urllib3`.
-This can be done in a ``sitecustomize`` module, or at any other time before
-your application begins using ``urllib3``, like this::
+You can install them with the following command:
+
+    pip install pyopenssl ndg-httpsclient pyasn1
+
+To activate certificate checking, call
+:func:`~urllib3.contrib.pyopenssl.inject_into_urllib3` from your Python code
+before you begin making HTTP requests. This can be done in a ``sitecustomize``
+module, or at any other time before your application begins using ``urllib3``,
+like this::

     try:
         import urllib3.contrib.pyopenssl
@@ -18,13 +27,31 @@ your application begins using ``urllib3``, like this::
 Now you can use :mod:`urllib3` as you normally would, and it will support SNI
 when the required modules are installed.
+
+Activating this module also has the positive side effect of disabling SSL/TLS
+compression in Python 2 (see `CRIME attack`_).
+
+If you want to configure the default list of supported cipher suites, you can
+set the ``urllib3.contrib.pyopenssl.DEFAULT_SSL_CIPHER_LIST`` variable.
+
+Module Variables
+----------------
+
+:var DEFAULT_SSL_CIPHER_LIST: The list of supported SSL/TLS cipher suites.
+    Default: ``ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:ECDH+AES128:DH+AES:
+    ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:!aNULL:!MD5:!DSS``
+
+.. _sni: https://en.wikipedia.org/wiki/Server_Name_Indication
+.. _crime attack: https://en.wikipedia.org/wiki/CRIME_(security_exploit)
+
 '''

 from ndg.httpsclient.ssl_peer_verification import SUBJ_ALT_NAME_SUPPORT
-from ndg.httpsclient.subj_alt_name import SubjectAltName
+from ndg.httpsclient.subj_alt_name import SubjectAltName as BaseSubjectAltName
 import OpenSSL.SSL
 from pyasn1.codec.der import decoder as der_decoder
-from socket import _fileobject
+from pyasn1.type import univ, constraint
+from socket import _fileobject, timeout
 import ssl
 import select
 from cStringIO import StringIO

@@ -50,6 +77,23 @@ _openssl_verify = {
                        + OpenSSL.SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
 }

+# A secure default.
+# Sources for more information on TLS ciphers:
+#
+# - https://wiki.mozilla.org/Security/Server_Side_TLS
+# - https://www.ssllabs.com/projects/best-practices/index.html
+# - https://hynek.me/articles/hardening-your-web-servers-ssl-ciphers/
+#
+# The general intent is:
+# - Prefer cipher suites that offer perfect forward secrecy (DHE/ECDHE),
+# - prefer ECDHE over DHE for better performance,
+# - prefer any AES-GCM over any AES-CBC for better performance and security,
+# - use 3DES as fallback which is secure but slow,
+# - disable NULL authentication, MD5 MACs and DSS for security reasons.
+DEFAULT_SSL_CIPHER_LIST = "ECDH+AESGCM:DH+AESGCM:ECDH+AES256:DH+AES256:" + \
+    "ECDH+AES128:DH+AES:ECDH+3DES:DH+3DES:RSA+AESGCM:RSA+AES:RSA+3DES:" + \
+    "!aNULL:!MD5:!DSS"
+

 orig_util_HAS_SNI = util.HAS_SNI
 orig_connection_ssl_wrap_socket = connection.ssl_wrap_socket

@@ -69,6 +113,17 @@ def extract_from_urllib3():
     util.HAS_SNI = orig_util_HAS_SNI


+### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
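
A compact version of the activation recipe documented above; the cipher-list override is optional and the value shown is illustrative, not a recommendation::

    try:
        from requests.packages.urllib3.contrib import pyopenssl
        pyopenssl.inject_into_urllib3()  # must run before any urllib3 use

        # Optional: replace the module-level default cipher suite list.
        pyopenssl.DEFAULT_SSL_CIPHER_LIST = 'ECDH+AESGCM:DH+AESGCM:!aNULL:!MD5'
    except ImportError:
        pass  # pyOpenSSL, ndg-httpsclient or pyasn1 missing; stock ssl is used
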
+class SubjectAltName(BaseSubjectAltName):
+    '''ASN.1 implementation for subjectAltNames support'''
+
+    # There is no limit to how many SAN certificates a certificate may have,
+    # however this needs to have some limit so we'll set an arbitrarily high
+    # limit.
+    sizeSpec = univ.SequenceOf.sizeSpec + \
+        constraint.ValueSizeConstraint(1, 1024)
+
+
 ### Note: This is a slightly bug-fixed version of same from ndg-httpsclient.
 def get_subj_alt_name(peer_cert):
     # Search through extensions
@@ -102,6 +157,13 @@ def get_subj_alt_name(peer_cert):


 class fileobject(_fileobject):

+    def _wait_for_sock(self):
+        rd, wd, ed = select.select([self._sock], [], [],
+                                   self._sock.gettimeout())
+        if not rd:
+            raise timeout()
+
+
     def read(self, size=-1):
         # Use max, disallow tiny reads in a loop as they are very inefficient.
         # We never leave read() with any leftover data from a new recv() call
@@ -119,6 +181,7 @@ class fileobject(_fileobject):
             try:
                 data = self._sock.recv(rbufsize)
             except OpenSSL.SSL.WantReadError:
+                self._wait_for_sock()
                 continue
             if not data:
                 break
@@ -146,6 +209,7 @@ class fileobject(_fileobject):
                 try:
                     data = self._sock.recv(left)
                 except OpenSSL.SSL.WantReadError:
+                    self._wait_for_sock()
                     continue
                 if not data:
                     break
@@ -197,6 +261,7 @@ class fileobject(_fileobject):
                         break
                     buffers.append(data)
                 except OpenSSL.SSL.WantReadError:
+                    self._wait_for_sock()
                     continue
                 break
             return "".join(buffers)
@@ -207,6 +272,7 @@ class fileobject(_fileobject):
                 try:
                     data = self._sock.recv(self._rbufsize)
                 except OpenSSL.SSL.WantReadError:
+                    self._wait_for_sock()
                     continue
                 if not data:
                     break
@@ -234,7 +300,8 @@ class fileobject(_fileobject):
                 try:
                     data = self._sock.recv(self._rbufsize)
                 except OpenSSL.SSL.WantReadError:
-                    continue
+                    self._wait_for_sock()
+                    continue
                 if not data:
                     break
                 left = size - buf_len
@@ -329,6 +396,15 @@ def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
             ctx.load_verify_locations(ca_certs, None)
         except OpenSSL.SSL.Error as e:
             raise ssl.SSLError('bad ca_certs: %r' % ca_certs, e)
+    else:
+        ctx.set_default_verify_paths()
+
+    # Disable TLS compression to mitigate CRIME attack (issue #309)
+    OP_NO_COMPRESSION = 0x20000
+    ctx.set_options(OP_NO_COMPRESSION)
+
+    # Set list of supported ciphersuites.
+    ctx.set_cipher_list(DEFAULT_SSL_CIPHER_LIST)

     cnx = OpenSSL.SSL.Connection(ctx, sock)
     cnx.set_tlsext_host_name(server_hostname)
diff --git a/libs/requests/packages/urllib3/exceptions.py b/libs/requests/packages/urllib3/exceptions.py
index 98ef9abc..b4df831f 100644
--- a/libs/requests/packages/urllib3/exceptions.py
+++ b/libs/requests/packages/urllib3/exceptions.py
@@ -44,6 +44,11 @@ class ProxyError(HTTPError):
     pass


+class ConnectionError(HTTPError):
+    "Raised when a normal connection fails."
+    pass
+
+
 class DecodeError(HTTPError):
     "Raised when automatic decoding based on Content-Type fails."
     pass
diff --git a/libs/requests/packages/urllib3/fields.py b/libs/requests/packages/urllib3/fields.py
index ed017657..da79e929 100644
--- a/libs/requests/packages/urllib3/fields.py
+++ b/libs/requests/packages/urllib3/fields.py
@@ -15,7 +15,7 @@ def guess_content_type(filename, default='application/octet-stream'):
     Guess the "Content-Type" of a file.

     :param filename:
-        The filename to guess the "Content-Type" of using :mod:`mimetimes`.
+        The filename to guess the "Content-Type" of using :mod:`mimetypes`.
     :param default:
         If no "Content-Type" can be guessed, default to `default`.
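
The `_wait_for_sock` helper added to `fileobject` above turns pyOpenSSL's non-blocking `WantReadError` into a bounded wait instead of a busy loop. The same pattern on a plain socket, as a standalone sketch::

    import select
    import socket

    def wait_readable(sock):
        # Block until data is available, honoring the socket's own timeout;
        # raise socket.timeout if the wait expires (as the helper above does).
        readable, _, _ = select.select([sock], [], [], sock.gettimeout())
        if not readable:
            raise socket.timeout()
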
""" diff --git a/libs/requests/packages/urllib3/filepost.py b/libs/requests/packages/urllib3/filepost.py index 4575582e..e8b30bdd 100644 --- a/libs/requests/packages/urllib3/filepost.py +++ b/libs/requests/packages/urllib3/filepost.py @@ -46,16 +46,15 @@ def iter_field_objects(fields): def iter_fields(fields): """ + .. deprecated:: 1.6 + Iterate over fields. - .. deprecated :: - - The addition of `~urllib3.fields.RequestField` makes this function - obsolete. Instead, use :func:`iter_field_objects`, which returns - `~urllib3.fields.RequestField` objects, instead. + The addition of :class:`~urllib3.fields.RequestField` makes this function + obsolete. Instead, use :func:`iter_field_objects`, which returns + :class:`~urllib3.fields.RequestField` objects. Supports list of (k, v) tuples and dicts. - """ if isinstance(fields, dict): return ((k, v) for k, v in six.iteritems(fields)) diff --git a/libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py b/libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py index 3aa5b2e1..dd59a75f 100644 --- a/libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py +++ b/libs/requests/packages/urllib3/packages/ssl_match_hostname/__init__.py @@ -7,7 +7,7 @@ except ImportError: from backports.ssl_match_hostname import CertificateError, match_hostname except ImportError: # Our vendored copy - from _implementation import CertificateError, match_hostname + from ._implementation import CertificateError, match_hostname # Not needed, but documenting what we provide. __all__ = ('CertificateError', 'match_hostname') diff --git a/libs/requests/packages/urllib3/poolmanager.py b/libs/requests/packages/urllib3/poolmanager.py index c16519f8..f18ff2bb 100644 --- a/libs/requests/packages/urllib3/poolmanager.py +++ b/libs/requests/packages/urllib3/poolmanager.py @@ -1,5 +1,5 @@ # urllib3/poolmanager.py -# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) +# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php @@ -176,7 +176,7 @@ class ProxyManager(PoolManager): Behaves just like :class:`PoolManager`, but sends all requests through the defined proxy, using the CONNECT method for HTTPS URLs. - :param poxy_url: + :param proxy_url: The URL of the proxy to be used. 
:param proxy_headers: diff --git a/libs/requests/packages/urllib3/request.py b/libs/requests/packages/urllib3/request.py index 66a9a0e6..2a92cc20 100644 --- a/libs/requests/packages/urllib3/request.py +++ b/libs/requests/packages/urllib3/request.py @@ -45,7 +45,6 @@ class RequestMethods(object): """ _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS']) - _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE']) def __init__(self, headers=None): self.headers = headers or {} diff --git a/libs/requests/packages/urllib3/response.py b/libs/requests/packages/urllib3/response.py index 6a1fe1a7..db441828 100644 --- a/libs/requests/packages/urllib3/response.py +++ b/libs/requests/packages/urllib3/response.py @@ -9,6 +9,7 @@ import logging import zlib import io +from ._collections import HTTPHeaderDict from .exceptions import DecodeError from .packages.six import string_types as basestring, binary_type from .util import is_fp_closed @@ -79,7 +80,10 @@ class HTTPResponse(io.IOBase): def __init__(self, body='', headers=None, status=0, version=0, reason=None, strict=0, preload_content=True, decode_content=True, original_response=None, pool=None, connection=None): - self.headers = headers or {} + + self.headers = HTTPHeaderDict() + if headers: + self.headers.update(headers) self.status = status self.version = version self.reason = reason @@ -249,17 +253,9 @@ class HTTPResponse(io.IOBase): with ``original_response=r``. """ - # Normalize headers between different versions of Python - headers = {} + headers = HTTPHeaderDict() for k, v in r.getheaders(): - # Python 3: Header keys are returned capitalised - k = k.lower() - - has_value = headers.get(k) - if has_value: # Python 3: Repeating header keys are unmerged. - v = ', '.join([has_value, v]) - - headers[k] = v + headers.add(k, v) # HTTPResponse objects in Python 3 don't have a .strict attribute strict = getattr(r, 'strict', 0) diff --git a/libs/requests/packages/urllib3/util.py b/libs/requests/packages/urllib3/util.py deleted file mode 100644 index 46a0c48d..00000000 --- a/libs/requests/packages/urllib3/util.py +++ /dev/null @@ -1,643 +0,0 @@ -# urllib3/util.py -# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt) -# -# This module is part of urllib3 and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -from base64 import b64encode -from binascii import hexlify, unhexlify -from collections import namedtuple -from hashlib import md5, sha1 -from socket import error as SocketError, _GLOBAL_DEFAULT_TIMEOUT -import time - -try: - from select import poll, POLLIN -except ImportError: # `poll` doesn't exist on OSX and other platforms - poll = False - try: - from select import select - except ImportError: # `select` doesn't exist on AppEngine. - select = False - -try: # Test for SSL features - SSLContext = None - HAS_SNI = False - - import ssl - from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 - from ssl import SSLContext # Modern SSL? - from ssl import HAS_SNI # Has SNI? -except ImportError: - pass - -from .packages import six -from .exceptions import LocationParseError, SSLError, TimeoutStateError - - -_Default = object() -# The default timeout to use for socket connections. This is the attribute used -# by httplib to define the default timeout - - -def current_time(): - """ - Retrieve the current time, this function is mocked out in unit testing. - """ - return time.time() - - -class Timeout(object): - """ - Utility object for storing timeout values. 
- - Example usage: - - .. code-block:: python - - timeout = urllib3.util.Timeout(connect=2.0, read=7.0) - pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout) - pool.request(...) # Etc, etc - - :param connect: - The maximum amount of time to wait for a connection attempt to a server - to succeed. Omitting the parameter will default the connect timeout to - the system default, probably `the global default timeout in socket.py - `_. - None will set an infinite timeout for connection attempts. - - :type connect: integer, float, or None - - :param read: - The maximum amount of time to wait between consecutive - read operations for a response from the server. Omitting - the parameter will default the read timeout to the system - default, probably `the global default timeout in socket.py - `_. - None will set an infinite timeout. - - :type read: integer, float, or None - - :param total: - This combines the connect and read timeouts into one; the read timeout - will be set to the time leftover from the connect attempt. In the - event that both a connect timeout and a total are specified, or a read - timeout and a total are specified, the shorter timeout will be applied. - - Defaults to None. - - :type total: integer, float, or None - - .. note:: - - Many factors can affect the total amount of time for urllib3 to return - an HTTP response. Specifically, Python's DNS resolver does not obey the - timeout specified on the socket. Other factors that can affect total - request time include high CPU load, high swap, the program running at a - low priority level, or other behaviors. The observed running time for - urllib3 to return a response may be greater than the value passed to - `total`. - - In addition, the read and total timeouts only measure the time between - read operations on the socket connecting the client and the server, - not the total amount of time for the request to return a complete - response. For most requests, the timeout is raised because the server - has not sent the first byte in the specified time. This is not always - the case; if a server streams one byte every fifteen seconds, a timeout - of 20 seconds will not ever trigger, even though the request will - take several minutes to complete. - - If your goal is to cut off any request after a set amount of wall clock - time, consider having a second "watcher" thread to cut off a slow - request. - """ - - #: A sentinel object representing the default timeout value - DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT - - def __init__(self, total=None, connect=_Default, read=_Default): - self._connect = self._validate_timeout(connect, 'connect') - self._read = self._validate_timeout(read, 'read') - self.total = self._validate_timeout(total, 'total') - self._start_connect = None - - def __str__(self): - return '%s(connect=%r, read=%r, total=%r)' % ( - type(self).__name__, self._connect, self._read, self.total) - - - @classmethod - def _validate_timeout(cls, value, name): - """ Check that a timeout attribute is valid - - :param value: The timeout value to validate - :param name: The name of the timeout attribute to validate. 
This is used - for clear error messages - :return: the value - :raises ValueError: if the type is not an integer or a float, or if it - is a numeric value less than zero - """ - if value is _Default: - return cls.DEFAULT_TIMEOUT - - if value is None or value is cls.DEFAULT_TIMEOUT: - return value - - try: - float(value) - except (TypeError, ValueError): - raise ValueError("Timeout value %s was %s, but it must be an " - "int or float." % (name, value)) - - try: - if value < 0: - raise ValueError("Attempted to set %s timeout to %s, but the " - "timeout cannot be set to a value less " - "than 0." % (name, value)) - except TypeError: # Python 3 - raise ValueError("Timeout value %s was %s, but it must be an " - "int or float." % (name, value)) - - return value - - @classmethod - def from_float(cls, timeout): - """ Create a new Timeout from a legacy timeout value. - - The timeout value used by httplib.py sets the same timeout on the - connect(), and recv() socket requests. This creates a :class:`Timeout` - object that sets the individual timeouts to the ``timeout`` value passed - to this function. - - :param timeout: The legacy timeout value - :type timeout: integer, float, sentinel default object, or None - :return: a Timeout object - :rtype: :class:`Timeout` - """ - return Timeout(read=timeout, connect=timeout) - - def clone(self): - """ Create a copy of the timeout object - - Timeout properties are stored per-pool but each request needs a fresh - Timeout object to ensure each one has its own start/stop configured. - - :return: a copy of the timeout object - :rtype: :class:`Timeout` - """ - # We can't use copy.deepcopy because that will also create a new object - # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to - # detect the user default. - return Timeout(connect=self._connect, read=self._read, - total=self.total) - - def start_connect(self): - """ Start the timeout clock, used during a connect() attempt - - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to start a timer that has been started already. - """ - if self._start_connect is not None: - raise TimeoutStateError("Timeout timer has already been started.") - self._start_connect = current_time() - return self._start_connect - - def get_connect_duration(self): - """ Gets the time elapsed since the call to :meth:`start_connect`. - - :return: the elapsed time - :rtype: float - :raises urllib3.exceptions.TimeoutStateError: if you attempt - to get duration for a timer that hasn't been started. - """ - if self._start_connect is None: - raise TimeoutStateError("Can't get connect duration for timer " - "that has not started.") - return current_time() - self._start_connect - - @property - def connect_timeout(self): - """ Get the value to use when setting a connection timeout. - - This will be a positive float or integer, the value None - (never timeout), or the default system timeout. - - :return: the connect timeout - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - """ - if self.total is None: - return self._connect - - if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: - return self.total - - return min(self._connect, self.total) - - @property - def read_timeout(self): - """ Get the value for the read timeout. - - This assumes some time has elapsed in the connection timeout and - computes the read timeout appropriately. - - If self.total is set, the read timeout is dependent on the amount of - time taken by the connect timeout. 
If the connection time has not been - established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be - raised. - - :return: the value to use for the read timeout - :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None - :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` - has not yet been called on this object. - """ - if (self.total is not None and - self.total is not self.DEFAULT_TIMEOUT and - self._read is not None and - self._read is not self.DEFAULT_TIMEOUT): - # in case the connect timeout has not yet been established. - if self._start_connect is None: - return self._read - return max(0, min(self.total - self.get_connect_duration(), - self._read)) - elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT: - return max(0, self.total - self.get_connect_duration()) - else: - return self._read - - -class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])): - """ - Datastructure for representing an HTTP URL. Used as a return value for - :func:`parse_url`. - """ - slots = () - - def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None): - return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment) - - @property - def hostname(self): - """For backwards-compatibility with urlparse. We're nice like that.""" - return self.host - - @property - def request_uri(self): - """Absolute path including the query string.""" - uri = self.path or '/' - - if self.query is not None: - uri += '?' + self.query - - return uri - - @property - def netloc(self): - """Network location including host and port""" - if self.port: - return '%s:%d' % (self.host, self.port) - return self.host - - -def split_first(s, delims): - """ - Given a string and an iterable of delimiters, split on the first found - delimiter. Return two split parts and the matched delimiter. - - If not found, then the first part is the full input string. - - Example: :: - - >>> split_first('foo/bar?baz', '?/=') - ('foo', 'bar?baz', '/') - >>> split_first('foo/bar?baz', '123') - ('foo/bar?baz', '', None) - - Scales linearly with number of delims. Not ideal for large number of delims. - """ - min_idx = None - min_delim = None - for d in delims: - idx = s.find(d) - if idx < 0: - continue - - if min_idx is None or idx < min_idx: - min_idx = idx - min_delim = d - - if min_idx is None or min_idx < 0: - return s, '', None - - return s[:min_idx], s[min_idx+1:], min_delim - - -def parse_url(url): - """ - Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is - performed to parse incomplete urls. Fields not provided will be None. - - Partly backwards-compatible with :mod:`urlparse`. - - Example: :: - - >>> parse_url('http://google.com/mail/') - Url(scheme='http', host='google.com', port=None, path='/', ...) - >>> parse_url('google.com:80') - Url(scheme=None, host='google.com', port=80, path=None, ...) - >>> parse_url('/foo?bar') - Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...) - """ - - # While this code has overlap with stdlib's urlparse, it is much - # simplified for our needs and less annoying. - # Additionally, this implementations does silly things to be optimal - # on CPython. 
- - scheme = None - auth = None - host = None - port = None - path = None - fragment = None - query = None - - # Scheme - if '://' in url: - scheme, url = url.split('://', 1) - - # Find the earliest Authority Terminator - # (http://tools.ietf.org/html/rfc3986#section-3.2) - url, path_, delim = split_first(url, ['/', '?', '#']) - - if delim: - # Reassemble the path - path = delim + path_ - - # Auth - if '@' in url: - # Last '@' denotes end of auth part - auth, url = url.rsplit('@', 1) - - # IPv6 - if url and url[0] == '[': - host, url = url.split(']', 1) - host += ']' - - # Port - if ':' in url: - _host, port = url.split(':', 1) - - if not host: - host = _host - - if port: - # If given, ports must be integers. - if not port.isdigit(): - raise LocationParseError("Failed to parse: %s" % url) - port = int(port) - else: - # Blank ports are cool, too. (rfc3986#section-3.2.3) - port = None - - elif not host and url: - host = url - - if not path: - return Url(scheme, auth, host, port, path, query, fragment) - - # Fragment - if '#' in path: - path, fragment = path.split('#', 1) - - # Query - if '?' in path: - path, query = path.split('?', 1) - - return Url(scheme, auth, host, port, path, query, fragment) - - -def get_host(url): - """ - Deprecated. Use :func:`.parse_url` instead. - """ - p = parse_url(url) - return p.scheme or 'http', p.hostname, p.port - - -def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, - basic_auth=None, proxy_basic_auth=None): - """ - Shortcuts for generating request headers. - - :param keep_alive: - If ``True``, adds 'connection: keep-alive' header. - - :param accept_encoding: - Can be a boolean, list, or string. - ``True`` translates to 'gzip,deflate'. - List will get joined by comma. - String will be used as provided. - - :param user_agent: - String representing the user-agent you want, such as - "python-urllib3/0.6" - - :param basic_auth: - Colon-separated username:password string for 'authorization: basic ...' - auth header. - - :param proxy_basic_auth: - Colon-separated username:password string for 'proxy-authorization: basic ...' - auth header. - - Example: :: - - >>> make_headers(keep_alive=True, user_agent="Batman/1.0") - {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} - >>> make_headers(accept_encoding=True) - {'accept-encoding': 'gzip,deflate'} - """ - headers = {} - if accept_encoding: - if isinstance(accept_encoding, str): - pass - elif isinstance(accept_encoding, list): - accept_encoding = ','.join(accept_encoding) - else: - accept_encoding = 'gzip,deflate' - headers['accept-encoding'] = accept_encoding - - if user_agent: - headers['user-agent'] = user_agent - - if keep_alive: - headers['connection'] = 'keep-alive' - - if basic_auth: - headers['authorization'] = 'Basic ' + \ - b64encode(six.b(basic_auth)).decode('utf-8') - - if proxy_basic_auth: - headers['proxy-authorization'] = 'Basic ' + \ - b64encode(six.b(proxy_basic_auth)).decode('utf-8') - - return headers - - -def is_connection_dropped(conn): # Platform-specific - """ - Returns True if the connection is dropped and should be closed. - - :param conn: - :class:`httplib.HTTPConnection` object. - - Note: For platforms like AppEngine, this will always return ``False`` to - let the platform handle connection recycling transparently for us. 
- """ - sock = getattr(conn, 'sock', False) - if not sock: # Platform-specific: AppEngine - return False - - if not poll: - if not select: # Platform-specific: AppEngine - return False - - try: - return select([sock], [], [], 0.0)[0] - except SocketError: - return True - - # This version is better on platforms that support it. - p = poll() - p.register(sock, POLLIN) - for (fno, ev) in p.poll(0.0): - if fno == sock.fileno(): - # Either data is buffered (bad), or the connection is dropped. - return True - - -def resolve_cert_reqs(candidate): - """ - Resolves the argument to a numeric constant, which can be passed to - the wrap_socket function/method from the ssl module. - Defaults to :data:`ssl.CERT_NONE`. - If given a string it is assumed to be the name of the constant in the - :mod:`ssl` module or its abbrevation. - (So you can specify `REQUIRED` instead of `CERT_REQUIRED`. - If it's neither `None` nor a string we assume it is already the numeric - constant which can directly be passed to wrap_socket. - """ - if candidate is None: - return CERT_NONE - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'CERT_' + candidate) - return res - - return candidate - - -def resolve_ssl_version(candidate): - """ - like resolve_cert_reqs - """ - if candidate is None: - return PROTOCOL_SSLv23 - - if isinstance(candidate, str): - res = getattr(ssl, candidate, None) - if res is None: - res = getattr(ssl, 'PROTOCOL_' + candidate) - return res - - return candidate - - -def assert_fingerprint(cert, fingerprint): - """ - Checks if given fingerprint matches the supplied certificate. - - :param cert: - Certificate as bytes object. - :param fingerprint: - Fingerprint as string of hexdigits, can be interspersed by colons. - """ - - # Maps the length of a digest to a possible hash function producing - # this digest. - hashfunc_map = { - 16: md5, - 20: sha1 - } - - fingerprint = fingerprint.replace(':', '').lower() - - digest_length, rest = divmod(len(fingerprint), 2) - - if rest or digest_length not in hashfunc_map: - raise SSLError('Fingerprint is of invalid length.') - - # We need encode() here for py32; works on py2 and p33. - fingerprint_bytes = unhexlify(fingerprint.encode()) - - hashfunc = hashfunc_map[digest_length] - - cert_digest = hashfunc(cert).digest() - - if not cert_digest == fingerprint_bytes: - raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' - .format(hexlify(fingerprint_bytes), - hexlify(cert_digest))) - -def is_fp_closed(obj): - """ - Checks whether a given file-like object is closed. - - :param obj: - The file-like object to check. - """ - if hasattr(obj, 'fp'): - # Object is a container for another file-like object that gets released - # on exhaustion (e.g. HTTPResponse) - return obj.fp is None - - return obj.closed - - -if SSLContext is not None: # Python 3.2+ - def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None): - """ - All arguments except `server_hostname` have the same meaning as for - :func:`ssl.wrap_socket` - - :param server_hostname: - Hostname of the expected certificate - """ - context = SSLContext(ssl_version) - context.verify_mode = cert_reqs - if ca_certs: - try: - context.load_verify_locations(ca_certs) - # Py32 raises IOError - # Py33 raises FileNotFoundError - except Exception as e: # Reraise as SSLError - raise SSLError(e) - if certfile: - # FIXME: This block needs a test. 
- context.load_cert_chain(certfile, keyfile) - if HAS_SNI: # Platform-specific: OpenSSL with enabled SNI - return context.wrap_socket(sock, server_hostname=server_hostname) - return context.wrap_socket(sock) - -else: # Python 3.1 and earlier - def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None, - ca_certs=None, server_hostname=None, - ssl_version=None): - return wrap_socket(sock, keyfile=keyfile, certfile=certfile, - ca_certs=ca_certs, cert_reqs=cert_reqs, - ssl_version=ssl_version) diff --git a/libs/requests/packages/urllib3/util/__init__.py b/libs/requests/packages/urllib3/util/__init__.py new file mode 100644 index 00000000..a40185ee --- /dev/null +++ b/libs/requests/packages/urllib3/util/__init__.py @@ -0,0 +1,27 @@ +# urllib3/util/__init__.py +# Copyright 2008-2014 Andrey Petrov and contributors (see CONTRIBUTORS.txt) +# +# This module is part of urllib3 and is released under +# the MIT License: http://www.opensource.org/licenses/mit-license.php + +from .connection import is_connection_dropped +from .request import make_headers +from .response import is_fp_closed +from .ssl_ import ( + SSLContext, + HAS_SNI, + assert_fingerprint, + resolve_cert_reqs, + resolve_ssl_version, + ssl_wrap_socket, +) +from .timeout import ( + current_time, + Timeout, +) +from .url import ( + get_host, + parse_url, + split_first, + Url, +) diff --git a/libs/requests/packages/urllib3/util/connection.py b/libs/requests/packages/urllib3/util/connection.py new file mode 100644 index 00000000..8deeab5c --- /dev/null +++ b/libs/requests/packages/urllib3/util/connection.py @@ -0,0 +1,45 @@ +from socket import error as SocketError +try: + from select import poll, POLLIN +except ImportError: # `poll` doesn't exist on OSX and other platforms + poll = False + try: + from select import select + except ImportError: # `select` doesn't exist on AppEngine. + select = False + +def is_connection_dropped(conn): # Platform-specific + """ + Returns True if the connection is dropped and should be closed. + + :param conn: + :class:`httplib.HTTPConnection` object. + + Note: For platforms like AppEngine, this will always return ``False`` to + let the platform handle connection recycling transparently for us. + """ + sock = getattr(conn, 'sock', False) + if sock is False: # Platform-specific: AppEngine + return False + if sock is None: # Connection already closed (such as by httplib). + return False + + if not poll: + if not select: # Platform-specific: AppEngine + return False + + try: + return select([sock], [], [], 0.0)[0] + except SocketError: + return True + + # This version is better on platforms that support it. + p = poll() + p.register(sock, POLLIN) + for (fno, ev) in p.poll(0.0): + if fno == sock.fileno(): + # Either data is buffered (bad), or the connection is dropped. + return True + + + diff --git a/libs/requests/packages/urllib3/util/request.py b/libs/requests/packages/urllib3/util/request.py new file mode 100644 index 00000000..d48d6513 --- /dev/null +++ b/libs/requests/packages/urllib3/util/request.py @@ -0,0 +1,68 @@ +from base64 import b64encode + +from ..packages import six + + +ACCEPT_ENCODING = 'gzip,deflate' + + +def make_headers(keep_alive=None, accept_encoding=None, user_agent=None, + basic_auth=None, proxy_basic_auth=None): + """ + Shortcuts for generating request headers. + + :param keep_alive: + If ``True``, adds 'connection: keep-alive' header. + + :param accept_encoding: + Can be a boolean, list, or string. + ``True`` translates to 'gzip,deflate'. + List will get joined by comma. 
+ String will be used as provided. + + :param user_agent: + String representing the user-agent you want, such as + "python-urllib3/0.6" + + :param basic_auth: + Colon-separated username:password string for 'authorization: basic ...' + auth header. + + :param proxy_basic_auth: + Colon-separated username:password string for 'proxy-authorization: basic ...' + auth header. + + Example: :: + + >>> make_headers(keep_alive=True, user_agent="Batman/1.0") + {'connection': 'keep-alive', 'user-agent': 'Batman/1.0'} + >>> make_headers(accept_encoding=True) + {'accept-encoding': 'gzip,deflate'} + """ + headers = {} + if accept_encoding: + if isinstance(accept_encoding, str): + pass + elif isinstance(accept_encoding, list): + accept_encoding = ','.join(accept_encoding) + else: + accept_encoding = ACCEPT_ENCODING + headers['accept-encoding'] = accept_encoding + + if user_agent: + headers['user-agent'] = user_agent + + if keep_alive: + headers['connection'] = 'keep-alive' + + if basic_auth: + headers['authorization'] = 'Basic ' + \ + b64encode(six.b(basic_auth)).decode('utf-8') + + if proxy_basic_auth: + headers['proxy-authorization'] = 'Basic ' + \ + b64encode(six.b(proxy_basic_auth)).decode('utf-8') + + return headers + + diff --git a/libs/requests/packages/urllib3/util/response.py b/libs/requests/packages/urllib3/util/response.py new file mode 100644 index 00000000..d0325bc6 --- /dev/null +++ b/libs/requests/packages/urllib3/util/response.py @@ -0,0 +1,13 @@ +def is_fp_closed(obj): + """ + Checks whether a given file-like object is closed. + + :param obj: + The file-like object to check. + """ + if hasattr(obj, 'fp'): + # Object is a container for another file-like object that gets released + # on exhaustion (e.g. HTTPResponse) + return obj.fp is None + + return obj.closed diff --git a/libs/requests/packages/urllib3/util/ssl_.py b/libs/requests/packages/urllib3/util/ssl_.py new file mode 100644 index 00000000..dee4b876 --- /dev/null +++ b/libs/requests/packages/urllib3/util/ssl_.py @@ -0,0 +1,133 @@ +from binascii import hexlify, unhexlify +from hashlib import md5, sha1 + +from ..exceptions import SSLError + + +try: # Test for SSL features + SSLContext = None + HAS_SNI = False + + import ssl + from ssl import wrap_socket, CERT_NONE, PROTOCOL_SSLv23 + from ssl import SSLContext # Modern SSL? + from ssl import HAS_SNI # Has SNI? +except ImportError: + pass + + +def assert_fingerprint(cert, fingerprint): + """ + Checks if given fingerprint matches the supplied certificate. + + :param cert: + Certificate as bytes object. + :param fingerprint: + Fingerprint as string of hexdigits, can be interspersed by colons. + """ + + # Maps the length of a digest to a possible hash function producing + # this digest. + hashfunc_map = { + 16: md5, + 20: sha1 + } + + fingerprint = fingerprint.replace(':', '').lower() + + digest_length, rest = divmod(len(fingerprint), 2) + + if rest or digest_length not in hashfunc_map: + raise SSLError('Fingerprint is of invalid length.') + + # We need encode() here for py32; works on py2 and p33. + fingerprint_bytes = unhexlify(fingerprint.encode()) + + hashfunc = hashfunc_map[digest_length] + + cert_digest = hashfunc(cert).digest() + + if not cert_digest == fingerprint_bytes: + raise SSLError('Fingerprints did not match. Expected "{0}", got "{1}".' + .format(hexlify(fingerprint_bytes), + hexlify(cert_digest))) + + +def resolve_cert_reqs(candidate): + """ + Resolves the argument to a numeric constant, which can be passed to + the wrap_socket function/method from the ssl module. 
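
For the `assert_fingerprint` helper shown above: the hash is chosen from the fingerprint length (32 hex digits selects MD5, 40 selects SHA-1). A self-contained round trip, with fabricated certificate bytes::

    from hashlib import sha1
    from requests.packages.urllib3.util.ssl_ import assert_fingerprint

    cert = b'fake DER bytes, for illustration only'
    digest = sha1(cert).hexdigest()
    fingerprint = ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))

    # 40 hex digits -> compared against the SHA-1 of the certificate;
    # a mismatch would raise urllib3's SSLError instead.
    assert_fingerprint(cert, fingerprint)
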
+    Defaults to :data:`ssl.CERT_NONE`.
+    If given a string it is assumed to be the name of the constant in the
+    :mod:`ssl` module or its abbreviation.
+    (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.)
+    If it's neither `None` nor a string we assume it is already the numeric
+    constant which can directly be passed to wrap_socket.
+    """
+    if candidate is None:
+        return CERT_NONE
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'CERT_' + candidate)
+        return res
+
+    return candidate
+
+
+def resolve_ssl_version(candidate):
+    """
+    like resolve_cert_reqs
+    """
+    if candidate is None:
+        return PROTOCOL_SSLv23
+
+    if isinstance(candidate, str):
+        res = getattr(ssl, candidate, None)
+        if res is None:
+            res = getattr(ssl, 'PROTOCOL_' + candidate)
+        return res
+
+    return candidate
+
+
+if SSLContext is not None:  # Python 3.2+
+    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+                        ca_certs=None, server_hostname=None,
+                        ssl_version=None):
+        """
+        All arguments except `server_hostname` have the same meaning as for
+        :func:`ssl.wrap_socket`
+
+        :param server_hostname:
+            Hostname of the expected certificate
+        """
+        context = SSLContext(ssl_version)
+        context.verify_mode = cert_reqs
+
+        # Disable TLS compression to mitigate CRIME attack (issue #309)
+        OP_NO_COMPRESSION = 0x20000
+        context.options |= OP_NO_COMPRESSION
+
+        if ca_certs:
+            try:
+                context.load_verify_locations(ca_certs)
+            # Py32 raises IOError
+            # Py33 raises FileNotFoundError
+            except Exception as e:  # Reraise as SSLError
+                raise SSLError(e)
+        if certfile:
+            # FIXME: This block needs a test.
+            context.load_cert_chain(certfile, keyfile)
+        if HAS_SNI:  # Platform-specific: OpenSSL with enabled SNI
+            return context.wrap_socket(sock, server_hostname=server_hostname)
+        return context.wrap_socket(sock)
+
+else:  # Python 3.1 and earlier
+    def ssl_wrap_socket(sock, keyfile=None, certfile=None, cert_reqs=None,
+                        ca_certs=None, server_hostname=None,
+                        ssl_version=None):
+        return wrap_socket(sock, keyfile=keyfile, certfile=certfile,
+                           ca_certs=ca_certs, cert_reqs=cert_reqs,
+                           ssl_version=ssl_version)
diff --git a/libs/requests/packages/urllib3/util/timeout.py b/libs/requests/packages/urllib3/util/timeout.py
new file mode 100644
index 00000000..4f947cb2
--- /dev/null
+++ b/libs/requests/packages/urllib3/util/timeout.py
@@ -0,0 +1,234 @@
+from socket import _GLOBAL_DEFAULT_TIMEOUT
+import time
+
+from ..exceptions import TimeoutStateError
+
+
+def current_time():
+    """
+    Retrieve the current time, this function is mocked out in unit testing.
+    """
+    return time.time()
+
+
+_Default = object()
+# The default timeout to use for socket connections. This is the attribute used
+# by httplib to define the default timeout
+
+
+class Timeout(object):
+    """
+    Utility object for storing timeout values.
+
+    Example usage:
+
+    .. code-block:: python
+
+        timeout = urllib3.util.Timeout(connect=2.0, read=7.0)
+        pool = HTTPConnectionPool('www.google.com', 80, timeout=timeout)
+        pool.request(...) # Etc, etc
+
+    :param connect:
+        The maximum amount of time to wait for a connection attempt to a server
+        to succeed. Omitting the parameter will default the connect timeout to
+        the system default, probably `the global default timeout in socket.py
+        `_.
+        None will set an infinite timeout for connection attempts.
+
+    :type connect: integer, float, or None
+
+    :param read:
+        The maximum amount of time to wait between consecutive
+        read operations for a response from the server.
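
`resolve_cert_reqs` and `resolve_ssl_version` above accept `None`, an abbreviated constant name, the full name, or the numeric constant itself; all of the following hold::

    import ssl
    from requests.packages.urllib3.util.ssl_ import (
        resolve_cert_reqs,
        resolve_ssl_version,
    )

    assert resolve_cert_reqs(None) == ssl.CERT_NONE
    assert resolve_cert_reqs('REQUIRED') == ssl.CERT_REQUIRED
    assert resolve_cert_reqs('CERT_REQUIRED') == ssl.CERT_REQUIRED
    assert resolve_cert_reqs(ssl.CERT_OPTIONAL) == ssl.CERT_OPTIONAL
    assert resolve_ssl_version(None) == ssl.PROTOCOL_SSLv23
    assert resolve_ssl_version('TLSv1') == ssl.PROTOCOL_TLSv1
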
Omitting + the parameter will default the read timeout to the system + default, probably `the global default timeout in socket.py + `_. + None will set an infinite timeout. + + :type read: integer, float, or None + + :param total: + This combines the connect and read timeouts into one; the read timeout + will be set to the time leftover from the connect attempt. In the + event that both a connect timeout and a total are specified, or a read + timeout and a total are specified, the shorter timeout will be applied. + + Defaults to None. + + :type total: integer, float, or None + + .. note:: + + Many factors can affect the total amount of time for urllib3 to return + an HTTP response. Specifically, Python's DNS resolver does not obey the + timeout specified on the socket. Other factors that can affect total + request time include high CPU load, high swap, the program running at a + low priority level, or other behaviors. The observed running time for + urllib3 to return a response may be greater than the value passed to + `total`. + + In addition, the read and total timeouts only measure the time between + read operations on the socket connecting the client and the server, + not the total amount of time for the request to return a complete + response. For most requests, the timeout is raised because the server + has not sent the first byte in the specified time. This is not always + the case; if a server streams one byte every fifteen seconds, a timeout + of 20 seconds will not ever trigger, even though the request will + take several minutes to complete. + + If your goal is to cut off any request after a set amount of wall clock + time, consider having a second "watcher" thread to cut off a slow + request. + """ + + #: A sentinel object representing the default timeout value + DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT + + def __init__(self, total=None, connect=_Default, read=_Default): + self._connect = self._validate_timeout(connect, 'connect') + self._read = self._validate_timeout(read, 'read') + self.total = self._validate_timeout(total, 'total') + self._start_connect = None + + def __str__(self): + return '%s(connect=%r, read=%r, total=%r)' % ( + type(self).__name__, self._connect, self._read, self.total) + + + @classmethod + def _validate_timeout(cls, value, name): + """ Check that a timeout attribute is valid + + :param value: The timeout value to validate + :param name: The name of the timeout attribute to validate. This is used + for clear error messages + :return: the value + :raises ValueError: if the type is not an integer or a float, or if it + is a numeric value less than zero + """ + if value is _Default: + return cls.DEFAULT_TIMEOUT + + if value is None or value is cls.DEFAULT_TIMEOUT: + return value + + try: + float(value) + except (TypeError, ValueError): + raise ValueError("Timeout value %s was %s, but it must be an " + "int or float." % (name, value)) + + try: + if value < 0: + raise ValueError("Attempted to set %s timeout to %s, but the " + "timeout cannot be set to a value less " + "than 0." % (name, value)) + except TypeError: # Python 3 + raise ValueError("Timeout value %s was %s, but it must be an " + "int or float." % (name, value)) + + return value + + @classmethod + def from_float(cls, timeout): + """ Create a new Timeout from a legacy timeout value. + + The timeout value used by httplib.py sets the same timeout on the + connect(), and recv() socket requests. 
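
How the `Timeout` values above combine, in a short sketch (times in seconds)::

    from requests.packages.urllib3.util.timeout import Timeout

    t = Timeout(connect=2.0, read=7.0)
    assert t.connect_timeout == 2.0        # total unset: connect wins

    capped = Timeout(connect=5.0, total=3.0)
    assert capped.connect_timeout == 3.0   # the shorter of connect/total

    legacy = Timeout.from_float(4.0)       # one number for both phases
    assert legacy.connect_timeout == 4.0
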
This creates a :class:`Timeout` + object that sets the individual timeouts to the ``timeout`` value passed + to this function. + + :param timeout: The legacy timeout value + :type timeout: integer, float, sentinel default object, or None + :return: a Timeout object + :rtype: :class:`Timeout` + """ + return Timeout(read=timeout, connect=timeout) + + def clone(self): + """ Create a copy of the timeout object + + Timeout properties are stored per-pool but each request needs a fresh + Timeout object to ensure each one has its own start/stop configured. + + :return: a copy of the timeout object + :rtype: :class:`Timeout` + """ + # We can't use copy.deepcopy because that will also create a new object + # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to + # detect the user default. + return Timeout(connect=self._connect, read=self._read, + total=self.total) + + def start_connect(self): + """ Start the timeout clock, used during a connect() attempt + + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to start a timer that has been started already. + """ + if self._start_connect is not None: + raise TimeoutStateError("Timeout timer has already been started.") + self._start_connect = current_time() + return self._start_connect + + def get_connect_duration(self): + """ Gets the time elapsed since the call to :meth:`start_connect`. + + :return: the elapsed time + :rtype: float + :raises urllib3.exceptions.TimeoutStateError: if you attempt + to get duration for a timer that hasn't been started. + """ + if self._start_connect is None: + raise TimeoutStateError("Can't get connect duration for timer " + "that has not started.") + return current_time() - self._start_connect + + @property + def connect_timeout(self): + """ Get the value to use when setting a connection timeout. + + This will be a positive float or integer, the value None + (never timeout), or the default system timeout. + + :return: the connect timeout + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + """ + if self.total is None: + return self._connect + + if self._connect is None or self._connect is self.DEFAULT_TIMEOUT: + return self.total + + return min(self._connect, self.total) + + @property + def read_timeout(self): + """ Get the value for the read timeout. + + This assumes some time has elapsed in the connection timeout and + computes the read timeout appropriately. + + If self.total is set, the read timeout is dependent on the amount of + time taken by the connect timeout. If the connection time has not been + established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be + raised. + + :return: the value to use for the read timeout + :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None + :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect` + has not yet been called on this object. + """ + if (self.total is not None and + self.total is not self.DEFAULT_TIMEOUT and + self._read is not None and + self._read is not self.DEFAULT_TIMEOUT): + # in case the connect timeout has not yet been established. 
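
The timer methods above are stateful: `clone` hands each request a fresh timer, `start_connect` arms it, and `read_timeout` then deducts the elapsed connect time from `total`. A hedged walkthrough::

    from requests.packages.urllib3.util.timeout import Timeout

    t = Timeout(total=10.0, read=6.0).clone()  # fresh timer for this request
    t.start_connect()                          # arm the connect clock
    # ... the connect attempt runs here ...
    assert t.get_connect_duration() >= 0.0
    assert t.read_timeout <= 6.0               # min(total - elapsed, read)
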
+            if self._start_connect is None:
+                return self._read
+            return max(0, min(self.total - self.get_connect_duration(),
+                              self._read))
+        elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
+            return max(0, self.total - self.get_connect_duration())
+        else:
+            return self._read
diff --git a/libs/requests/packages/urllib3/util/url.py b/libs/requests/packages/urllib3/util/url.py
new file mode 100644
index 00000000..362d2160
--- /dev/null
+++ b/libs/requests/packages/urllib3/util/url.py
@@ -0,0 +1,162 @@
+from collections import namedtuple
+
+from ..exceptions import LocationParseError
+
+
+class Url(namedtuple('Url', ['scheme', 'auth', 'host', 'port', 'path', 'query', 'fragment'])):
+    """
+    Datastructure for representing an HTTP URL. Used as a return value for
+    :func:`parse_url`.
+    """
+    slots = ()
+
+    def __new__(cls, scheme=None, auth=None, host=None, port=None, path=None, query=None, fragment=None):
+        return super(Url, cls).__new__(cls, scheme, auth, host, port, path, query, fragment)
+
+    @property
+    def hostname(self):
+        """For backwards-compatibility with urlparse. We're nice like that."""
+        return self.host
+
+    @property
+    def request_uri(self):
+        """Absolute path including the query string."""
+        uri = self.path or '/'
+
+        if self.query is not None:
+            uri += '?' + self.query
+
+        return uri
+
+    @property
+    def netloc(self):
+        """Network location including host and port"""
+        if self.port:
+            return '%s:%d' % (self.host, self.port)
+        return self.host
+
+
+def split_first(s, delims):
+    """
+    Given a string and an iterable of delimiters, split on the first found
+    delimiter. Return two split parts and the matched delimiter.
+
+    If not found, then the first part is the full input string.
+
+    Example: ::
+
+        >>> split_first('foo/bar?baz', '?/=')
+        ('foo', 'bar?baz', '/')
+        >>> split_first('foo/bar?baz', '123')
+        ('foo/bar?baz', '', None)
+
+    Scales linearly with number of delims. Not ideal for large number of delims.
+    """
+    min_idx = None
+    min_delim = None
+    for d in delims:
+        idx = s.find(d)
+        if idx < 0:
+            continue
+
+        if min_idx is None or idx < min_idx:
+            min_idx = idx
+            min_delim = d
+
+    if min_idx is None or min_idx < 0:
+        return s, '', None
+
+    return s[:min_idx], s[min_idx+1:], min_delim
+
+
+def parse_url(url):
+    """
+    Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is
+    performed to parse incomplete urls. Fields not provided will be None.
+
+    Partly backwards-compatible with :mod:`urlparse`.
+
+    Example: ::
+
+        >>> parse_url('http://google.com/mail/')
+        Url(scheme='http', host='google.com', port=None, path='/', ...)
+        >>> parse_url('google.com:80')
+        Url(scheme=None, host='google.com', port=80, path=None, ...)
+        >>> parse_url('/foo?bar')
+        Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)
+    """
+
+    # While this code has overlap with stdlib's urlparse, it is much
+    # simplified for our needs and less annoying.
+    # Additionally, this implementation does silly things to be optimal
+    # on CPython.
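
Beyond the doctest examples above, the `Url` namedtuple exposes the parsed pieces as attributes; the function body follows below::

    from requests.packages.urllib3.util.url import parse_url, split_first

    assert split_first('foo/bar?baz', '?/=') == ('foo', 'bar?baz', '/')

    u = parse_url('http://user:pw@example.com:8080/p?q=1#frag')
    assert (u.scheme, u.auth, u.host, u.port) == \
        ('http', 'user:pw', 'example.com', 8080)
    assert u.request_uri == '/p?q=1'
    assert u.netloc == 'example.com:8080'
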
+ + scheme = None + auth = None + host = None + port = None + path = None + fragment = None + query = None + + # Scheme + if '://' in url: + scheme, url = url.split('://', 1) + + # Find the earliest Authority Terminator + # (http://tools.ietf.org/html/rfc3986#section-3.2) + url, path_, delim = split_first(url, ['/', '?', '#']) + + if delim: + # Reassemble the path + path = delim + path_ + + # Auth + if '@' in url: + # Last '@' denotes end of auth part + auth, url = url.rsplit('@', 1) + + # IPv6 + if url and url[0] == '[': + host, url = url.split(']', 1) + host += ']' + + # Port + if ':' in url: + _host, port = url.split(':', 1) + + if not host: + host = _host + + if port: + # If given, ports must be integers. + if not port.isdigit(): + raise LocationParseError(url) + port = int(port) + else: + # Blank ports are cool, too. (rfc3986#section-3.2.3) + port = None + + elif not host and url: + host = url + + if not path: + return Url(scheme, auth, host, port, path, query, fragment) + + # Fragment + if '#' in path: + path, fragment = path.split('#', 1) + + # Query + if '?' in path: + path, query = path.split('?', 1) + + return Url(scheme, auth, host, port, path, query, fragment) + + +def get_host(url): + """ + Deprecated. Use :func:`.parse_url` instead. + """ + p = parse_url(url) + return p.scheme or 'http', p.hostname, p.port diff --git a/libs/requests/sessions.py b/libs/requests/sessions.py index 06e17d4b..df85a25c 100644 --- a/libs/requests/sessions.py +++ b/libs/requests/sessions.py @@ -12,27 +12,28 @@ import os from collections import Mapping from datetime import datetime +from .auth import _basic_auth_str from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str from .cookies import ( cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies) -from .models import Request, PreparedRequest +from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT from .hooks import default_hooks, dispatch_hook -from .utils import to_key_val_list, default_headers -from .exceptions import TooManyRedirects, InvalidSchema +from .utils import to_key_val_list, default_headers, to_native_string +from .exceptions import ( + TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError) from .structures import CaseInsensitiveDict from .adapters import HTTPAdapter -from .utils import requote_uri, get_environ_proxies, get_netrc_auth +from .utils import ( + requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies, + get_auth_from_url +) from .status_codes import codes -REDIRECT_STATI = ( - codes.moved, # 301 - codes.found, # 302 - codes.other, # 303 - codes.temporary_moved, # 307 -) -DEFAULT_REDIRECT_LIMIT = 30 + +# formerly defined here, reexposed here for backward compatibility +from .models import REDIRECT_STATI def merge_setting(request_setting, session_setting, dict_class=OrderedDict): @@ -63,6 +64,8 @@ def merge_setting(request_setting, session_setting, dict_class=OrderedDict): if v is None: del merged_setting[k] + merged_setting = dict((k, v) for (k, v) in merged_setting.items() if v is not None) + return merged_setting @@ -89,11 +92,13 @@ class SessionRedirectMixin(object): i = 0 - # ((resp.status_code is codes.see_other)) - while ('location' in resp.headers and resp.status_code in REDIRECT_STATI): + while resp.is_redirect: prepared_request = req.copy() - resp.content # Consume socket so it can be released + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + 
resp.raw.read(decode_content=False) if i >= self.max_redirects: raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects) @@ -121,7 +126,7 @@ class SessionRedirectMixin(object): else: url = requote_uri(url) - prepared_request.url = url + prepared_request.url = to_native_string(url) # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 if (resp.status_code == codes.see_other and @@ -153,12 +158,19 @@ class SessionRedirectMixin(object): except KeyError: pass - extract_cookies_to_jar(prepared_request._cookies, - prepared_request, resp.raw) + extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw) + prepared_request._cookies.update(self.cookies) prepared_request.prepare_cookies(prepared_request._cookies) + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # Override the original request. + req = prepared_request + resp = self.send( - prepared_request, + req, stream=stream, timeout=timeout, verify=verify, @@ -172,6 +184,68 @@ class SessionRedirectMixin(object): i += 1 yield resp + def rebuild_auth(self, prepared_request, response): + """ + When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if 'Authorization' in headers: + # If we get redirected to a new host, we should strip out any + # authentication headers. + original_parsed = urlparse(response.request.url) + redirect_parsed = urlparse(url) + + if (original_parsed.hostname != redirect_parsed.hostname): + del headers['Authorization'] + + # .netrc might have more auth for us on our new host. + new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + return + + def rebuild_proxies(self, prepared_request, proxies): + """ + This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + """ + headers = prepared_request.headers + url = prepared_request.url + scheme = urlparse(url).scheme + new_proxies = proxies.copy() if proxies is not None else {} + + if self.trust_env and not should_bypass_proxies(url): + environ_proxies = get_environ_proxies(url) + + proxy = environ_proxies.get(scheme) + + if proxy: + new_proxies.setdefault(scheme, environ_proxies[scheme]) + + if 'Proxy-Authorization' in headers: + del headers['Proxy-Authorization'] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + if username and password: + headers['Proxy-Authorization'] = _basic_auth_str(username, password) + + return new_proxies + class Session(SessionRedirectMixin): """A Requests session. @@ -255,7 +329,7 @@ class Session(SessionRedirectMixin): :class:`Session`. :param request: :class:`Request` instance to prepare with this - session's settings. + session's settings. """ cookies = request.cookies or {} @@ -319,7 +393,7 @@ class Session(SessionRedirectMixin): :param auth: (optional) Auth tuple or callable to enable Basic/Digest/Custom HTTP Auth. 
:param timeout: (optional) Float describing the timeout of the - request. + request in seconds. :param allow_redirects: (optional) Boolean. Set to True by default. :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. @@ -466,8 +540,7 @@ class Session(SessionRedirectMixin): if not isinstance(request, PreparedRequest): raise ValueError('You can only send PreparedRequests.') - # Set up variables needed for resolve_redirects and dispatching of - # hooks + # Set up variables needed for resolve_redirects and dispatching of hooks allow_redirects = kwargs.pop('allow_redirects', True) stream = kwargs.get('stream') timeout = kwargs.get('timeout') @@ -481,8 +554,10 @@ class Session(SessionRedirectMixin): # Start time (approximately) of the request start = datetime.utcnow() + # Send the request r = adapter.send(request, **kwargs) + # Total elapsed time of the request (approximately) r.elapsed = datetime.utcnow() - start @@ -491,15 +566,20 @@ class Session(SessionRedirectMixin): # Persist cookies if r.history: + # If the hooks create history then we want those cookies too for resp in r.history: extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + extract_cookies_to_jar(self.cookies, request, r.raw) # Redirect resolving generator. - gen = self.resolve_redirects(r, request, stream=stream, - timeout=timeout, verify=verify, cert=cert, - proxies=proxies) + gen = self.resolve_redirects(r, request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies) # Resolve redirects if allowed. history = [resp for resp in gen] if allow_redirects else [] @@ -510,7 +590,10 @@ class Session(SessionRedirectMixin): history.insert(0, r) # Get the last request made r = history.pop() - r.history = tuple(history) + r.history = history + + if not stream: + r.content return r @@ -533,8 +616,10 @@ class Session(SessionRedirectMixin): """Registers a connection adapter to a prefix. Adapters are sorted in descending order by key length.""" + self.adapters[prefix] = adapter keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + for key in keys_to_move: self.adapters[key] = self.adapters.pop(key) diff --git a/libs/requests/structures.py b/libs/requests/structures.py index a1759137..66cdad86 100644 --- a/libs/requests/structures.py +++ b/libs/requests/structures.py @@ -8,30 +8,7 @@ Data structures that power Requests. """ -import os import collections -from itertools import islice - - -class IteratorProxy(object): - """docstring for IteratorProxy""" - def __init__(self, i): - self.i = i - # self.i = chain.from_iterable(i) - - def __iter__(self): - return self.i - - def __len__(self): - if hasattr(self.i, '__len__'): - return len(self.i) - if hasattr(self.i, 'len'): - return self.i.len - if hasattr(self.i, 'fileno'): - return os.fstat(self.i.fileno()).st_size - - def read(self, n): - return "".join(islice(self.i, None, n)) class CaseInsensitiveDict(collections.MutableMapping): @@ -106,8 +83,7 @@ class CaseInsensitiveDict(collections.MutableMapping): return CaseInsensitiveDict(self._store.values()) def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, dict(self.items())) - + return str(dict(self.items())) class LookupDict(dict): """Dictionary lookup object.""" diff --git a/libs/requests/utils.py b/libs/requests/utils.py index c7e2b089..68e50cf0 100644 --- a/libs/requests/utils.py +++ b/libs/requests/utils.py @@ -24,10 +24,10 @@ from . import __version__ from . 
import certs from .compat import parse_http_list as _parse_list_header from .compat import (quote, urlparse, bytes, str, OrderedDict, unquote, is_py2, - builtin_str, getproxies, proxy_bypass) + builtin_str, getproxies, proxy_bypass, urlunparse) from .cookies import RequestsCookieJar, cookiejar_from_dict from .structures import CaseInsensitiveDict -from .exceptions import MissingSchema, InvalidURL +from .exceptions import InvalidURL _hush_pyflakes = (RequestsCookieJar,) @@ -61,25 +61,34 @@ def super_len(o): return os.fstat(fileno).st_size if hasattr(o, 'getvalue'): - # e.g. BytesIO, cStringIO.StringI + # e.g. BytesIO, cStringIO.StringIO return len(o.getvalue()) + def get_netrc_auth(url): """Returns the Requests tuple auth for a given url from netrc.""" try: from netrc import netrc, NetrcParseError - locations = (os.path.expanduser('~/{0}'.format(f)) for f in NETRC_FILES) netrc_path = None - for loc in locations: - if os.path.exists(loc) and not netrc_path: + for f in NETRC_FILES: + try: + loc = os.path.expanduser('~/{0}'.format(f)) + except KeyError: + # os.path.expanduser can fail when $HOME is undefined and + # getpwuid fails. See http://bugs.python.org/issue20164 & + # https://github.com/kennethreitz/requests/issues/1846 + return + + if os.path.exists(loc): netrc_path = loc + break # Abort early if there isn't one. if netrc_path is None: - return netrc_path + return ri = urlparse(url) @@ -457,9 +466,10 @@ def is_valid_cidr(string_network): return True -def get_environ_proxies(url): - """Return a dict of environment proxies.""" - +def should_bypass_proxies(url): + """ + Returns whether we should bypass proxies or not. + """ get_proxy = lambda k: os.environ.get(k) or os.environ.get(k.upper()) # First check whether no_proxy is defined. If it is, check that the URL @@ -477,23 +487,36 @@ def get_environ_proxies(url): for proxy_ip in no_proxy: if is_valid_cidr(proxy_ip): if address_in_network(ip, proxy_ip): - return {} + return True else: for host in no_proxy: if netloc.endswith(host) or netloc.split(':')[0].endswith(host): # The URL does match something in no_proxy, so we don't want # to apply the proxies on this URL. - return {} + return True # If the system proxy settings indicate that this URL should be bypassed, # don't proxy. - if proxy_bypass(netloc): - return {} + # The proxy_bypass function is incredibly buggy on OS X in early versions + # of Python 2.6, so allow this call to fail. Only catch the specific + # exceptions we've seen, though: this call failing in other ways can reveal + # legitimate problems. + try: + bypass = proxy_bypass(netloc) + except (TypeError, socket.gaierror): + bypass = False - # If we get here, we either didn't have no_proxy set or we're not going - # anywhere that no_proxy applies to, and the system settings don't require - # bypassing the proxy for the current URL. 
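
The `should_bypass_proxies` helper being extracted here is what lets `get_environ_proxies` return an empty dict (see the continuation of this hunk just below). A simplified sketch of the `NO_PROXY` suffix matching it performs, using only the standard library; the real helper also honors CIDR-style entries via `is_valid_cidr`/`address_in_network` and guards the flaky OS X `proxy_bypass` call, both of which this sketch omits:

```python
import os

try:
    from urllib.parse import urlparse   # Python 3
except ImportError:
    from urlparse import urlparse       # Python 2

def bypasses_proxy(url, no_proxy=None):
    # A URL skips the proxy when its host ends with any entry in
    # NO_PROXY (comma-separated). CIDR entries and the proxy_bypass()
    # handling from the real helper are intentionally left out.
    no_proxy = no_proxy or os.environ.get('no_proxy') or os.environ.get('NO_PROXY') or ''
    host = urlparse(url).netloc.split(':')[0]
    entries = [e for e in no_proxy.replace(' ', '').split(',') if e]
    return any(host.endswith(entry) for entry in entries)

print(bypasses_proxy('http://db.internal.example:5432/', no_proxy='internal.example'))  # True
```
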
- return getproxies() + if bypass: + return True + + return False + +def get_environ_proxies(url): + """Return a dict of environment proxies.""" + if should_bypass_proxies(url): + return {} + else: + return getproxies() def default_user_agent(name="python-requests"): @@ -530,7 +553,7 @@ def default_user_agent(name="python-requests"): def default_headers(): return CaseInsensitiveDict({ 'User-Agent': default_user_agent(), - 'Accept-Encoding': ', '.join(('gzip', 'deflate', 'compress')), + 'Accept-Encoding': ', '.join(('gzip', 'deflate')), 'Accept': '*/*' }) @@ -604,24 +627,31 @@ def guess_json_utf(data): return None -def except_on_missing_scheme(url): - """Given a URL, raise a MissingSchema exception if the scheme is missing. - """ - scheme, netloc, path, params, query, fragment = urlparse(url) +def prepend_scheme_if_needed(url, new_scheme): + '''Given a URL that may or may not have a scheme, prepend the given scheme. + Does not replace a present scheme with the one provided as an argument.''' + scheme, netloc, path, params, query, fragment = urlparse(url, new_scheme) - if not scheme: - raise MissingSchema('Proxy URLs must have explicit schemes.') + # urlparse is a finicky beast, and sometimes decides that there isn't a + # netloc present. Assume that it's being over-cautious, and switch netloc + # and path if urlparse decided there was no netloc. + if not netloc: + netloc, path = path, netloc + + return urlunparse((scheme, netloc, path, params, query, fragment)) def get_auth_from_url(url): """Given a url with authentication components, extract them into a tuple of username,password.""" - if url: - url = unquote(url) - parsed = urlparse(url) - return (parsed.username, parsed.password) - else: - return ('', '') + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ('', '') + + return auth def to_native_string(string, encoding='ascii'): diff --git a/libs/rtorrent/__init__.py b/libs/rtorrent/__init__.py index d70e6e10..a3f16078 100755 --- a/libs/rtorrent/__init__.py +++ b/libs/rtorrent/__init__.py @@ -89,13 +89,16 @@ class RTorrent: def _get_conn(self): """Get ServerProxy instance""" - if self.username is not None and self.password is not None: + + if self.username and self.password: if self.scheme == 'scgi': raise NotImplementedError() + secure = self.scheme == 'https' + return self.sp( self.uri, - transport=BasicAuthTransport(self.username, self.password), + transport=BasicAuthTransport(secure, self.username, self.password), **self.sp_kwargs ) diff --git a/libs/rtorrent/lib/xmlrpc/basic_auth.py b/libs/rtorrent/lib/xmlrpc/basic_auth.py index 20c02d9a..c5654a21 100644 --- a/libs/rtorrent/lib/xmlrpc/basic_auth.py +++ b/libs/rtorrent/lib/xmlrpc/basic_auth.py @@ -20,24 +20,46 @@ # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
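
With the `_get_conn` change above, rtorrent now derives `secure` from the URI scheme and hands it to `BasicAuthTransport`, whose reworked constructor follows in the hunk below. A hedged usage sketch (the endpoint and credentials are hypothetical; Python 2, matching the vendored `xmlrpclib`/`httplib`):

```python
import xmlrpclib  # Python 2 stdlib, as used by the vendored rtorrent package

from rtorrent.lib.xmlrpc.basic_auth import BasicAuthTransport

uri = 'https://seedbox.example/RPC2'     # hypothetical rtorrent XML-RPC endpoint
secure = uri.startswith('https://')      # mirrors: secure = self.scheme == 'https'

# New signature: secure flag first, then the Basic-auth credentials.
transport = BasicAuthTransport(secure, 'user', 'secret')
server = xmlrpclib.ServerProxy(uri, transport=transport)

# Calls such as server.system.listMethods() now go over the
# httplib.HTTPSConnection that make_connection() picks when secure=True.
```
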
-from base64 import encodestring -import string +from base64 import b64encode +import httplib import xmlrpclib class BasicAuthTransport(xmlrpclib.Transport): - def __init__(self, username=None, password=None): + def __init__(self, secure=False, username=None, password=None): xmlrpclib.Transport.__init__(self) + self.secure = secure + self.username = username self.password = password def send_auth(self, h): - if self.username is not None and self.password is not None: - h.putheader('AUTHORIZATION', "Basic %s" % string.replace( - encodestring("%s:%s" % (self.username, self.password)), - "\012", "" - )) + if not self.username or not self.password: + return + + auth = b64encode("%s:%s" % (self.username, self.password)) + + h.putheader('Authorization', "Basic %s" % auth) + + def make_connection(self, host): + if self._connection and host == self._connection[0]: + return self._connection[1] + + chost, self._extra_headers, x509 = self.get_host_info(host) + + if self.secure: + try: + self._connection = host, httplib.HTTPSConnection(chost, None, **(x509 or {})) + except AttributeError: + raise NotImplementedError( + "your version of httplib doesn't support HTTPS" + ) + else: + self._connection = host, httplib.HTTPConnection(chost) + + return self._connection[1] + def single_request(self, host, handler, request_body, verbose=0): # issue XML-RPC request diff --git a/libs/sqlalchemy/__init__.py b/libs/sqlalchemy/__init__.py deleted file mode 100644 index 9a21a70f..00000000 --- a/libs/sqlalchemy/__init__.py +++ /dev/null @@ -1,128 +0,0 @@ -# sqlalchemy/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import inspect -import sys - -import sqlalchemy.exc as exceptions - -from sqlalchemy.sql import ( - alias, - and_, - asc, - between, - bindparam, - case, - cast, - collate, - delete, - desc, - distinct, - except_, - except_all, - exists, - extract, - func, - insert, - intersect, - intersect_all, - join, - literal, - literal_column, - modifier, - not_, - null, - or_, - outerjoin, - outparam, - over, - select, - subquery, - text, - tuple_, - type_coerce, - union, - union_all, - update, - ) - -from sqlalchemy.types import ( - BIGINT, - BINARY, - BLOB, - BOOLEAN, - BigInteger, - Binary, - Boolean, - CHAR, - CLOB, - DATE, - DATETIME, - DECIMAL, - Date, - DateTime, - Enum, - FLOAT, - Float, - INT, - INTEGER, - Integer, - Interval, - LargeBinary, - NCHAR, - NVARCHAR, - NUMERIC, - Numeric, - PickleType, - REAL, - SMALLINT, - SmallInteger, - String, - TEXT, - TIME, - TIMESTAMP, - Text, - Time, - TypeDecorator, - Unicode, - UnicodeText, - VARBINARY, - VARCHAR, - ) - - -from sqlalchemy.schema import ( - CheckConstraint, - Column, - ColumnDefault, - Constraint, - DDL, - DefaultClause, - FetchedValue, - ForeignKey, - ForeignKeyConstraint, - Index, - MetaData, - PassiveDefault, - PrimaryKeyConstraint, - Sequence, - Table, - ThreadLocalMetaData, - UniqueConstraint, - ) - -from sqlalchemy.engine import create_engine, engine_from_config - - -__all__ = sorted(name for name, obj in locals().items() - if not (name.startswith('_') or inspect.ismodule(obj))) - -__version__ = '0.7.10' - -del inspect, sys - -from sqlalchemy import util as _sa_util -_sa_util.importlater.resolve_all() diff --git a/libs/sqlalchemy/cextension/processors.c b/libs/sqlalchemy/cextension/processors.c deleted file mode 100644 index 427db5d8..00000000 --- 
a/libs/sqlalchemy/cextension/processors.c +++ /dev/null @@ -1,464 +0,0 @@ -/* -processors.c -Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com - -This module is part of SQLAlchemy and is released under -the MIT License: http://www.opensource.org/licenses/mit-license.php -*/ - -#include -#include - -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -#define PY_SSIZE_T_MAX INT_MAX -#define PY_SSIZE_T_MIN INT_MIN -#endif - - -static PyObject * -int_to_boolean(PyObject *self, PyObject *arg) -{ - long l = 0; - PyObject *res; - - if (arg == Py_None) - Py_RETURN_NONE; - - l = PyInt_AsLong(arg); - if (l == 0) { - res = Py_False; - } else if (l == 1) { - res = Py_True; - } else if ((l == -1) && PyErr_Occurred()) { - /* -1 can be either the actual value, or an error flag. */ - return NULL; - } else { - PyErr_SetString(PyExc_ValueError, - "int_to_boolean only accepts None, 0 or 1"); - return NULL; - } - - Py_INCREF(res); - return res; -} - -static PyObject * -to_str(PyObject *self, PyObject *arg) -{ - if (arg == Py_None) - Py_RETURN_NONE; - - return PyObject_Str(arg); -} - -static PyObject * -to_float(PyObject *self, PyObject *arg) -{ - if (arg == Py_None) - Py_RETURN_NONE; - - return PyNumber_Float(arg); -} - -static PyObject * -str_to_datetime(PyObject *self, PyObject *arg) -{ - const char *str; - unsigned int year, month, day, hour, minute, second, microsecond = 0; - PyObject *err_repr; - - if (arg == Py_None) - Py_RETURN_NONE; - - str = PyString_AsString(arg); - if (str == NULL) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse datetime string '%.200s' " - "- value is not a string.", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - - /* microseconds are optional */ - /* - TODO: this is slightly less picky than the Python version which would - not accept "2000-01-01 00:00:00.". I don't know which is better, but they - should be coherent. - */ - if (sscanf(str, "%4u-%2u-%2u %2u:%2u:%2u.%6u", &year, &month, &day, - &hour, &minute, &second, µsecond) < 6) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse datetime string: %.200s", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - return PyDateTime_FromDateAndTime(year, month, day, - hour, minute, second, microsecond); -} - -static PyObject * -str_to_time(PyObject *self, PyObject *arg) -{ - const char *str; - unsigned int hour, minute, second, microsecond = 0; - PyObject *err_repr; - - if (arg == Py_None) - Py_RETURN_NONE; - - str = PyString_AsString(arg); - if (str == NULL) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse time string '%.200s' - value is not a string.", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - - /* microseconds are optional */ - /* - TODO: this is slightly less picky than the Python version which would - not accept "00:00:00.". I don't know which is better, but they should be - coherent. 
- */ - if (sscanf(str, "%2u:%2u:%2u.%6u", &hour, &minute, &second, - µsecond) < 3) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse time string: %.200s", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - return PyTime_FromTime(hour, minute, second, microsecond); -} - -static PyObject * -str_to_date(PyObject *self, PyObject *arg) -{ - const char *str; - unsigned int year, month, day; - PyObject *err_repr; - - if (arg == Py_None) - Py_RETURN_NONE; - - str = PyString_AsString(arg); - if (str == NULL) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse date string '%.200s' - value is not a string.", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - - if (sscanf(str, "%4u-%2u-%2u", &year, &month, &day) != 3) { - err_repr = PyObject_Repr(arg); - if (err_repr == NULL) - return NULL; - PyErr_Format( - PyExc_ValueError, - "Couldn't parse date string: %.200s", - PyString_AsString(err_repr)); - Py_DECREF(err_repr); - return NULL; - } - return PyDate_FromDate(year, month, day); -} - - -/*********** - * Structs * - ***********/ - -typedef struct { - PyObject_HEAD - PyObject *encoding; - PyObject *errors; -} UnicodeResultProcessor; - -typedef struct { - PyObject_HEAD - PyObject *type; - PyObject *format; -} DecimalResultProcessor; - - - -/************************** - * UnicodeResultProcessor * - **************************/ - -static int -UnicodeResultProcessor_init(UnicodeResultProcessor *self, PyObject *args, - PyObject *kwds) -{ - PyObject *encoding, *errors = NULL; - static char *kwlist[] = {"encoding", "errors", NULL}; - - if (!PyArg_ParseTupleAndKeywords(args, kwds, "S|S:__init__", kwlist, - &encoding, &errors)) - return -1; - - Py_INCREF(encoding); - self->encoding = encoding; - - if (errors) { - Py_INCREF(errors); - } else { - errors = PyString_FromString("strict"); - if (errors == NULL) - return -1; - } - self->errors = errors; - - return 0; -} - -static PyObject * -UnicodeResultProcessor_process(UnicodeResultProcessor *self, PyObject *value) -{ - const char *encoding, *errors; - char *str; - Py_ssize_t len; - - if (value == Py_None) - Py_RETURN_NONE; - - if (PyString_AsStringAndSize(value, &str, &len)) - return NULL; - - encoding = PyString_AS_STRING(self->encoding); - errors = PyString_AS_STRING(self->errors); - - return PyUnicode_Decode(str, len, encoding, errors); -} - -static void -UnicodeResultProcessor_dealloc(UnicodeResultProcessor *self) -{ - Py_XDECREF(self->encoding); - Py_XDECREF(self->errors); - self->ob_type->tp_free((PyObject*)self); -} - -static PyMethodDef UnicodeResultProcessor_methods[] = { - {"process", (PyCFunction)UnicodeResultProcessor_process, METH_O, - "The value processor itself."}, - {NULL} /* Sentinel */ -}; - -static PyTypeObject UnicodeResultProcessorType = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "sqlalchemy.cprocessors.UnicodeResultProcessor", /* tp_name */ - sizeof(UnicodeResultProcessor), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)UnicodeResultProcessor_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ 
- "UnicodeResultProcessor objects", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - UnicodeResultProcessor_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)UnicodeResultProcessor_init, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ -}; - -/************************** - * DecimalResultProcessor * - **************************/ - -static int -DecimalResultProcessor_init(DecimalResultProcessor *self, PyObject *args, - PyObject *kwds) -{ - PyObject *type, *format; - - if (!PyArg_ParseTuple(args, "OS", &type, &format)) - return -1; - - Py_INCREF(type); - self->type = type; - - Py_INCREF(format); - self->format = format; - - return 0; -} - -static PyObject * -DecimalResultProcessor_process(DecimalResultProcessor *self, PyObject *value) -{ - PyObject *str, *result, *args; - - if (value == Py_None) - Py_RETURN_NONE; - - args = PyTuple_Pack(1, value); - if (args == NULL) - return NULL; - - str = PyString_Format(self->format, args); - Py_DECREF(args); - if (str == NULL) - return NULL; - - result = PyObject_CallFunctionObjArgs(self->type, str, NULL); - Py_DECREF(str); - return result; -} - -static void -DecimalResultProcessor_dealloc(DecimalResultProcessor *self) -{ - Py_XDECREF(self->type); - Py_XDECREF(self->format); - self->ob_type->tp_free((PyObject*)self); -} - -static PyMethodDef DecimalResultProcessor_methods[] = { - {"process", (PyCFunction)DecimalResultProcessor_process, METH_O, - "The value processor itself."}, - {NULL} /* Sentinel */ -}; - -static PyTypeObject DecimalResultProcessorType = { - PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "sqlalchemy.DecimalResultProcessor", /* tp_name */ - sizeof(DecimalResultProcessor), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)DecimalResultProcessor_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - 0, /* tp_as_sequence */ - 0, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - 0, /* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ - "DecimalResultProcessor objects", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - 0, /* tp_iter */ - 0, /* tp_iternext */ - DecimalResultProcessor_methods, /* tp_methods */ - 0, /* tp_members */ - 0, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)DecimalResultProcessor_init, /* tp_init */ - 0, /* tp_alloc */ - 0, /* tp_new */ -}; - -#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ -#define PyMODINIT_FUNC void -#endif - - -static PyMethodDef module_methods[] = { - {"int_to_boolean", int_to_boolean, METH_O, - "Convert an integer to a boolean."}, - {"to_str", to_str, METH_O, - "Convert any value to its string representation."}, - {"to_float", to_float, METH_O, - "Convert any value to its floating point representation."}, - {"str_to_datetime", str_to_datetime, METH_O, - "Convert an ISO string to a datetime.datetime object."}, - {"str_to_time", str_to_time, METH_O, - "Convert an ISO string to a datetime.time object."}, - {"str_to_date", str_to_date, METH_O, - "Convert an ISO string to a 
datetime.date object."}, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -PyMODINIT_FUNC -initcprocessors(void) -{ - PyObject *m; - - UnicodeResultProcessorType.tp_new = PyType_GenericNew; - if (PyType_Ready(&UnicodeResultProcessorType) < 0) - return; - - DecimalResultProcessorType.tp_new = PyType_GenericNew; - if (PyType_Ready(&DecimalResultProcessorType) < 0) - return; - - m = Py_InitModule3("cprocessors", module_methods, - "Module containing C versions of data processing functions."); - if (m == NULL) - return; - - PyDateTime_IMPORT; - - Py_INCREF(&UnicodeResultProcessorType); - PyModule_AddObject(m, "UnicodeResultProcessor", - (PyObject *)&UnicodeResultProcessorType); - - Py_INCREF(&DecimalResultProcessorType); - PyModule_AddObject(m, "DecimalResultProcessor", - (PyObject *)&DecimalResultProcessorType); -} - diff --git a/libs/sqlalchemy/cextension/resultproxy.c b/libs/sqlalchemy/cextension/resultproxy.c deleted file mode 100644 index ca9d28e6..00000000 --- a/libs/sqlalchemy/cextension/resultproxy.c +++ /dev/null @@ -1,638 +0,0 @@ -/* -resultproxy.c -Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com - -This module is part of SQLAlchemy and is released under -the MIT License: http://www.opensource.org/licenses/mit-license.php -*/ - -#include - -#if PY_VERSION_HEX < 0x02050000 && !defined(PY_SSIZE_T_MIN) -typedef int Py_ssize_t; -#define PY_SSIZE_T_MAX INT_MAX -#define PY_SSIZE_T_MIN INT_MIN -typedef Py_ssize_t (*lenfunc)(PyObject *); -#define PyInt_FromSsize_t(x) PyInt_FromLong(x) -typedef intargfunc ssizeargfunc; -#endif - - -/*********** - * Structs * - ***********/ - -typedef struct { - PyObject_HEAD - PyObject *parent; - PyObject *row; - PyObject *processors; - PyObject *keymap; -} BaseRowProxy; - -/**************** - * BaseRowProxy * - ****************/ - -static PyObject * -safe_rowproxy_reconstructor(PyObject *self, PyObject *args) -{ - PyObject *cls, *state, *tmp; - BaseRowProxy *obj; - - if (!PyArg_ParseTuple(args, "OO", &cls, &state)) - return NULL; - - obj = (BaseRowProxy *)PyObject_CallMethod(cls, "__new__", "O", cls); - if (obj == NULL) - return NULL; - - tmp = PyObject_CallMethod((PyObject *)obj, "__setstate__", "O", state); - if (tmp == NULL) { - Py_DECREF(obj); - return NULL; - } - Py_DECREF(tmp); - - if (obj->parent == NULL || obj->row == NULL || - obj->processors == NULL || obj->keymap == NULL) { - PyErr_SetString(PyExc_RuntimeError, - "__setstate__ for BaseRowProxy subclasses must set values " - "for parent, row, processors and keymap"); - Py_DECREF(obj); - return NULL; - } - - return (PyObject *)obj; -} - -static int -BaseRowProxy_init(BaseRowProxy *self, PyObject *args, PyObject *kwds) -{ - PyObject *parent, *row, *processors, *keymap; - - if (!PyArg_UnpackTuple(args, "BaseRowProxy", 4, 4, - &parent, &row, &processors, &keymap)) - return -1; - - Py_INCREF(parent); - self->parent = parent; - - if (!PySequence_Check(row)) { - PyErr_SetString(PyExc_TypeError, "row must be a sequence"); - return -1; - } - Py_INCREF(row); - self->row = row; - - if (!PyList_CheckExact(processors)) { - PyErr_SetString(PyExc_TypeError, "processors must be a list"); - return -1; - } - Py_INCREF(processors); - self->processors = processors; - - if (!PyDict_CheckExact(keymap)) { - PyErr_SetString(PyExc_TypeError, "keymap must be a dict"); - return -1; - } - Py_INCREF(keymap); - self->keymap = keymap; - - return 0; -} - -/* We need the reduce method because otherwise the default implementation - * does very weird stuff for pickle protocol 0 and 1. 
It calls - * BaseRowProxy.__new__(RowProxy_instance) upon *pickling*. - */ -static PyObject * -BaseRowProxy_reduce(PyObject *self) -{ - PyObject *method, *state; - PyObject *module, *reconstructor, *cls; - - method = PyObject_GetAttrString(self, "__getstate__"); - if (method == NULL) - return NULL; - - state = PyObject_CallObject(method, NULL); - Py_DECREF(method); - if (state == NULL) - return NULL; - - module = PyImport_ImportModule("sqlalchemy.engine.base"); - if (module == NULL) - return NULL; - - reconstructor = PyObject_GetAttrString(module, "rowproxy_reconstructor"); - Py_DECREF(module); - if (reconstructor == NULL) { - Py_DECREF(state); - return NULL; - } - - cls = PyObject_GetAttrString(self, "__class__"); - if (cls == NULL) { - Py_DECREF(reconstructor); - Py_DECREF(state); - return NULL; - } - - return Py_BuildValue("(N(NN))", reconstructor, cls, state); -} - -static void -BaseRowProxy_dealloc(BaseRowProxy *self) -{ - Py_XDECREF(self->parent); - Py_XDECREF(self->row); - Py_XDECREF(self->processors); - Py_XDECREF(self->keymap); - self->ob_type->tp_free((PyObject *)self); -} - -static PyObject * -BaseRowProxy_processvalues(PyObject *values, PyObject *processors, int astuple) -{ - Py_ssize_t num_values, num_processors; - PyObject **valueptr, **funcptr, **resultptr; - PyObject *func, *result, *processed_value, *values_fastseq; - - num_values = PySequence_Length(values); - num_processors = PyList_Size(processors); - if (num_values != num_processors) { - PyErr_Format(PyExc_RuntimeError, - "number of values in row (%d) differ from number of column " - "processors (%d)", - (int)num_values, (int)num_processors); - return NULL; - } - - if (astuple) { - result = PyTuple_New(num_values); - } else { - result = PyList_New(num_values); - } - if (result == NULL) - return NULL; - - values_fastseq = PySequence_Fast(values, "row must be a sequence"); - if (values_fastseq == NULL) - return NULL; - - valueptr = PySequence_Fast_ITEMS(values_fastseq); - funcptr = PySequence_Fast_ITEMS(processors); - resultptr = PySequence_Fast_ITEMS(result); - while (--num_values >= 0) { - func = *funcptr; - if (func != Py_None) { - processed_value = PyObject_CallFunctionObjArgs(func, *valueptr, - NULL); - if (processed_value == NULL) { - Py_DECREF(values_fastseq); - Py_DECREF(result); - return NULL; - } - *resultptr = processed_value; - } else { - Py_INCREF(*valueptr); - *resultptr = *valueptr; - } - valueptr++; - funcptr++; - resultptr++; - } - Py_DECREF(values_fastseq); - return result; -} - -static PyListObject * -BaseRowProxy_values(BaseRowProxy *self) -{ - return (PyListObject *)BaseRowProxy_processvalues(self->row, - self->processors, 0); -} - -static PyObject * -BaseRowProxy_iter(BaseRowProxy *self) -{ - PyObject *values, *result; - - values = BaseRowProxy_processvalues(self->row, self->processors, 1); - if (values == NULL) - return NULL; - - result = PyObject_GetIter(values); - Py_DECREF(values); - if (result == NULL) - return NULL; - - return result; -} - -static Py_ssize_t -BaseRowProxy_length(BaseRowProxy *self) -{ - return PySequence_Length(self->row); -} - -static PyObject * -BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key) -{ - PyObject *processors, *values; - PyObject *processor, *value, *processed_value; - PyObject *row, *record, *result, *indexobject; - PyObject *exc_module, *exception, *cstr_obj; - char *cstr_key; - long index; - int key_fallback = 0; - int tuple_check = 0; - - if (PyInt_CheckExact(key)) { - index = PyInt_AS_LONG(key); - } else if (PyLong_CheckExact(key)) { - index = 
PyLong_AsLong(key); - if ((index == -1) && PyErr_Occurred()) - /* -1 can be either the actual value, or an error flag. */ - return NULL; - } else if (PySlice_Check(key)) { - values = PyObject_GetItem(self->row, key); - if (values == NULL) - return NULL; - - processors = PyObject_GetItem(self->processors, key); - if (processors == NULL) { - Py_DECREF(values); - return NULL; - } - - result = BaseRowProxy_processvalues(values, processors, 1); - Py_DECREF(values); - Py_DECREF(processors); - return result; - } else { - record = PyDict_GetItem((PyObject *)self->keymap, key); - if (record == NULL) { - record = PyObject_CallMethod(self->parent, "_key_fallback", - "O", key); - if (record == NULL) - return NULL; - key_fallback = 1; - } - - indexobject = PyTuple_GetItem(record, 2); - if (indexobject == NULL) - return NULL; - - if (key_fallback) { - Py_DECREF(record); - } - - if (indexobject == Py_None) { - exc_module = PyImport_ImportModule("sqlalchemy.exc"); - if (exc_module == NULL) - return NULL; - - exception = PyObject_GetAttrString(exc_module, - "InvalidRequestError"); - Py_DECREF(exc_module); - if (exception == NULL) - return NULL; - - // wow. this seems quite excessive. - cstr_obj = PyObject_Str(key); - if (cstr_obj == NULL) - return NULL; - cstr_key = PyString_AsString(cstr_obj); - if (cstr_key == NULL) { - Py_DECREF(cstr_obj); - return NULL; - } - Py_DECREF(cstr_obj); - - PyErr_Format(exception, - "Ambiguous column name '%.200s' in result set! " - "try 'use_labels' option on select statement.", cstr_key); - return NULL; - } - - index = PyInt_AsLong(indexobject); - if ((index == -1) && PyErr_Occurred()) - /* -1 can be either the actual value, or an error flag. */ - return NULL; - } - processor = PyList_GetItem(self->processors, index); - if (processor == NULL) - return NULL; - - row = self->row; - if (PyTuple_CheckExact(row)) { - value = PyTuple_GetItem(row, index); - tuple_check = 1; - } - else { - value = PySequence_GetItem(row, index); - tuple_check = 0; - } - - if (value == NULL) - return NULL; - - if (processor != Py_None) { - processed_value = PyObject_CallFunctionObjArgs(processor, value, NULL); - if (!tuple_check) { - Py_DECREF(value); - } - return processed_value; - } else { - if (tuple_check) { - Py_INCREF(value); - } - return value; - } -} - -static PyObject * -BaseRowProxy_getitem(PyObject *self, Py_ssize_t i) -{ - return BaseRowProxy_subscript((BaseRowProxy*)self, PyInt_FromSsize_t(i)); -} - -static PyObject * -BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name) -{ - PyObject *tmp; - - if (!(tmp = PyObject_GenericGetAttr((PyObject *)self, name))) { - if (!PyErr_ExceptionMatches(PyExc_AttributeError)) - return NULL; - PyErr_Clear(); - } - else - return tmp; - - tmp = BaseRowProxy_subscript(self, name); - if (tmp == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) { - PyErr_Format( - PyExc_AttributeError, - "Could not locate column in row for column '%.200s'", - PyString_AsString(name) - ); - return NULL; - } - return tmp; -} - -/*********************** - * getters and setters * - ***********************/ - -static PyObject * -BaseRowProxy_getparent(BaseRowProxy *self, void *closure) -{ - Py_INCREF(self->parent); - return self->parent; -} - -static int -BaseRowProxy_setparent(BaseRowProxy *self, PyObject *value, void *closure) -{ - PyObject *module, *cls; - - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete the 'parent' attribute"); - return -1; - } - - module = PyImport_ImportModule("sqlalchemy.engine.base"); - if (module == NULL) - return -1; - - cls 
= PyObject_GetAttrString(module, "ResultMetaData"); - Py_DECREF(module); - if (cls == NULL) - return -1; - - if (PyObject_IsInstance(value, cls) != 1) { - PyErr_SetString(PyExc_TypeError, - "The 'parent' attribute value must be an instance of " - "ResultMetaData"); - return -1; - } - Py_DECREF(cls); - Py_XDECREF(self->parent); - Py_INCREF(value); - self->parent = value; - - return 0; -} - -static PyObject * -BaseRowProxy_getrow(BaseRowProxy *self, void *closure) -{ - Py_INCREF(self->row); - return self->row; -} - -static int -BaseRowProxy_setrow(BaseRowProxy *self, PyObject *value, void *closure) -{ - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete the 'row' attribute"); - return -1; - } - - if (!PySequence_Check(value)) { - PyErr_SetString(PyExc_TypeError, - "The 'row' attribute value must be a sequence"); - return -1; - } - - Py_XDECREF(self->row); - Py_INCREF(value); - self->row = value; - - return 0; -} - -static PyObject * -BaseRowProxy_getprocessors(BaseRowProxy *self, void *closure) -{ - Py_INCREF(self->processors); - return self->processors; -} - -static int -BaseRowProxy_setprocessors(BaseRowProxy *self, PyObject *value, void *closure) -{ - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete the 'processors' attribute"); - return -1; - } - - if (!PyList_CheckExact(value)) { - PyErr_SetString(PyExc_TypeError, - "The 'processors' attribute value must be a list"); - return -1; - } - - Py_XDECREF(self->processors); - Py_INCREF(value); - self->processors = value; - - return 0; -} - -static PyObject * -BaseRowProxy_getkeymap(BaseRowProxy *self, void *closure) -{ - Py_INCREF(self->keymap); - return self->keymap; -} - -static int -BaseRowProxy_setkeymap(BaseRowProxy *self, PyObject *value, void *closure) -{ - if (value == NULL) { - PyErr_SetString(PyExc_TypeError, - "Cannot delete the 'keymap' attribute"); - return -1; - } - - if (!PyDict_CheckExact(value)) { - PyErr_SetString(PyExc_TypeError, - "The 'keymap' attribute value must be a dict"); - return -1; - } - - Py_XDECREF(self->keymap); - Py_INCREF(value); - self->keymap = value; - - return 0; -} - -static PyGetSetDef BaseRowProxy_getseters[] = { - {"_parent", - (getter)BaseRowProxy_getparent, (setter)BaseRowProxy_setparent, - "ResultMetaData", - NULL}, - {"_row", - (getter)BaseRowProxy_getrow, (setter)BaseRowProxy_setrow, - "Original row tuple", - NULL}, - {"_processors", - (getter)BaseRowProxy_getprocessors, (setter)BaseRowProxy_setprocessors, - "list of type processors", - NULL}, - {"_keymap", - (getter)BaseRowProxy_getkeymap, (setter)BaseRowProxy_setkeymap, - "Key to (processor, index) dict", - NULL}, - {NULL} -}; - -static PyMethodDef BaseRowProxy_methods[] = { - {"values", (PyCFunction)BaseRowProxy_values, METH_NOARGS, - "Return the values represented by this BaseRowProxy as a list."}, - {"__reduce__", (PyCFunction)BaseRowProxy_reduce, METH_NOARGS, - "Pickle support method."}, - {NULL} /* Sentinel */ -}; - -static PySequenceMethods BaseRowProxy_as_sequence = { - (lenfunc)BaseRowProxy_length, /* sq_length */ - 0, /* sq_concat */ - 0, /* sq_repeat */ - (ssizeargfunc)BaseRowProxy_getitem, /* sq_item */ - 0, /* sq_slice */ - 0, /* sq_ass_item */ - 0, /* sq_ass_slice */ - 0, /* sq_contains */ - 0, /* sq_inplace_concat */ - 0, /* sq_inplace_repeat */ -}; - -static PyMappingMethods BaseRowProxy_as_mapping = { - (lenfunc)BaseRowProxy_length, /* mp_length */ - (binaryfunc)BaseRowProxy_subscript, /* mp_subscript */ - 0 /* mp_ass_subscript */ -}; - -static PyTypeObject BaseRowProxyType = { - 
PyObject_HEAD_INIT(NULL) - 0, /* ob_size */ - "sqlalchemy.cresultproxy.BaseRowProxy", /* tp_name */ - sizeof(BaseRowProxy), /* tp_basicsize */ - 0, /* tp_itemsize */ - (destructor)BaseRowProxy_dealloc, /* tp_dealloc */ - 0, /* tp_print */ - 0, /* tp_getattr */ - 0, /* tp_setattr */ - 0, /* tp_compare */ - 0, /* tp_repr */ - 0, /* tp_as_number */ - &BaseRowProxy_as_sequence, /* tp_as_sequence */ - &BaseRowProxy_as_mapping, /* tp_as_mapping */ - 0, /* tp_hash */ - 0, /* tp_call */ - 0, /* tp_str */ - (getattrofunc)BaseRowProxy_getattro,/* tp_getattro */ - 0, /* tp_setattro */ - 0, /* tp_as_buffer */ - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /* tp_flags */ - "BaseRowProxy is a abstract base class for RowProxy", /* tp_doc */ - 0, /* tp_traverse */ - 0, /* tp_clear */ - 0, /* tp_richcompare */ - 0, /* tp_weaklistoffset */ - (getiterfunc)BaseRowProxy_iter, /* tp_iter */ - 0, /* tp_iternext */ - BaseRowProxy_methods, /* tp_methods */ - 0, /* tp_members */ - BaseRowProxy_getseters, /* tp_getset */ - 0, /* tp_base */ - 0, /* tp_dict */ - 0, /* tp_descr_get */ - 0, /* tp_descr_set */ - 0, /* tp_dictoffset */ - (initproc)BaseRowProxy_init, /* tp_init */ - 0, /* tp_alloc */ - 0 /* tp_new */ -}; - - -#ifndef PyMODINIT_FUNC /* declarations for DLL import/export */ -#define PyMODINIT_FUNC void -#endif - - -static PyMethodDef module_methods[] = { - {"safe_rowproxy_reconstructor", safe_rowproxy_reconstructor, METH_VARARGS, - "reconstruct a RowProxy instance from its pickled form."}, - {NULL, NULL, 0, NULL} /* Sentinel */ -}; - -PyMODINIT_FUNC -initcresultproxy(void) -{ - PyObject *m; - - BaseRowProxyType.tp_new = PyType_GenericNew; - if (PyType_Ready(&BaseRowProxyType) < 0) - return; - - m = Py_InitModule3("cresultproxy", module_methods, - "Module containing C versions of core ResultProxy classes."); - if (m == NULL) - return; - - Py_INCREF(&BaseRowProxyType); - PyModule_AddObject(m, "BaseRowProxy", (PyObject *)&BaseRowProxyType); - -} - diff --git a/libs/sqlalchemy/connectors/__init__.py b/libs/sqlalchemy/connectors/__init__.py deleted file mode 100644 index a4e017c4..00000000 --- a/libs/sqlalchemy/connectors/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# connectors/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -class Connector(object): - pass - diff --git a/libs/sqlalchemy/connectors/mxodbc.py b/libs/sqlalchemy/connectors/mxodbc.py deleted file mode 100644 index 2848f200..00000000 --- a/libs/sqlalchemy/connectors/mxodbc.py +++ /dev/null @@ -1,150 +0,0 @@ -# connectors/mxodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Provide an SQLALchemy connector for the eGenix mxODBC commercial -Python adapter for ODBC. This is not a free product, but eGenix -provides SQLAlchemy with a license for use in continuous integration -testing. - -This has been tested for use with mxODBC 3.1.2 on SQL Server 2005 -and 2008, using the SQL Server Native driver. However, it is -possible for this to be used on other database platforms. 
- -For more info on mxODBC, see http://www.egenix.com/ - -""" - -import sys -import re -import warnings - -from sqlalchemy.connectors import Connector - -class MxODBCConnector(Connector): - driver='mxodbc' - - supports_sane_multi_rowcount = False - supports_unicode_statements = False - supports_unicode_binds = False - - supports_native_decimal = True - - @classmethod - def dbapi(cls): - # this classmethod will normally be replaced by an instance - # attribute of the same name, so this is normally only called once. - cls._load_mx_exceptions() - platform = sys.platform - if platform == 'win32': - from mx.ODBC import Windows as module - # this can be the string "linux2", and possibly others - elif 'linux' in platform: - from mx.ODBC import unixODBC as module - elif platform == 'darwin': - from mx.ODBC import iODBC as module - else: - raise ImportError, "Unrecognized platform for mxODBC import" - return module - - @classmethod - def _load_mx_exceptions(cls): - """ Import mxODBC exception classes into the module namespace, - as if they had been imported normally. This is done here - to avoid requiring all SQLAlchemy users to install mxODBC. - """ - global InterfaceError, ProgrammingError - from mx.ODBC import InterfaceError - from mx.ODBC import ProgrammingError - - def on_connect(self): - def connect(conn): - conn.stringformat = self.dbapi.MIXED_STRINGFORMAT - conn.datetimeformat = self.dbapi.PYDATETIME_DATETIMEFORMAT - conn.decimalformat = self.dbapi.DECIMAL_DECIMALFORMAT - conn.errorhandler = self._error_handler() - return connect - - def _error_handler(self): - """ Return a handler that adjusts mxODBC's raised Warnings to - emit Python standard warnings. - """ - from mx.ODBC.Error import Warning as MxOdbcWarning - def error_handler(connection, cursor, errorclass, errorvalue): - - if issubclass(errorclass, MxOdbcWarning): - errorclass.__bases__ = (Warning,) - warnings.warn(message=str(errorvalue), - category=errorclass, - stacklevel=2) - else: - raise errorclass, errorvalue - return error_handler - - def create_connect_args(self, url): - """ Return a tuple of *args,**kwargs for creating a connection. - - The mxODBC 3.x connection constructor looks like this: - - connect(dsn, user='', password='', - clear_auto_commit=1, errorhandler=None) - - This method translates the values in the provided uri - into args and kwargs needed to instantiate an mxODBC Connection. - - The arg 'errorhandler' is not used by SQLAlchemy and will - not be populated. - - """ - opts = url.translate_connect_args(username='user') - opts.update(url.query) - args = opts.pop('host') - opts.pop('port', None) - opts.pop('database', None) - return (args,), opts - - def is_disconnect(self, e, connection, cursor): - # TODO: eGenix recommends checking connection.closed here - # Does that detect dropped connections ? 
- if isinstance(e, self.dbapi.ProgrammingError): - return "connection already closed" in str(e) - elif isinstance(e, self.dbapi.Error): - return '[08S01]' in str(e) - else: - return False - - def _get_server_version_info(self, connection): - # eGenix suggests using conn.dbms_version instead - # of what we're doing here - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - # 18 == pyodbc.SQL_DBMS_VER - for n in r.split(dbapi_con.getinfo(18)[1]): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - - def do_execute(self, cursor, statement, parameters, context=None): - if context: - native_odbc_execute = context.execution_options.\ - get('native_odbc_execute', 'auto') - if native_odbc_execute is True: - # user specified native_odbc_execute=True - cursor.execute(statement, parameters) - elif native_odbc_execute is False: - # user specified native_odbc_execute=False - cursor.executedirect(statement, parameters) - elif context.is_crud: - # statement is UPDATE, DELETE, INSERT - cursor.execute(statement, parameters) - else: - # all other statements - cursor.executedirect(statement, parameters) - else: - cursor.executedirect(statement, parameters) diff --git a/libs/sqlalchemy/connectors/mysqldb.py b/libs/sqlalchemy/connectors/mysqldb.py deleted file mode 100644 index be1f3530..00000000 --- a/libs/sqlalchemy/connectors/mysqldb.py +++ /dev/null @@ -1,151 +0,0 @@ -"""Define behaviors common to MySQLdb dialects. - -Currently includes MySQL and Drizzle. - -""" - -from sqlalchemy.connectors import Connector -from sqlalchemy.engine import base as engine_base, default -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy import exc, log, schema, sql, types as sqltypes, util -from sqlalchemy import processors -import re - -# the subclassing of Connector by all classes -# here is not strictly necessary - -class MySQLDBExecutionContext(Connector): - - @property - def rowcount(self): - if hasattr(self, '_rowcount'): - return self._rowcount - else: - return self.cursor.rowcount - -class MySQLDBCompiler(Connector): - def visit_mod(self, binary, **kw): - return self.process(binary.left) + " %% " + self.process(binary.right) - - def post_process_text(self, text): - return text.replace('%', '%%') - -class MySQLDBIdentifierPreparer(Connector): - - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace("%", "%%") - -class MySQLDBConnector(Connector): - driver = 'mysqldb' - supports_unicode_statements = False - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - - supports_native_decimal = True - - default_paramstyle = 'format' - - @classmethod - def dbapi(cls): - # is overridden when pymysql is used - return __import__('MySQLdb') - - def do_executemany(self, cursor, statement, parameters, context=None): - rowcount = cursor.executemany(statement, parameters) - if context is not None: - context._rowcount = rowcount - - def create_connect_args(self, url): - opts = url.translate_connect_args(database='db', username='user', - password='passwd') - opts.update(url.query) - - util.coerce_kw_type(opts, 'compress', bool) - util.coerce_kw_type(opts, 'connect_timeout', int) - util.coerce_kw_type(opts, 'read_timeout', int) - util.coerce_kw_type(opts, 'client_flag', int) - util.coerce_kw_type(opts, 'local_infile', int) - # Note: using either of the below will cause all strings to be returned - # as Unicode, both in raw SQL operations and with column types 
like - # String and MSString. - util.coerce_kw_type(opts, 'use_unicode', bool) - util.coerce_kw_type(opts, 'charset', str) - - # Rich values 'cursorclass' and 'conv' are not supported via - # query string. - - ssl = {} - for key in ['ssl_ca', 'ssl_key', 'ssl_cert', 'ssl_capath', 'ssl_cipher']: - if key in opts: - ssl[key[4:]] = opts[key] - util.coerce_kw_type(ssl, key[4:], str) - del opts[key] - if ssl: - opts['ssl'] = ssl - - # FOUND_ROWS must be set in CLIENT_FLAGS to enable - # supports_sane_rowcount. - client_flag = opts.get('client_flag', 0) - if self.dbapi is not None: - try: - CLIENT_FLAGS = __import__( - self.dbapi.__name__ + '.constants.CLIENT' - ).constants.CLIENT - client_flag |= CLIENT_FLAGS.FOUND_ROWS - except (AttributeError, ImportError): - self.supports_sane_rowcount = False - opts['client_flag'] = client_flag - return [[], opts] - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.get_server_info()): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - - def _extract_error_code(self, exception): - return exception.args[0] - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - # Note: MySQL-python 1.2.1c7 seems to ignore changes made - # on a connection via set_character_set() - if self.server_version_info < (4, 1, 0): - try: - return connection.connection.character_set_name() - except AttributeError: - # < 1.2.1 final MySQL-python drivers have no charset support. - # a query is needed. - pass - - # Prefer 'character_set_results' for the current connection over the - # value in the driver. SET NAMES or individual variable SETs will - # change the charset without updating the driver's view of the world. - # - # If it's decided that issuing that sort of SQL leaves you SOL, then - # this can prefer the driver value. - rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") - opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) - - if 'character_set_results' in opts: - return opts['character_set_results'] - try: - return connection.connection.character_set_name() - except AttributeError: - # Still no charset on < 1.2.1 final... - if 'character_set' in opts: - return opts['character_set'] - else: - util.warn( - "Could not detect the connection character set with this " - "combination of MySQL server and MySQL-python. " - "MySQL-python >= 1.2.2 is recommended. 
Assuming latin1.") - return 'latin1' - diff --git a/libs/sqlalchemy/connectors/pyodbc.py b/libs/sqlalchemy/connectors/pyodbc.py deleted file mode 100644 index 5be65d2d..00000000 --- a/libs/sqlalchemy/connectors/pyodbc.py +++ /dev/null @@ -1,163 +0,0 @@ -# connectors/pyodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.connectors import Connector -from sqlalchemy.util import asbool - -import sys -import re -import urllib - -class PyODBCConnector(Connector): - driver='pyodbc' - - supports_sane_multi_rowcount = False - # PyODBC unicode is broken on UCS-4 builds - supports_unicode = sys.maxunicode == 65535 - supports_unicode_statements = supports_unicode - supports_native_decimal = True - default_paramstyle = 'named' - - # for non-DSN connections, this should - # hold the desired driver name - pyodbc_driver_name = None - - # will be set to True after initialize() - # if the freetds.so is detected - freetds = False - - # will be set to the string version of - # the FreeTDS driver if freetds is detected - freetds_driver_version = None - - # will be set to True after initialize() - # if the libessqlsrv.so is detected - easysoft = False - - def __init__(self, supports_unicode_binds=None, **kw): - super(PyODBCConnector, self).__init__(**kw) - self._user_supports_unicode_binds = supports_unicode_binds - - @classmethod - def dbapi(cls): - return __import__('pyodbc') - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - opts.update(url.query) - - keys = opts - query = url.query - - connect_args = {} - for param in ('ansi', 'unicode_results', 'autocommit'): - if param in keys: - connect_args[param] = asbool(keys.pop(param)) - - if 'odbc_connect' in keys: - connectors = [urllib.unquote_plus(keys.pop('odbc_connect'))] - else: - dsn_connection = 'dsn' in keys or \ - ('host' in keys and 'database' not in keys) - if dsn_connection: - connectors= ['dsn=%s' % (keys.pop('host', '') or \ - keys.pop('dsn', ''))] - else: - port = '' - if 'port' in keys and not 'port' in query: - port = ',%d' % int(keys.pop('port')) - - connectors = ["DRIVER={%s}" % - keys.pop('driver', self.pyodbc_driver_name), - 'Server=%s%s' % (keys.pop('host', ''), port), - 'Database=%s' % keys.pop('database', '') ] - - user = keys.pop("user", None) - if user: - connectors.append("UID=%s" % user) - connectors.append("PWD=%s" % keys.pop('password', '')) - else: - connectors.append("Trusted_Connection=Yes") - - # if set to 'Yes', the ODBC layer will try to automagically - # convert textual data from your database encoding to your - # client encoding. This should obviously be set to 'No' if - # you query a cp1253 encoded database from a latin1 client... - if 'odbc_autotranslate' in keys: - connectors.append("AutoTranslate=%s" % - keys.pop("odbc_autotranslate")) - - connectors.extend(['%s=%s' % (k,v) for k,v in keys.iteritems()]) - return [[";".join (connectors)], connect_args] - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.ProgrammingError): - return "The cursor's connection has been closed." in str(e) or \ - 'Attempt to use a closed connection.' in str(e) - elif isinstance(e, self.dbapi.Error): - return '[08S01]' in str(e) - else: - return False - - def initialize(self, connection): - # determine FreeTDS first. can't issue SQL easily - # without getting unicode_statements/binds set up. 
- - pyodbc = self.dbapi - - dbapi_con = connection.connection - - _sql_driver_name = dbapi_con.getinfo(pyodbc.SQL_DRIVER_NAME) - self.freetds = bool(re.match(r".*libtdsodbc.*\.so", _sql_driver_name - )) - self.easysoft = bool(re.match(r".*libessqlsrv.*\.so", _sql_driver_name - )) - - if self.freetds: - self.freetds_driver_version = dbapi_con.getinfo(pyodbc.SQL_DRIVER_VER) - - # the "Py2K only" part here is theoretical. - # have not tried pyodbc + python3.1 yet. - # Py2K - self.supports_unicode_statements = not self.freetds and not self.easysoft - if self._user_supports_unicode_binds is not None: - self.supports_unicode_binds = self._user_supports_unicode_binds - else: - self.supports_unicode_binds = (not self.freetds or - self.freetds_driver_version >= '0.91' - ) and not self.easysoft - # end Py2K - - # run other initialization which asks for user name, etc. - super(PyODBCConnector, self).initialize(connection) - - def _dbapi_version(self): - if not self.dbapi: - return () - return self._parse_dbapi_version(self.dbapi.version) - - def _parse_dbapi_version(self, vers): - m = re.match( - r'(?:py.*-)?([\d\.]+)(?:-(\w+))?', - vers - ) - if not m: - return () - vers = tuple([int(x) for x in m.group(1).split(".")]) - if m.group(2): - vers += (m.group(2),) - return vers - - def _get_server_version_info(self, connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.getinfo(self.dbapi.SQL_DBMS_VER)): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) diff --git a/libs/sqlalchemy/connectors/zxJDBC.py b/libs/sqlalchemy/connectors/zxJDBC.py deleted file mode 100644 index e2bfed2e..00000000 --- a/libs/sqlalchemy/connectors/zxJDBC.py +++ /dev/null @@ -1,58 +0,0 @@ -# connectors/zxJDBC.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import sys -from sqlalchemy.connectors import Connector - -class ZxJDBCConnector(Connector): - driver = 'zxjdbc' - - supports_sane_rowcount = False - supports_sane_multi_rowcount = False - - supports_unicode_binds = True - supports_unicode_statements = sys.version > '2.5.0+' - description_encoding = None - default_paramstyle = 'qmark' - - jdbc_db_name = None - jdbc_driver_name = None - - @classmethod - def dbapi(cls): - from com.ziclix.python.sql import zxJDBC - return zxJDBC - - def _driver_kwargs(self): - """Return kw arg dict to be sent to connect().""" - return {} - - def _create_jdbc_url(self, url): - """Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`""" - return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host, - url.port is not None - and ':%s' % url.port or '', - url.database) - - def create_connect_args(self, url): - opts = self._driver_kwargs() - opts.update(url.query) - return [ - [self._create_jdbc_url(url), - url.username, url.password, - self.jdbc_driver_name], - opts] - - def is_disconnect(self, e, connection, cursor): - if not isinstance(e, self.dbapi.ProgrammingError): - return False - e = str(e) - return 'connection is closed' in e or 'cursor is closed' in e - - def _get_server_version_info(self, connection): - # use connection.connection.dbversion, and parse appropriately - # to get a tuple - raise NotImplementedError() diff --git a/libs/sqlalchemy/databases/__init__.py b/libs/sqlalchemy/databases/__init__.py deleted file mode 100644 index bb0b370e..00000000 --- 
a/libs/sqlalchemy/databases/__init__.py +++ /dev/null @@ -1,37 +0,0 @@ -# databases/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Include imports from the sqlalchemy.dialects package for backwards -compatibility with pre 0.6 versions. - -""" -from sqlalchemy.dialects.sqlite import base as sqlite -from sqlalchemy.dialects.postgresql import base as postgresql -postgres = postgresql -from sqlalchemy.dialects.mysql import base as mysql -from sqlalchemy.dialects.drizzle import base as drizzle -from sqlalchemy.dialects.oracle import base as oracle -from sqlalchemy.dialects.firebird import base as firebird -from sqlalchemy.dialects.maxdb import base as maxdb -from sqlalchemy.dialects.informix import base as informix -from sqlalchemy.dialects.mssql import base as mssql -from sqlalchemy.dialects.access import base as access -from sqlalchemy.dialects.sybase import base as sybase - - -__all__ = ( - 'access', - 'drizzle', - 'firebird', - 'informix', - 'maxdb', - 'mssql', - 'mysql', - 'postgresql', - 'sqlite', - 'oracle', - 'sybase', - ) diff --git a/libs/sqlalchemy/dialects/__init__.py b/libs/sqlalchemy/dialects/__init__.py deleted file mode 100644 index a427cde4..00000000 --- a/libs/sqlalchemy/dialects/__init__.py +++ /dev/null @@ -1,19 +0,0 @@ -# dialects/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -__all__ = ( -# 'access', - 'drizzle', - 'firebird', -# 'informix', -# 'maxdb', - 'mssql', - 'mysql', - 'oracle', - 'postgresql', - 'sqlite', - 'sybase', - ) diff --git a/libs/sqlalchemy/dialects/access/__init__.py b/libs/sqlalchemy/dialects/access/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/libs/sqlalchemy/dialects/access/base.py b/libs/sqlalchemy/dialects/access/base.py deleted file mode 100644 index f107c9c8..00000000 --- a/libs/sqlalchemy/dialects/access/base.py +++ /dev/null @@ -1,451 +0,0 @@ -# access/base.py -# Copyright (C) 2007-2011 the SQLAlchemy authors and contributors -# Copyright (C) 2007 Paul Johnston, paj@pajhome.org.uk -# Portions derived from jet2sql.py by Matt Keranen, mksql@yahoo.com -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for the Microsoft Access database. - -.. note:: - - The Access dialect is **non-functional as of SQLAlchemy 0.6**, - pending development efforts to bring it up-to-date. 
- - -""" -from sqlalchemy import sql, schema, types, exc, pool -from sqlalchemy.sql import compiler, expression -from sqlalchemy.engine import default, base, reflection -from sqlalchemy import processors - -class AcNumeric(types.Numeric): - def get_col_spec(self): - return "NUMERIC" - - def bind_processor(self, dialect): - return processors.to_str - - def result_processor(self, dialect, coltype): - return None - -class AcFloat(types.Float): - def get_col_spec(self): - return "FLOAT" - - def bind_processor(self, dialect): - """By converting to string, we can use Decimal types round-trip.""" - return processors.to_str - -class AcInteger(types.Integer): - def get_col_spec(self): - return "INTEGER" - -class AcTinyInteger(types.Integer): - def get_col_spec(self): - return "TINYINT" - -class AcSmallInteger(types.SmallInteger): - def get_col_spec(self): - return "SMALLINT" - -class AcDateTime(types.DateTime): - def get_col_spec(self): - return "DATETIME" - -class AcDate(types.Date): - - def get_col_spec(self): - return "DATETIME" - -class AcText(types.Text): - def get_col_spec(self): - return "MEMO" - -class AcString(types.String): - def get_col_spec(self): - return "TEXT" + (self.length and ("(%d)" % self.length) or "") - -class AcUnicode(types.Unicode): - def get_col_spec(self): - return "TEXT" + (self.length and ("(%d)" % self.length) or "") - - def bind_processor(self, dialect): - return None - - def result_processor(self, dialect, coltype): - return None - -class AcChar(types.CHAR): - def get_col_spec(self): - return "TEXT" + (self.length and ("(%d)" % self.length) or "") - -class AcBinary(types.LargeBinary): - def get_col_spec(self): - return "BINARY" - -class AcBoolean(types.Boolean): - def get_col_spec(self): - return "YESNO" - -class AcTimeStamp(types.TIMESTAMP): - def get_col_spec(self): - return "TIMESTAMP" - -class AccessExecutionContext(default.DefaultExecutionContext): - def _has_implicit_sequence(self, column): - if column.primary_key and column.autoincrement: - if isinstance(column.type, types.Integer) and \ - not column.foreign_keys: - if column.default is None or \ - (isinstance(column.default, schema.Sequence) and \ - column.default.optional): - return True - return False - - def post_exec(self): - """If we inserted into a row with a COUNTER column, fetch the ID""" - - if self.compiled.isinsert: - tbl = self.compiled.statement.table - if not hasattr(tbl, 'has_sequence'): - tbl.has_sequence = None - for column in tbl.c: - if getattr(column, 'sequence', False) or \ - self._has_implicit_sequence(column): - tbl.has_sequence = column - break - - if bool(tbl.has_sequence): - # TBD: for some reason _last_inserted_ids doesn't exist here - # (but it does at corresponding point in mssql???) 
- #if not len(self._last_inserted_ids) or - # self._last_inserted_ids[0] is None: - self.cursor.execute("SELECT @@identity AS lastrowid") - row = self.cursor.fetchone() - self._last_inserted_ids = [int(row[0])] - #+ self._last_inserted_ids[1:] - # print "LAST ROW ID", self._last_inserted_ids - - super(AccessExecutionContext, self).post_exec() - - -const, daoEngine = None, None -class AccessDialect(default.DefaultDialect): - colspecs = { - types.Unicode : AcUnicode, - types.Integer : AcInteger, - types.SmallInteger: AcSmallInteger, - types.Numeric : AcNumeric, - types.Float : AcFloat, - types.DateTime : AcDateTime, - types.Date : AcDate, - types.String : AcString, - types.LargeBinary : AcBinary, - types.Boolean : AcBoolean, - types.Text : AcText, - types.CHAR: AcChar, - types.TIMESTAMP: AcTimeStamp, - } - name = 'access' - supports_sane_rowcount = False - supports_sane_multi_rowcount = False - - ported_sqla_06 = False - - def type_descriptor(self, typeobj): - newobj = types.adapt_type(typeobj, self.colspecs) - return newobj - - def __init__(self, **params): - super(AccessDialect, self).__init__(**params) - self.text_as_varchar = False - self._dtbs = None - - @classmethod - def dbapi(cls): - import win32com.client, pythoncom - - global const, daoEngine - if const is None: - const = win32com.client.constants - for suffix in (".36", ".35", ".30"): - try: - daoEngine = win32com.client.\ - gencache.\ - EnsureDispatch("DAO.DBEngine" + suffix) - break - except pythoncom.com_error: - pass - else: - raise exc.InvalidRequestError( - "Can't find a DB engine. Check " - "http://support.microsoft.com/kb/239114 for details.") - - import pyodbc as module - return module - - def create_connect_args(self, url): - opts = url.translate_connect_args() - connectors = ["Driver={Microsoft Access Driver (*.mdb)}"] - connectors.append("Dbq=%s" % opts["database"]) - user = opts.get("username", None) - if user: - connectors.append("UID=%s" % user) - connectors.append("PWD=%s" % opts.get("password", "")) - return [[";".join(connectors)], {}] - - def last_inserted_ids(self): - return self.context.last_inserted_ids - - def do_execute(self, cursor, statement, params, context=None): - if params == {}: - params = () - super(AccessDialect, self).\ - do_execute(cursor, statement, params, **kwargs) - - def _execute(self, c, statement, parameters): - try: - if parameters == {}: - parameters = () - c.execute(statement, parameters) - self.context.rowcount = c.rowcount - except Exception, e: - raise exc.DBAPIError.instance(statement, parameters, e) - - def has_table(self, connection, tablename, schema=None): - # This approach seems to be more reliable that using DAO - try: - connection.execute('select top 1 * from [%s]' % tablename) - return True - except Exception, e: - return False - - def reflecttable(self, connection, table, include_columns): - # This is defined in the function, as it relies on win32com constants, - # that aren't imported until dbapi method is called - if not hasattr(self, 'ischema_names'): - self.ischema_names = { - const.dbByte: AcBinary, - const.dbInteger: AcInteger, - const.dbLong: AcInteger, - const.dbSingle: AcFloat, - const.dbDouble: AcFloat, - const.dbDate: AcDateTime, - const.dbLongBinary: AcBinary, - const.dbMemo: AcText, - const.dbBoolean: AcBoolean, - const.dbText: AcUnicode, # All Access strings are - # unicode - const.dbCurrency: AcNumeric, - } - - # A fresh DAO connection is opened for each reflection - # This is necessary, so we get the latest updates - dtbs = 
daoEngine.OpenDatabase(connection.engine.url.database) - - try: - for tbl in dtbs.TableDefs: - if tbl.Name.lower() == table.name.lower(): - break - else: - raise exc.NoSuchTableError(table.name) - - for col in tbl.Fields: - coltype = self.ischema_names[col.Type] - if col.Type == const.dbText: - coltype = coltype(col.Size) - - colargs = \ - { - 'nullable': not(col.Required or - col.Attributes & const.dbAutoIncrField), - } - default = col.DefaultValue - - if col.Attributes & const.dbAutoIncrField: - colargs['default'] = schema.Sequence(col.Name + '_seq') - elif default: - if col.Type == const.dbBoolean: - default = default == 'Yes' and '1' or '0' - colargs['server_default'] = \ - schema.DefaultClause(sql.text(default)) - - table.append_column( - schema.Column(col.Name, coltype, **colargs)) - - # TBD: check constraints - - # Find primary key columns first - for idx in tbl.Indexes: - if idx.Primary: - for col in idx.Fields: - thecol = table.c[col.Name] - table.primary_key.add(thecol) - if isinstance(thecol.type, AcInteger) and \ - not (thecol.default and - isinstance( - thecol.default.arg, - schema.Sequence - )): - thecol.autoincrement = False - - # Then add other indexes - for idx in tbl.Indexes: - if not idx.Primary: - if len(idx.Fields) == 1: - col = table.c[idx.Fields[0].Name] - if not col.primary_key: - col.index = True - col.unique = idx.Unique - else: - pass # TBD: multi-column indexes - - - for fk in dtbs.Relations: - if fk.ForeignTable != table.name: - continue - scols = [c.ForeignName for c in fk.Fields] - rcols = ['%s.%s' % (fk.Table, c.Name) for c in fk.Fields] - table.append_constraint( - schema.ForeignKeyConstraint(scols, rcols,\ - link_to_name=True)) - - finally: - dtbs.Close() - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - # A fresh DAO connection is opened for each reflection - # This is necessary, so we get the latest updates - dtbs = daoEngine.OpenDatabase(connection.engine.url.database) - - names = [t.Name for t in dtbs.TableDefs - if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"] - dtbs.Close() - return names - - -class AccessCompiler(compiler.SQLCompiler): - extract_map = compiler.SQLCompiler.extract_map.copy() - extract_map.update ({ - 'month': 'm', - 'day': 'd', - 'year': 'yyyy', - 'second': 's', - 'hour': 'h', - 'doy': 'y', - 'minute': 'n', - 'quarter': 'q', - 'dow': 'w', - 'week': 'ww' - }) - - def visit_select_precolumns(self, select): - """Access puts TOP, it's version of LIMIT here """ - s = select.distinct and "DISTINCT " or "" - if select.limit: - s += "TOP %s " % (select.limit) - if select.offset: - raise exc.InvalidRequestError( - 'Access does not support LIMIT with an offset') - return s - - def limit_clause(self, select): - """Limit in access is after the select keyword""" - return "" - - def binary_operator_string(self, binary): - """Access uses "mod" instead of "%" """ - return binary.operator == '%' and 'mod' or binary.operator - - def label_select_column(self, select, column, asfrom): - if isinstance(column, expression.Function): - return column.label() - else: - return super(AccessCompiler, self).\ - label_select_column(select, column, asfrom) - - function_rewrites = {'current_date': 'now', - 'current_timestamp': 'now', - 'length': 'len', - } - def visit_function(self, func): - """Access function names differ from the ANSI SQL names; - rewrite common ones""" - func.name = self.function_rewrites.get(func.name, func.name) - return super(AccessCompiler, self).visit_function(func) - - def for_update_clause(self, select): 
- """FOR UPDATE is not supported by Access; silently ignore""" - return '' - - # Strip schema - def visit_table(self, table, asfrom=False, **kwargs): - if asfrom: - return self.preparer.quote(table.name, table.quote) - else: - return "" - - def visit_join(self, join, asfrom=False, **kwargs): - return (self.process(join.left, asfrom=True) + \ - (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN ") + \ - self.process(join.right, asfrom=True) + " ON " + \ - self.process(join.onclause)) - - def visit_extract(self, extract, **kw): - field = self.extract_map.get(extract.field, extract.field) - return 'DATEPART("%s", %s)' % \ - (field, self.process(extract.expr, **kw)) - -class AccessDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + \ - column.type.dialect_impl(self.dialect).get_col_spec() - - # install a sequence if we have an implicit IDENTITY column - if (not getattr(column.table, 'has_sequence', False)) and \ - column.primary_key and \ - column.autoincrement and \ - isinstance(column.type, types.Integer) and \ - not column.foreign_keys: - if column.default is None or \ - (isinstance(column.default, schema.Sequence) and - column.default.optional): - column.sequence = schema.Sequence(column.name + '_seq') - - if not column.nullable: - colspec += " NOT NULL" - - if hasattr(column, 'sequence'): - column.table.has_sequence = column - colspec = self.preparer.format_column(column) + " counter" - else: - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - return colspec - - def visit_drop_index(self, drop): - index = drop.element - self.append("\nDROP INDEX [%s].[%s]" % \ - (index.table.name, - self._index_identifier(index.name))) - -class AccessIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = compiler.RESERVED_WORDS.copy() - reserved_words.update(['value', 'text']) - def __init__(self, dialect): - super(AccessIdentifierPreparer, self).\ - __init__(dialect, initial_quote='[', final_quote=']') - - -dialect = AccessDialect -dialect.poolclass = pool.SingletonThreadPool -dialect.statement_compiler = AccessCompiler -dialect.ddlcompiler = AccessDDLCompiler -dialect.preparer = AccessIdentifierPreparer -dialect.execution_ctx_cls = AccessExecutionContext diff --git a/libs/sqlalchemy/dialects/drizzle/__init__.py b/libs/sqlalchemy/dialects/drizzle/__init__.py deleted file mode 100644 index 1392b8e2..00000000 --- a/libs/sqlalchemy/dialects/drizzle/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -from sqlalchemy.dialects.drizzle import base, mysqldb - -base.dialect = mysqldb.dialect - -from sqlalchemy.dialects.drizzle.base import \ - BIGINT, BINARY, BLOB, \ - BOOLEAN, CHAR, DATE, \ - DATETIME, DECIMAL, DOUBLE, \ - ENUM, FLOAT, INTEGER, \ - NUMERIC, REAL, TEXT, \ - TIME, TIMESTAMP, VARBINARY, \ - VARCHAR, dialect - -__all__ = ( - 'BIGINT', 'BINARY', 'BLOB', - 'BOOLEAN', 'CHAR', 'DATE', - 'DATETIME', 'DECIMAL', 'DOUBLE', - 'ENUM', 'FLOAT', 'INTEGER', - 'NUMERIC', 'REAL', 'TEXT', - 'TIME', 'TIMESTAMP', 'VARBINARY', - 'VARCHAR', 'dialect' -) diff --git a/libs/sqlalchemy/dialects/drizzle/base.py b/libs/sqlalchemy/dialects/drizzle/base.py deleted file mode 100644 index 0165a2aa..00000000 --- a/libs/sqlalchemy/dialects/drizzle/base.py +++ /dev/null @@ -1,509 +0,0 @@ -# drizzle/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# Copyright (C) 2010-2011 Monty Taylor -# -# This module is part of SQLAlchemy and is released 
under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -"""Support for the Drizzle database. - -Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine -is InnoDB (transactions, foreign-keys) rather than MyISAM. For more -`Notable Differences `_, visit -the `Drizzle Documentation `_. - -The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of -the :doc:`SQLAlchemy MySQL ` documentation is also relevant. - -Connecting ----------- - -See the individual driver sections below for details on connecting. - -""" - -from sqlalchemy import exc -from sqlalchemy import log -from sqlalchemy import types as sqltypes -from sqlalchemy.engine import reflection -from sqlalchemy.dialects.mysql import base as mysql_dialect -from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ - BLOB, BINARY, VARBINARY - - -class _NumericType(object): - """Base for Drizzle numeric types.""" - - def __init__(self, **kw): - super(_NumericType, self).__init__(**kw) - - -class _FloatType(_NumericType, sqltypes.Float): - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - if isinstance(self, (REAL, DOUBLE)) and \ - ( - (precision is None and scale is not None) or - (precision is not None and scale is None) - ): - raise exc.ArgumentError( - "You must specify both precision and scale or omit " - "both altogether.") - - super(_FloatType, self).__init__(precision=precision, - asdecimal=asdecimal, **kw) - self.scale = scale - - -class _StringType(mysql_dialect._StringType): - """Base for Drizzle string types.""" - - def __init__(self, collation=None, binary=False, **kw): - kw['national'] = False - super(_StringType, self).__init__(collation=collation, binary=binary, - **kw) - - -class NUMERIC(_NumericType, sqltypes.NUMERIC): - """Drizzle NUMERIC type.""" - - __visit_name__ = 'NUMERIC' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a NUMERIC. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(NUMERIC, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class DECIMAL(_NumericType, sqltypes.DECIMAL): - """Drizzle DECIMAL type.""" - - __visit_name__ = 'DECIMAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DECIMAL. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - super(DECIMAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class DOUBLE(_FloatType): - """Drizzle DOUBLE type.""" - - __visit_name__ = 'DOUBLE' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DOUBLE. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(DOUBLE, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class REAL(_FloatType, sqltypes.REAL): - """Drizzle REAL type.""" - - __visit_name__ = 'REAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a REAL. - - :param precision: Total digits in this number. 
If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(REAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class FLOAT(_FloatType, sqltypes.FLOAT): - """Drizzle FLOAT type.""" - - __visit_name__ = 'FLOAT' - - def __init__(self, precision=None, scale=None, asdecimal=False, **kw): - """Construct a FLOAT. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - """ - - super(FLOAT, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - def bind_processor(self, dialect): - return None - - -class INTEGER(sqltypes.INTEGER): - """Drizzle INTEGER type.""" - - __visit_name__ = 'INTEGER' - - def __init__(self, **kw): - """Construct an INTEGER.""" - - super(INTEGER, self).__init__(**kw) - - -class BIGINT(sqltypes.BIGINT): - """Drizzle BIGINTEGER type.""" - - __visit_name__ = 'BIGINT' - - def __init__(self, **kw): - """Construct a BIGINTEGER.""" - - super(BIGINT, self).__init__(**kw) - - -class _DrizzleTime(mysql_dialect._MSTime): - """Drizzle TIME type.""" - - -class TIMESTAMP(sqltypes.TIMESTAMP): - """Drizzle TIMESTAMP type.""" - - __visit_name__ = 'TIMESTAMP' - - -class TEXT(_StringType, sqltypes.TEXT): - """Drizzle TEXT type, for text up to 2^16 characters.""" - - __visit_name__ = 'TEXT' - - def __init__(self, length=None, **kw): - """Construct a TEXT. - - :param length: Optional, if provided the server may optimize storage - by substituting the smallest TEXT type sufficient to store - ``length`` characters. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - - super(TEXT, self).__init__(length=length, **kw) - - -class VARCHAR(_StringType, sqltypes.VARCHAR): - """Drizzle VARCHAR type, for variable-length character data.""" - - __visit_name__ = 'VARCHAR' - - def __init__(self, length=None, **kwargs): - """Construct a VARCHAR. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - - super(VARCHAR, self).__init__(length=length, **kwargs) - - -class CHAR(_StringType, sqltypes.CHAR): - """Drizzle CHAR type, for fixed-length character data.""" - - __visit_name__ = 'CHAR' - - def __init__(self, length=None, **kwargs): - """Construct a CHAR. - - :param length: Maximum data length, in characters. - - :param binary: Optional, use the default binary collation for the - national character set. This does not affect the type of data - stored, use a BINARY type for binary data. - - :param collation: Optional, request a particular collation. Must be - compatible with the national character set. 
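As a quick usage sketch of the string types defined above (table and column names are invented for illustration, and a SQLAlchemy version that still ships this dialect is assumed)::

    from sqlalchemy import Table, Column, MetaData
    from sqlalchemy.dialects.drizzle import CHAR, VARCHAR

    metadata = MetaData()
    users = Table('users', metadata,
        Column('code', CHAR(4)),                   # fixed-length
        Column('name', VARCHAR(50, binary=True)),  # binary-collation short-hand
    )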
- - """ - - super(CHAR, self).__init__(length=length, **kwargs) - - -class ENUM(mysql_dialect.ENUM): - """Drizzle ENUM type.""" - - def __init__(self, *enums, **kw): - """Construct an ENUM. - - Example: - - Column('myenum', ENUM("foo", "bar", "baz")) - - :param enums: The range of valid values for this ENUM. Values will be - quoted when generating the schema according to the quoting flag (see - below). - - :param strict: Defaults to False: ensure that a given value is in this - ENUM's range of permissible values when inserting or updating rows. - Note that Drizzle will not raise a fatal error if you attempt to - store an out of range value- an alternate value will be stored - instead. - (See Drizzle ENUM documentation.) - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - :param quoting: Defaults to 'auto': automatically determine enum value - quoting. If all enum values are surrounded by the same quoting - character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. - - 'quoted': values in enums are already quoted, they will be used - directly when generating the schema - this usage is deprecated. - - 'unquoted': values in enums are not quoted, they will be escaped and - surrounded by single quotes when generating the schema. - - Previous versions of this type always required manually quoted - values to be supplied; future versions will always quote the string - literals for you. This is a transitional option. - - """ - - super(ENUM, self).__init__(*enums, **kw) - - -class _DrizzleBoolean(sqltypes.Boolean): - def get_dbapi_type(self, dbapi): - return dbapi.NUMERIC - - -colspecs = { - sqltypes.Numeric: NUMERIC, - sqltypes.Float: FLOAT, - sqltypes.Time: _DrizzleTime, - sqltypes.Enum: ENUM, - sqltypes.Boolean: _DrizzleBoolean, -} - - -# All the types we have in Drizzle -ischema_names = { - 'BIGINT': BIGINT, - 'BINARY': BINARY, - 'BLOB': BLOB, - 'BOOLEAN': BOOLEAN, - 'CHAR': CHAR, - 'DATE': DATE, - 'DATETIME': DATETIME, - 'DECIMAL': DECIMAL, - 'DOUBLE': DOUBLE, - 'ENUM': ENUM, - 'FLOAT': FLOAT, - 'INT': INTEGER, - 'INTEGER': INTEGER, - 'NUMERIC': NUMERIC, - 'TEXT': TEXT, - 'TIME': TIME, - 'TIMESTAMP': TIMESTAMP, - 'VARBINARY': VARBINARY, - 'VARCHAR': VARCHAR, -} - - -class DrizzleCompiler(mysql_dialect.MySQLCompiler): - - def visit_typeclause(self, typeclause): - type_ = typeclause.type.dialect_impl(self.dialect) - if isinstance(type_, sqltypes.Integer): - return 'INTEGER' - else: - return super(DrizzleCompiler, self).visit_typeclause(typeclause) - - def visit_cast(self, cast, **kwargs): - type_ = self.process(cast.typeclause) - if type_ is None: - return self.process(cast.clause) - - return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) - - -class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler): - pass - - -class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler): - def _extend_numeric(self, type_, spec): - return spec - - def _extend_string(self, type_, defaults, spec): - """Extend a string-type declaration with standard SQL - COLLATE annotations and Drizzle specific extensions. 
- - """ - - def attr(name): - return getattr(type_, name, defaults.get(name)) - - if attr('collation'): - collation = 'COLLATE %s' % type_.collation - elif attr('binary'): - collation = 'BINARY' - else: - collation = None - - return ' '.join([c for c in (spec, collation) - if c is not None]) - - def visit_NCHAR(self, type): - raise NotImplementedError("Drizzle does not support NCHAR") - - def visit_NVARCHAR(self, type): - raise NotImplementedError("Drizzle does not support NVARCHAR") - - def visit_FLOAT(self, type_): - if type_.scale is not None and type_.precision is not None: - return "FLOAT(%s, %s)" % (type_.precision, type_.scale) - else: - return "FLOAT" - - def visit_BOOLEAN(self, type_): - return "BOOLEAN" - - def visit_BLOB(self, type_): - return "BLOB" - - -class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext): - pass - - -class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer): - pass - - -class DrizzleDialect(mysql_dialect.MySQLDialect): - """Details of the Drizzle dialect. - - Not used directly in application code. - """ - - name = 'drizzle' - - _supports_cast = True - supports_sequences = False - supports_native_boolean = True - supports_views = False - - default_paramstyle = 'format' - colspecs = colspecs - - statement_compiler = DrizzleCompiler - ddl_compiler = DrizzleDDLCompiler - type_compiler = DrizzleTypeCompiler - ischema_names = ischema_names - preparer = DrizzleIdentifierPreparer - - def on_connect(self): - """Force autocommit - Drizzle Bug#707842 doesn't set this properly""" - - def connect(conn): - conn.autocommit(False) - return connect - - def do_commit(self, connection): - """Execute a COMMIT.""" - - connection.commit() - - def do_rollback(self, connection): - """Execute a ROLLBACK.""" - - connection.rollback() - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - """Return a Unicode SHOW TABLES from a given schema.""" - - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - charset = 'utf8' - rp = connection.execute("SHOW TABLES FROM %s" % - self.identifier_preparer.quote_identifier(current_schema)) - return [row[0] for row in self._compat_fetchall(rp, charset=charset)] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - raise NotImplementedError - - def _detect_casing(self, connection): - """Sniff out identifier case sensitivity. - - Cached per-connection. This value can not change without a server - restart. - """ - - return 0 - - def _detect_collations(self, connection): - """Pull the active COLLATIONS list from the server. - - Cached per-connection. - """ - - collations = {} - charset = self._connection_charset - rs = connection.execute( - 'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM' - ' data_dictionary.COLLATIONS') - for row in self._compat_fetchall(rs, charset): - collations[row[0]] = row[1] - return collations - - def _detect_ansiquotes(self, connection): - """Detect and adjust for the ANSI_QUOTES sql mode.""" - - self._server_ansiquotes = False - self._backslash_escapes = False - - -log.class_logger(DrizzleDialect) diff --git a/libs/sqlalchemy/dialects/drizzle/mysqldb.py b/libs/sqlalchemy/dialects/drizzle/mysqldb.py deleted file mode 100644 index ce9518a8..00000000 --- a/libs/sqlalchemy/dialects/drizzle/mysqldb.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Support for the Drizzle database via the mysql-python adapter. 
- -MySQL-Python is available at: - - http://sourceforge.net/projects/mysql-python - -Connecting ------------ - -Connect string format:: - - drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname> - -""" - -from sqlalchemy.dialects.drizzle.base import ( - DrizzleDialect, - DrizzleExecutionContext, - DrizzleCompiler, - DrizzleIdentifierPreparer) -from sqlalchemy.connectors.mysqldb import ( - MySQLDBExecutionContext, - MySQLDBCompiler, - MySQLDBIdentifierPreparer, - MySQLDBConnector) - - -class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext, - DrizzleExecutionContext): - pass - - -class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler): - pass - - -class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, - DrizzleIdentifierPreparer): - pass - - -class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect): - execution_ctx_cls = DrizzleExecutionContext_mysqldb - statement_compiler = DrizzleCompiler_mysqldb - preparer = DrizzleIdentifierPreparer_mysqldb - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - return 'utf8' - - -dialect = DrizzleDialect_mysqldb diff --git a/libs/sqlalchemy/dialects/firebird/__init__.py b/libs/sqlalchemy/dialects/firebird/__init__.py deleted file mode 100644 index 2a3b756f..00000000 --- a/libs/sqlalchemy/dialects/firebird/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# firebird/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.firebird import base, kinterbasdb - -base.dialect = kinterbasdb.dialect - -from sqlalchemy.dialects.firebird.base import \ - SMALLINT, BIGINT, FLOAT, FLOAT, DATE, TIME, \ - TEXT, NUMERIC, FLOAT, TIMESTAMP, VARCHAR, CHAR, BLOB,\ - dialect - -__all__ = ( - 'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME', - 'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB', - 'dialect' -) - - diff --git a/libs/sqlalchemy/dialects/firebird/base.py b/libs/sqlalchemy/dialects/firebird/base.py deleted file mode 100644 index a0bb9c20..00000000 --- a/libs/sqlalchemy/dialects/firebird/base.py +++ /dev/null @@ -1,700 +0,0 @@ -# firebird/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for the Firebird database. - -Connectivity is usually supplied via the kinterbasdb_ DBAPI module. - -Dialects -~~~~~~~~ - -Firebird offers two distinct dialects_ (not to be confused with a -SQLAlchemy ``Dialect``): - -dialect 1 - This is the old syntax and behaviour, inherited from Interbase pre-6.0. - -dialect 3 - This is the newer and supported syntax, introduced in Interbase 6.0. - -The SQLAlchemy Firebird dialect detects these versions and -adjusts its representation of SQL accordingly. However, -support for dialect 1 is not well tested and probably has -incompatibilities. - -Locking Behavior -~~~~~~~~~~~~~~~~ - -Firebird locks tables aggressively. For this reason, a DROP TABLE may -hang until other transactions are released. SQLAlchemy does its best -to release transactions as quickly as possible. The most common cause -of hanging transactions is a non-fully consumed result set, i.e.:: - - result = engine.execute("select * from table") - row = result.fetchone() - return - -Where above, the ``ResultProxy`` has not been fully consumed.
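As the passage goes on to note, calling ``first()`` both fetches a row and releases the underlying resources; a minimal sketch (``engine`` is assumed to be bound to Firebird)::

    result = engine.execute("select * from table")
    row = result.first()  # fetches one row, then closes cursor/connection resources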
The -connection will be returned to the pool and the transactional state -rolled back once the Python garbage collector reclaims the objects -which hold onto the connection, which often occurs asynchronously. -The above use case can be alleviated by calling ``first()`` on the -``ResultProxy`` which will fetch the first row and immediately close -all remaining cursor/connection resources. - -RETURNING support -~~~~~~~~~~~~~~~~~ - -Firebird 2.0 supports returning a result set from inserts, and 2.1 -extends that to deletes and updates. This is generically exposed by -the SQLAlchemy ``returning()`` method, such as:: - - # INSERT..RETURNING - result = table.insert().returning(table.c.col1, table.c.col2).\\ - values(name='foo') - print result.fetchall() - - # UPDATE..RETURNING - raises = empl.update().returning(empl.c.id, empl.c.salary).\\ - where(empl.c.sales>100).\\ - values(dict(salary=empl.c.salary * 1.1)) - print raises.fetchall() - - -.. _dialects: http://mc-computing.com/Databases/Firebird/SQL_Dialect.html - -""" - -import datetime, re - -from sqlalchemy import schema as sa_schema -from sqlalchemy import exc, types as sqltypes, sql, util -from sqlalchemy.sql import expression -from sqlalchemy.engine import base, default, reflection -from sqlalchemy.sql import compiler - - -from sqlalchemy.types import (BIGINT, BLOB, BOOLEAN, DATE, - FLOAT, INTEGER, NUMERIC, SMALLINT, - TEXT, TIME, TIMESTAMP) - - -RESERVED_WORDS = set([ - "active", "add", "admin", "after", "all", "alter", "and", "any", "as", - "asc", "ascending", "at", "auto", "avg", "before", "begin", "between", - "bigint", "bit_length", "blob", "both", "by", "case", "cast", "char", - "character", "character_length", "char_length", "check", "close", - "collate", "column", "commit", "committed", "computed", "conditional", - "connect", "constraint", "containing", "count", "create", "cross", - "cstring", "current", "current_connection", "current_date", - "current_role", "current_time", "current_timestamp", - "current_transaction", "current_user", "cursor", "database", "date", - "day", "dec", "decimal", "declare", "default", "delete", "desc", - "descending", "disconnect", "distinct", "do", "domain", "double", - "drop", "else", "end", "entry_point", "escape", "exception", - "execute", "exists", "exit", "external", "extract", "fetch", "file", - "filter", "float", "for", "foreign", "from", "full", "function", - "gdscode", "generator", "gen_id", "global", "grant", "group", - "having", "hour", "if", "in", "inactive", "index", "inner", - "input_type", "insensitive", "insert", "int", "integer", "into", "is", - "isolation", "join", "key", "leading", "left", "length", "level", - "like", "long", "lower", "manual", "max", "maximum_segment", "merge", - "min", "minute", "module_name", "month", "names", "national", - "natural", "nchar", "no", "not", "null", "numeric", "octet_length", - "of", "on", "only", "open", "option", "or", "order", "outer", - "output_type", "overflow", "page", "pages", "page_size", "parameter", - "password", "plan", "position", "post_event", "precision", "primary", - "privileges", "procedure", "protected", "rdb$db_key", "read", "real", - "record_version", "recreate", "recursive", "references", "release", - "reserv", "reserving", "retain", "returning_values", "returns", - "revoke", "right", "rollback", "rows", "row_count", "savepoint", - "schema", "second", "segment", "select", "sensitive", "set", "shadow", - "shared", "singular", "size", "smallint", "snapshot", "some", "sort", - "sqlcode", "stability", "start", "starting", "starts", 
"statistics", - "sub_type", "sum", "suspend", "table", "then", "time", "timestamp", - "to", "trailing", "transaction", "trigger", "trim", "uncommitted", - "union", "unique", "update", "upper", "user", "using", "value", - "values", "varchar", "variable", "varying", "view", "wait", "when", - "where", "while", "with", "work", "write", "year", - ]) - - -class _StringType(sqltypes.String): - """Base for Firebird string types.""" - - def __init__(self, charset = None, **kw): - self.charset = charset - super(_StringType, self).__init__(**kw) - -class VARCHAR(_StringType, sqltypes.VARCHAR): - """Firebird VARCHAR type""" - __visit_name__ = 'VARCHAR' - - def __init__(self, length = None, **kwargs): - super(VARCHAR, self).__init__(length=length, **kwargs) - -class CHAR(_StringType, sqltypes.CHAR): - """Firebird CHAR type""" - __visit_name__ = 'CHAR' - - def __init__(self, length = None, **kwargs): - super(CHAR, self).__init__(length=length, **kwargs) - -colspecs = { -} - -ischema_names = { - 'SHORT': SMALLINT, - 'LONG': BIGINT, - 'QUAD': FLOAT, - 'FLOAT': FLOAT, - 'DATE': DATE, - 'TIME': TIME, - 'TEXT': TEXT, - 'INT64': NUMERIC, - 'DOUBLE': FLOAT, - 'TIMESTAMP': TIMESTAMP, - 'VARYING': VARCHAR, - 'CSTRING': CHAR, - 'BLOB': BLOB, - } - - -# TODO: date conversion types (should be implemented as _FBDateTime, -# _FBDate, etc. as bind/result functionality is required) - -class FBTypeCompiler(compiler.GenericTypeCompiler): - def visit_boolean(self, type_): - return self.visit_SMALLINT(type_) - - def visit_datetime(self, type_): - return self.visit_TIMESTAMP(type_) - - def visit_TEXT(self, type_): - return "BLOB SUB_TYPE 1" - - def visit_BLOB(self, type_): - return "BLOB SUB_TYPE 0" - - def _extend_string(self, type_, basic): - charset = getattr(type_, 'charset', None) - if charset is None: - return basic - else: - return '%s CHARACTER SET %s' % (basic, charset) - - def visit_CHAR(self, type_): - basic = super(FBTypeCompiler, self).visit_CHAR(type_) - return self._extend_string(type_, basic) - - def visit_VARCHAR(self, type_): - basic = super(FBTypeCompiler, self).visit_VARCHAR(type_) - return self._extend_string(type_, basic) - - - -class FBCompiler(sql.compiler.SQLCompiler): - """Firebird specific idiosyncrasies""" - - def visit_mod(self, binary, **kw): - # Firebird lacks a builtin modulo operator, but there is - # an equivalent function in the ib_udf library. 
- return "mod(%s, %s)" % ( - self.process(binary.left), - self.process(binary.right)) - - def visit_alias(self, alias, asfrom=False, **kwargs): - if self.dialect._version_two: - return super(FBCompiler, self).\ - visit_alias(alias, asfrom=asfrom, **kwargs) - else: - # Override to not use the AS keyword which FB 1.5 does not like - if asfrom: - alias_name = isinstance(alias.name, - expression._truncated_label) and \ - self._truncated_identifier("alias", - alias.name) or alias.name - - return self.process( - alias.original, asfrom=asfrom, **kwargs) + \ - " " + \ - self.preparer.format_alias(alias, alias_name) - else: - return self.process(alias.original, **kwargs) - - def visit_substring_func(self, func, **kw): - s = self.process(func.clauses.clauses[0]) - start = self.process(func.clauses.clauses[1]) - if len(func.clauses.clauses) > 2: - length = self.process(func.clauses.clauses[2]) - return "SUBSTRING(%s FROM %s FOR %s)" % (s, start, length) - else: - return "SUBSTRING(%s FROM %s)" % (s, start) - - def visit_length_func(self, function, **kw): - if self.dialect._version_two: - return "char_length" + self.function_argspec(function) - else: - return "strlen" + self.function_argspec(function) - - visit_char_length_func = visit_length_func - - def function_argspec(self, func, **kw): - # TODO: this probably will need to be - # narrowed to a fixed list, some no-arg functions - # may require parens - see similar example in the oracle - # dialect - if func.clauses is not None and len(func.clauses): - return self.process(func.clause_expr) - else: - return "" - - def default_from(self): - return " FROM rdb$database" - - def visit_sequence(self, seq): - return "gen_id(%s, 1)" % self.preparer.format_sequence(seq) - - def get_select_precolumns(self, select): - """Called when building a ``SELECT`` statement, position is just - before column list Firebird puts the limit and offset right - after the ``SELECT``... 
- """ - - result = "" - if select._limit: - result += "FIRST %s " % self.process(sql.literal(select._limit)) - if select._offset: - result +="SKIP %s " % self.process(sql.literal(select._offset)) - if select._distinct: - result += "DISTINCT " - return result - - def limit_clause(self, select): - """Already taken care of in the `get_select_precolumns` method.""" - - return "" - - def returning_clause(self, stmt, returning_cols): - - columns = [ - self.process( - self.label_select_column(None, c, asfrom=False), - within_columns_clause=True, - result_map=self.result_map - ) - for c in expression._select_iterables(returning_cols) - ] - return 'RETURNING ' + ', '.join(columns) - - -class FBDDLCompiler(sql.compiler.DDLCompiler): - """Firebird syntactic idiosyncrasies""" - - def visit_create_sequence(self, create): - """Generate a ``CREATE GENERATOR`` statement for the sequence.""" - - # no syntax for these - # http://www.firebirdsql.org/manual/generatorguide-sqlsyntax.html - if create.element.start is not None: - raise NotImplemented( - "Firebird SEQUENCE doesn't support START WITH") - if create.element.increment is not None: - raise NotImplemented( - "Firebird SEQUENCE doesn't support INCREMENT BY") - - if self.dialect._version_two: - return "CREATE SEQUENCE %s" % \ - self.preparer.format_sequence(create.element) - else: - return "CREATE GENERATOR %s" % \ - self.preparer.format_sequence(create.element) - - def visit_drop_sequence(self, drop): - """Generate a ``DROP GENERATOR`` statement for the sequence.""" - - if self.dialect._version_two: - return "DROP SEQUENCE %s" % \ - self.preparer.format_sequence(drop.element) - else: - return "DROP GENERATOR %s" % \ - self.preparer.format_sequence(drop.element) - - -class FBIdentifierPreparer(sql.compiler.IdentifierPreparer): - """Install Firebird specific reserved words.""" - - reserved_words = RESERVED_WORDS - - def __init__(self, dialect): - super(FBIdentifierPreparer, self).__init__(dialect, omit_schema=True) - - -class FBExecutionContext(default.DefaultExecutionContext): - def fire_sequence(self, seq, type_): - """Get the next value from the sequence using ``gen_id()``.""" - - return self._execute_scalar( - "SELECT gen_id(%s, 1) FROM rdb$database" % - self.dialect.identifier_preparer.format_sequence(seq), - type_ - ) - - -class FBDialect(default.DefaultDialect): - """Firebird dialect""" - - name = 'firebird' - - max_identifier_length = 31 - - supports_sequences = True - sequences_optional = False - supports_default_values = True - postfetch_lastrowid = False - - supports_native_boolean = False - - requires_name_normalize = True - supports_empty_insert = False - - statement_compiler = FBCompiler - ddl_compiler = FBDDLCompiler - preparer = FBIdentifierPreparer - type_compiler = FBTypeCompiler - execution_ctx_cls = FBExecutionContext - - colspecs = colspecs - ischema_names = ischema_names - - # defaults to dialect ver. 
3, - # will be autodetected off upon - # first connect - _version_two = True - - def initialize(self, connection): - super(FBDialect, self).initialize(connection) - self._version_two = ('firebird' in self.server_version_info and \ - self.server_version_info >= (2, ) - ) or \ - ('interbase' in self.server_version_info and \ - self.server_version_info >= (6, ) - ) - - if not self._version_two: - # TODO: whatever other pre < 2.0 stuff goes here - self.ischema_names = ischema_names.copy() - self.ischema_names['TIMESTAMP'] = sqltypes.DATE - self.colspecs = { - sqltypes.DateTime: sqltypes.DATE - } - - self.implicit_returning = self._version_two and \ - self.__dict__.get('implicit_returning', True) - - def normalize_name(self, name): - # Remove trailing spaces: FB uses a CHAR() type, - # that is padded with spaces - name = name and name.rstrip() - if name is None: - return None - elif name.upper() == name and \ - not self.identifier_preparer._requires_quotes(name.lower()): - return name.lower() - else: - return name - - def denormalize_name(self, name): - if name is None: - return None - elif name.lower() == name and \ - not self.identifier_preparer._requires_quotes(name.lower()): - return name.upper() - else: - return name - - def has_table(self, connection, table_name, schema=None): - """Return ``True`` if the given table exists, ignoring - the `schema`.""" - - tblqry = """ - SELECT 1 AS has_table FROM rdb$database - WHERE EXISTS (SELECT rdb$relation_name - FROM rdb$relations - WHERE rdb$relation_name=?) - """ - c = connection.execute(tblqry, [self.denormalize_name(table_name)]) - return c.first() is not None - - def has_sequence(self, connection, sequence_name, schema=None): - """Return ``True`` if the given sequence (generator) exists.""" - - genqry = """ - SELECT 1 AS has_sequence FROM rdb$database - WHERE EXISTS (SELECT rdb$generator_name - FROM rdb$generators - WHERE rdb$generator_name=?) - """ - c = connection.execute(genqry, [self.denormalize_name(sequence_name)]) - return c.first() is not None - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - s = """ - SELECT DISTINCT rdb$relation_name - FROM rdb$relation_fields - WHERE rdb$system_flag=0 AND rdb$view_context IS NULL - """ - return [self.normalize_name(row[0]) for row in connection.execute(s)] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - s = """ - SELECT distinct rdb$view_name - FROM rdb$view_relations - """ - return [self.normalize_name(row[0]) for row in connection.execute(s)] - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - qry = """ - SELECT rdb$view_source AS view_source - FROM rdb$relations - WHERE rdb$relation_name=? - """ - rp = connection.execute(qry, [self.denormalize_name(view_name)]) - row = rp.first() - if row: - return row['view_source'] - else: - return None - - @reflection.cache - def get_primary_keys(self, connection, table_name, schema=None, **kw): - # Query to extract the PK/FK constrained fields of the given table - keyqry = """ - SELECT se.rdb$field_name AS fname - FROM rdb$relation_constraints rc - JOIN rdb$index_segments se ON rc.rdb$index_name=se.rdb$index_name - WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? 
- """ - tablename = self.denormalize_name(table_name) - # get primary key fields - c = connection.execute(keyqry, ["PRIMARY KEY", tablename]) - pkfields = [self.normalize_name(r['fname']) for r in c.fetchall()] - return pkfields - - @reflection.cache - def get_column_sequence(self, connection, - table_name, column_name, - schema=None, **kw): - tablename = self.denormalize_name(table_name) - colname = self.denormalize_name(column_name) - # Heuristic-query to determine the generator associated to a PK field - genqry = """ - SELECT trigdep.rdb$depended_on_name AS fgenerator - FROM rdb$dependencies tabdep - JOIN rdb$dependencies trigdep - ON tabdep.rdb$dependent_name=trigdep.rdb$dependent_name - AND trigdep.rdb$depended_on_type=14 - AND trigdep.rdb$dependent_type=2 - JOIN rdb$triggers trig ON - trig.rdb$trigger_name=tabdep.rdb$dependent_name - WHERE tabdep.rdb$depended_on_name=? - AND tabdep.rdb$depended_on_type=0 - AND trig.rdb$trigger_type=1 - AND tabdep.rdb$field_name=? - AND (SELECT count(*) - FROM rdb$dependencies trigdep2 - WHERE trigdep2.rdb$dependent_name = trigdep.rdb$dependent_name) = 2 - """ - genr = connection.execute(genqry, [tablename, colname]).first() - if genr is not None: - return dict(name=self.normalize_name(genr['fgenerator'])) - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - # Query to extract the details of all the fields of the given table - tblqry = """ - SELECT r.rdb$field_name AS fname, - r.rdb$null_flag AS null_flag, - t.rdb$type_name AS ftype, - f.rdb$field_sub_type AS stype, - f.rdb$field_length/ - COALESCE(cs.rdb$bytes_per_character,1) AS flen, - f.rdb$field_precision AS fprec, - f.rdb$field_scale AS fscale, - COALESCE(r.rdb$default_source, - f.rdb$default_source) AS fdefault - FROM rdb$relation_fields r - JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name - JOIN rdb$types t - ON t.rdb$type=f.rdb$field_type AND - t.rdb$field_name='RDB$FIELD_TYPE' - LEFT JOIN rdb$character_sets cs ON - f.rdb$character_set_id=cs.rdb$character_set_id - WHERE f.rdb$system_flag=0 AND r.rdb$relation_name=? - ORDER BY r.rdb$field_position - """ - # get the PK, used to determine the eventual associated sequence - pkey_cols = self.get_primary_keys(connection, table_name) - - tablename = self.denormalize_name(table_name) - # get all of the fields for this table - c = connection.execute(tblqry, [tablename]) - cols = [] - while True: - row = c.fetchone() - if row is None: - break - name = self.normalize_name(row['fname']) - orig_colname = row['fname'] - - # get the data type - colspec = row['ftype'].rstrip() - coltype = self.ischema_names.get(colspec) - if coltype is None: - util.warn("Did not recognize type '%s' of column '%s'" % - (colspec, name)) - coltype = sqltypes.NULLTYPE - elif colspec == 'INT64': - coltype = coltype( - precision=row['fprec'], - scale=row['fscale'] * -1) - elif colspec in ('VARYING', 'CSTRING'): - coltype = coltype(row['flen']) - elif colspec == 'TEXT': - coltype = TEXT(row['flen']) - elif colspec == 'BLOB': - if row['stype'] == 1: - coltype = TEXT() - else: - coltype = BLOB() - else: - coltype = coltype() - - # does it have a default value? 
- defvalue = None - if row['fdefault'] is not None: - # the value comes down as "DEFAULT 'value'": there may be - # more than one whitespace around the "DEFAULT" keyword - # and it may also be lower case - # (see also http://tracker.firebirdsql.org/browse/CORE-356) - defexpr = row['fdefault'].lstrip() - assert defexpr[:8].rstrip().upper() == \ - 'DEFAULT', "Unrecognized default value: %s" % \ - defexpr - defvalue = defexpr[8:].strip() - if defvalue == 'NULL': - # Redundant - defvalue = None - col_d = { - 'name' : name, - 'type' : coltype, - 'nullable' : not bool(row['null_flag']), - 'default' : defvalue, - 'autoincrement':defvalue is None - } - - if orig_colname.lower() == orig_colname: - col_d['quote'] = True - - # if the PK is a single field, try to see if its linked to - # a sequence thru a trigger - if len(pkey_cols)==1 and name==pkey_cols[0]: - seq_d = self.get_column_sequence(connection, tablename, name) - if seq_d is not None: - col_d['sequence'] = seq_d - - cols.append(col_d) - return cols - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - # Query to extract the details of each UK/FK of the given table - fkqry = """ - SELECT rc.rdb$constraint_name AS cname, - cse.rdb$field_name AS fname, - ix2.rdb$relation_name AS targetrname, - se.rdb$field_name AS targetfname - FROM rdb$relation_constraints rc - JOIN rdb$indices ix1 ON ix1.rdb$index_name=rc.rdb$index_name - JOIN rdb$indices ix2 ON ix2.rdb$index_name=ix1.rdb$foreign_key - JOIN rdb$index_segments cse ON - cse.rdb$index_name=ix1.rdb$index_name - JOIN rdb$index_segments se - ON se.rdb$index_name=ix2.rdb$index_name - AND se.rdb$field_position=cse.rdb$field_position - WHERE rc.rdb$constraint_type=? AND rc.rdb$relation_name=? - ORDER BY se.rdb$index_name, se.rdb$field_position - """ - tablename = self.denormalize_name(table_name) - - c = connection.execute(fkqry, ["FOREIGN KEY", tablename]) - fks = util.defaultdict(lambda:{ - 'name' : None, - 'constrained_columns' : [], - 'referred_schema' : None, - 'referred_table' : None, - 'referred_columns' : [] - }) - - for row in c: - cname = self.normalize_name(row['cname']) - fk = fks[cname] - if not fk['name']: - fk['name'] = cname - fk['referred_table'] = self.normalize_name(row['targetrname']) - fk['constrained_columns'].append( - self.normalize_name(row['fname'])) - fk['referred_columns'].append( - self.normalize_name(row['targetfname'])) - return fks.values() - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, **kw): - qry = """ - SELECT ix.rdb$index_name AS index_name, - ix.rdb$unique_flag AS unique_flag, - ic.rdb$field_name AS field_name - FROM rdb$indices ix - JOIN rdb$index_segments ic - ON ix.rdb$index_name=ic.rdb$index_name - LEFT OUTER JOIN rdb$relation_constraints - ON rdb$relation_constraints.rdb$index_name = - ic.rdb$index_name - WHERE ix.rdb$relation_name=? 
AND ix.rdb$foreign_key IS NULL - AND rdb$relation_constraints.rdb$constraint_type IS NULL - ORDER BY index_name, field_name - """ - c = connection.execute(qry, [self.denormalize_name(table_name)]) - - indexes = util.defaultdict(dict) - for row in c: - indexrec = indexes[row['index_name']] - if 'name' not in indexrec: - indexrec['name'] = self.normalize_name(row['index_name']) - indexrec['column_names'] = [] - indexrec['unique'] = bool(row['unique_flag']) - - indexrec['column_names'].append( - self.normalize_name(row['field_name'])) - - return indexes.values() - - def do_execute(self, cursor, statement, parameters, context=None): - # kinterbase does not accept a None, but wants an empty list - # when there are no arguments. - cursor.execute(statement, parameters or []) - - def do_rollback(self, connection): - # Use the retaining feature, that keeps the transaction going - connection.rollback(True) - - def do_commit(self, connection): - # Use the retaining feature, that keeps the transaction going - connection.commit(True) diff --git a/libs/sqlalchemy/dialects/firebird/kinterbasdb.py b/libs/sqlalchemy/dialects/firebird/kinterbasdb.py deleted file mode 100644 index ddca91db..00000000 --- a/libs/sqlalchemy/dialects/firebird/kinterbasdb.py +++ /dev/null @@ -1,167 +0,0 @@ -# firebird/kinterbasdb.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -The most common way to connect to a Firebird engine is implemented by -kinterbasdb__, currently maintained__ directly by the Firebird people. - -The connection URL is of the form -``firebird[+kinterbasdb]://user:password@host:port/path/to/db[?key=value&key=value...]``. - -Kinterbasedb backend specific keyword arguments are: - -* type_conv - select the kind of mapping done on the types: by default - SQLAlchemy uses 200 with Unicode, datetime and decimal support (see - details__). - -* concurrency_level - set the backend policy with regards to threading - issues: by default SQLAlchemy uses policy 1 (see details__). - -* enable_rowcount - True by default, setting this to False disables - the usage of "cursor.rowcount" with the - Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically - after any UPDATE or DELETE statement. When disabled, SQLAlchemy's - ResultProxy will return -1 for result.rowcount. The rationale here is - that Kinterbasdb requires a second round trip to the database when - .rowcount is called - since SQLA's resultproxy automatically closes - the cursor after a non-result-returning statement, rowcount must be - called, if at all, before the result object is returned. Additionally, - cursor.rowcount may not return correct results with older versions - of Firebird, and setting this flag to False will also cause the - SQLAlchemy ORM to ignore its usage. 
The behavior can also be controlled on a - per-execution basis using the `enable_rowcount` option with - :meth:`execution_options()`:: - - conn = engine.connect().execution_options(enable_rowcount=True) - r = conn.execute(stmt) - print r.rowcount - -__ http://sourceforge.net/projects/kinterbasdb -__ http://firebirdsql.org/index.php?op=devel&sub=python -__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#adv_param_conv_dynamic_type_translation -__ http://kinterbasdb.sourceforge.net/dist_docs/usage.html#special_issue_concurrency -""" - -from sqlalchemy.dialects.firebird.base import FBDialect, \ - FBCompiler, FBExecutionContext -from sqlalchemy import util, types as sqltypes -from sqlalchemy.util.compat import decimal -from re import match - - -class _FBNumeric_kinterbasdb(sqltypes.Numeric): - def bind_processor(self, dialect): - def process(value): - if isinstance(value, decimal.Decimal): - return str(value) - else: - return value - return process - -class FBExecutionContext_kinterbasdb(FBExecutionContext): - @property - def rowcount(self): - if self.execution_options.get('enable_rowcount', - self.dialect.enable_rowcount): - return self.cursor.rowcount - else: - return -1 - -class FBDialect_kinterbasdb(FBDialect): - driver = 'kinterbasdb' - supports_sane_rowcount = False - supports_sane_multi_rowcount = False - execution_ctx_cls = FBExecutionContext_kinterbasdb - - supports_native_decimal = True - - colspecs = util.update_copy( - FBDialect.colspecs, - { - sqltypes.Numeric:_FBNumeric_kinterbasdb, - } - - ) - - def __init__(self, type_conv=200, concurrency_level=1, - enable_rowcount=True, **kwargs): - super(FBDialect_kinterbasdb, self).__init__(**kwargs) - self.enable_rowcount = enable_rowcount - self.type_conv = type_conv - self.concurrency_level = concurrency_level - if enable_rowcount: - self.supports_sane_rowcount = True - - @classmethod - def dbapi(cls): - k = __import__('kinterbasdb') - return k - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if opts.get('port'): - opts['host'] = "%s/%s" % (opts['host'], opts['port']) - del opts['port'] - opts.update(url.query) - - util.coerce_kw_type(opts, 'type_conv', int) - - type_conv = opts.pop('type_conv', self.type_conv) - concurrency_level = opts.pop('concurrency_level', - self.concurrency_level) - - if self.dbapi is not None: - initialized = getattr(self.dbapi, 'initialized', None) - if initialized is None: - # CVS rev 1.96 changed the name of the attribute: - # http://kinterbasdb.cvs.sourceforge.net/viewvc/kinterbasdb/Kinterbasdb-3.0/__init__.py?r1=1.95&r2=1.96 - initialized = getattr(self.dbapi, '_initialized', False) - if not initialized: - self.dbapi.init(type_conv=type_conv, - concurrency_level=concurrency_level) - return ([], opts) - - def _get_server_version_info(self, connection): - """Get the version of the Firebird server used by a connection. - - Returns a tuple of (`major`, `minor`, `build`), three integers - representing the version of the attached server. - """ - - # This is the simpler approach (the other uses the services api), - # that for backward compatibility reasons returns a string like - # LI-V6.3.3.12981 Firebird 2.0 - # where the first version is a fake one resembling the old - # Interbase signature. 
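- # (Hedged illustration, not from the original source: given the parsing - # below, _parse_version_info("LI-V6.3.3.12981 Firebird 2.0") is expected - # to return (2, 0, 12981, 'firebird'), while a bare Interbase-style - # string such as "WI-V6.3.3.12981" would yield (6, 3, 3, 'interbase').)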
- - fbconn = connection.connection - version = fbconn.server_version - - return self._parse_version_info(version) - - def _parse_version_info(self, version): - m = match('\w+-V(\d+)\.(\d+)\.(\d+)\.(\d+)( \w+ (\d+)\.(\d+))?', version) - if not m: - raise AssertionError( - "Could not determine version from string '%s'" % version) - - if m.group(5) != None: - return tuple([int(x) for x in m.group(6, 7, 4)] + ['firebird']) - else: - return tuple([int(x) for x in m.group(1, 2, 3)] + ['interbase']) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, (self.dbapi.OperationalError, - self.dbapi.ProgrammingError)): - msg = str(e) - return ('Unable to complete network request to host' in msg or - 'Invalid connection state' in msg or - 'Invalid cursor state' in msg or - 'connection shutdown' in msg) - else: - return False - -dialect = FBDialect_kinterbasdb diff --git a/libs/sqlalchemy/dialects/informix/__init__.py b/libs/sqlalchemy/dialects/informix/__init__.py deleted file mode 100644 index bd633da5..00000000 --- a/libs/sqlalchemy/dialects/informix/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# informix/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.informix import base, informixdb - -base.dialect = informixdb.dialect \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/informix/base.py b/libs/sqlalchemy/dialects/informix/base.py deleted file mode 100644 index 07561f8d..00000000 --- a/libs/sqlalchemy/dialects/informix/base.py +++ /dev/null @@ -1,596 +0,0 @@ -# informix/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# coding: gbk -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Informix database. - -.. note:: - - The Informix dialect functions on current SQLAlchemy versions - but is not regularly tested, and may have many issues and - caveats not currently handled. 
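A self-contained illustration of the microsecond truncation performed by the ``InfoDateTime`` and ``InfoTime`` bind processors defined further down (the value is invented)::

    import datetime

    value = datetime.datetime(2013, 1, 2, 3, 4, 5, 678901)
    if value.microsecond:
        value = value.replace(microsecond=0)
    # value == datetime.datetime(2013, 1, 2, 3, 4, 5), matching
    # Informix's DATETIME YEAR TO SECOND resolution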
- -""" - - -import datetime - -from sqlalchemy import sql, schema, exc, pool, util -from sqlalchemy.sql import compiler, text -from sqlalchemy.engine import default, reflection -from sqlalchemy import types as sqltypes - -RESERVED_WORDS = set( - ["abs", "absolute", "access", "access_method", "acos", "active", "add", - "address", "add_months", "admin", "after", "aggregate", "alignment", - "all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append", - "array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach", - "attributes", "audit", "authentication", "authid", "authorization", - "authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode", - "avg", "avoid_execute", "avoid_fact", "avoid_full", "avoid_hash", - "avoid_index", "avoid_index_sj", "avoid_multi_index", "avoid_nl", - "avoid_star_join", "avoid_subqf", "based", "before", "begin", - "between", "bigint", "bigserial", "binary", "bitand", "bitandnot", - "bitnot", "bitor", "bitxor", "blob", "blobdir", "boolean", "both", - "bound_impl_pdq", "buffered", "builtin", "by", "byte", "cache", "call", - "cannothash", "cardinality", "cascade", "case", "cast", "ceil", "char", - "character", "character_length", "char_length", "check", "class", - "class_origin", "client", "clob", "clobdir", "close", "cluster", - "clustersize", "cobol", "codeset", "collation", "collection", - "column", "columns", "commit", "committed", "commutator", "component", - "components", "concat", "concurrent", "connect", "connection", - "connection_name", "connect_by_iscycle", "connect_by_isleaf", - "connect_by_rootconst", "constraint", "constraints", "constructor", - "context", "continue", "copy", "cos", "costfunc", "count", "crcols", - "create", "cross", "current", "current_role", "currval", "cursor", - "cycle", "database", "datafiles", "dataskip", "date", "datetime", - "day", "dba", "dbdate", "dbinfo", "dbpassword", "dbsecadm", - "dbservername", "deallocate", "debug", "debugmode", "debug_env", "dec", - "decimal", "declare", "decode", "decrypt_binary", "decrypt_char", - "dec_t", "default", "default_role", "deferred", "deferred_prepare", - "define", "delay", "delete", "deleting", "delimited", "delimiter", - "deluxe", "desc", "describe", "descriptor", "detach", "diagnostics", - "directives", "dirty", "disable", "disabled", "disconnect", "disk", - "distinct", "distributebinary", "distributesreferences", - "distributions", "document", "domain", "donotdistribute", "dormant", - "double", "drop", "dtime_t", "each", "elif", "else", "enabled", - "encryption", "encrypt_aes", "encrypt_tdes", "end", "enum", - "environment", "error", "escape", "exception", "exclusive", "exec", - "execute", "executeanywhere", "exemption", "exists", "exit", "exp", - "explain", "explicit", "express", "expression", "extdirectives", - "extend", "extent", "external", "fact", "false", "far", "fetch", - "file", "filetoblob", "filetoclob", "fillfactor", "filtering", "first", - "first_rows", "fixchar", "fixed", "float", "floor", "flush", "for", - "force", "forced", "force_ddl_exec", "foreach", "foreign", "format", - "format_units", "fortran", "found", "fraction", "fragment", - "fragments", "free", "from", "full", "function", "general", "get", - "gethint", "global", "go", "goto", "grant", "greaterthan", - "greaterthanorequal", "group", "handlesnulls", "hash", "having", "hdr", - "hex", "high", "hint", "hold", "home", "hour", "idslbacreadarray", - "idslbacreadset", "idslbacreadtree", "idslbacrules", - "idslbacwritearray", "idslbacwriteset", "idslbacwritetree", - "idssecuritylabel", "if", 
"ifx_auto_reprepare", "ifx_batchedread_table", - "ifx_int8_t", "ifx_lo_create_spec_t", "ifx_lo_stat_t", "immediate", - "implicit", "implicit_pdq", "in", "inactive", "increment", "index", - "indexes", "index_all", "index_sj", "indicator", "informix", "init", - "initcap", "inline", "inner", "inout", "insert", "inserting", "instead", - "int", "int8", "integ", "integer", "internal", "internallength", - "interval", "into", "intrvl_t", "is", "iscanonical", "isolation", - "item", "iterator", "java", "join", "keep", "key", "label", "labeleq", - "labelge", "labelglb", "labelgt", "labelle", "labellt", "labellub", - "labeltostring", "language", "last", "last_day", "leading", "left", - "length", "lessthan", "lessthanorequal", "let", "level", "like", - "limit", "list", "listing", "load", "local", "locator", "lock", "locks", - "locopy", "loc_t", "log", "log10", "logn", "long", "loop", "lotofile", - "low", "lower", "lpad", "ltrim", "lvarchar", "matched", "matches", - "max", "maxerrors", "maxlen", "maxvalue", "mdy", "median", "medium", - "memory", "memory_resident", "merge", "message_length", "message_text", - "middle", "min", "minute", "minvalue", "mod", "mode", "moderate", - "modify", "module", "money", "month", "months_between", "mounting", - "multiset", "multi_index", "name", "nchar", "negator", "new", "next", - "nextval", "next_day", "no", "nocache", "nocycle", "nomaxvalue", - "nomigrate", "nominvalue", "none", "non_dim", "non_resident", "noorder", - "normal", "not", "notemplatearg", "notequal", "null", "nullif", - "numeric", "numrows", "numtodsinterval", "numtoyminterval", "nvarchar", - "nvl", "octet_length", "of", "off", "old", "on", "online", "only", - "opaque", "opclass", "open", "optcompind", "optical", "optimization", - "option", "or", "order", "ordered", "out", "outer", "output", - "override", "page", "parallelizable", "parameter", "partition", - "pascal", "passedbyvalue", "password", "pdqpriority", "percaltl_cos", - "pipe", "pli", "pload", "policy", "pow", "power", "precision", - "prepare", "previous", "primary", "prior", "private", "privileges", - "procedure", "properties", "public", "put", "raise", "range", "raw", - "read", "real", "recordend", "references", "referencing", "register", - "rejectfile", "relative", "release", "remainder", "rename", - "reoptimization", "repeatable", "replace", "replication", "reserve", - "resolution", "resource", "restart", "restrict", "resume", "retain", - "retainupdatelocks", "return", "returned_sqlstate", "returning", - "returns", "reuse", "revoke", "right", "robin", "role", "rollback", - "rollforward", "root", "round", "routine", "row", "rowid", "rowids", - "rows", "row_count", "rpad", "rtrim", "rule", "sameas", "samples", - "sampling", "save", "savepoint", "schema", "scroll", "seclabel_by_comp", - "seclabel_by_name", "seclabel_to_char", "second", "secondary", - "section", "secured", "security", "selconst", "select", "selecting", - "selfunc", "selfuncargs", "sequence", "serial", "serial8", - "serializable", "serveruuid", "server_name", "session", "set", - "setsessionauth", "share", "short", "siblings", "signed", "sin", - "sitename", "size", "skall", "skinhibit", "skip", "skshow", - "smallfloat", "smallint", "some", "specific", "sql", "sqlcode", - "sqlcontext", "sqlerror", "sqlstate", "sqlwarning", "sqrt", - "stability", "stack", "standard", "start", "star_join", "statchange", - "statement", "static", "statistics", "statlevel", "status", "stdev", - "step", "stop", "storage", "store", "strategies", "string", - "stringtolabel", "struct", "style", 
"subclass_origin", "substr", - "substring", "sum", "support", "sync", "synonym", "sysdate", - "sysdbclose", "sysdbopen", "system", "sys_connect_by_path", "table", - "tables", "tan", "task", "temp", "template", "test", "text", "then", - "time", "timeout", "to", "today", "to_char", "to_date", - "to_dsinterval", "to_number", "to_yminterval", "trace", "trailing", - "transaction", "transition", "tree", "trigger", "triggers", "trim", - "true", "trunc", "truncate", "trusted", "type", "typedef", "typeid", - "typename", "typeof", "uid", "uncommitted", "under", "union", - "unique", "units", "unknown", "unload", "unlock", "unsigned", - "update", "updating", "upon", "upper", "usage", "use", - "uselastcommitted", "user", "use_hash", "use_nl", "use_subqf", - "using", "value", "values", "var", "varchar", "variable", "variance", - "variant", "varying", "vercols", "view", "violations", "void", - "volatile", "wait", "warning", "weekday", "when", "whenever", "where", - "while", "with", "without", "work", "write", "writedown", "writeup", - "xadatasource", "xid", "xload", "xunload", "year" - ]) - -class InfoDateTime(sqltypes.DateTime): - def bind_processor(self, dialect): - def process(value): - if value is not None: - if value.microsecond: - value = value.replace(microsecond=0) - return value - return process - -class InfoTime(sqltypes.Time): - def bind_processor(self, dialect): - def process(value): - if value is not None: - if value.microsecond: - value = value.replace(microsecond=0) - return value - return process - - def result_processor(self, dialect, coltype): - def process(value): - if isinstance(value, datetime.datetime): - return value.time() - else: - return value - return process - -colspecs = { - sqltypes.DateTime : InfoDateTime, - sqltypes.TIMESTAMP: InfoDateTime, - sqltypes.Time: InfoTime, -} - - -ischema_names = { - 0 : sqltypes.CHAR, # CHAR - 1 : sqltypes.SMALLINT, # SMALLINT - 2 : sqltypes.INTEGER, # INT - 3 : sqltypes.FLOAT, # Float - 3 : sqltypes.Float, # SmallFloat - 5 : sqltypes.DECIMAL, # DECIMAL - 6 : sqltypes.Integer, # Serial - 7 : sqltypes.DATE, # DATE - 8 : sqltypes.Numeric, # MONEY - 10 : sqltypes.DATETIME, # DATETIME - 11 : sqltypes.LargeBinary, # BYTE - 12 : sqltypes.TEXT, # TEXT - 13 : sqltypes.VARCHAR, # VARCHAR - 15 : sqltypes.NCHAR, # NCHAR - 16 : sqltypes.NVARCHAR, # NVARCHAR - 17 : sqltypes.Integer, # INT8 - 18 : sqltypes.Integer, # Serial8 - 43 : sqltypes.String, # LVARCHAR - -1 : sqltypes.BLOB, # BLOB - -1 : sqltypes.CLOB, # CLOB -} - - -class InfoTypeCompiler(compiler.GenericTypeCompiler): - def visit_DATETIME(self, type_): - return "DATETIME YEAR TO SECOND" - - def visit_TIME(self, type_): - return "DATETIME HOUR TO SECOND" - - def visit_TIMESTAMP(self, type_): - return "DATETIME YEAR TO SECOND" - - def visit_large_binary(self, type_): - return "BYTE" - - def visit_boolean(self, type_): - return "SMALLINT" - -class InfoSQLCompiler(compiler.SQLCompiler): - def default_from(self): - return " from systables where tabname = 'systables' " - - def get_select_precolumns(self, select): - s = "" - if select._offset: - s += "SKIP %s " % select._offset - if select._limit: - s += "FIRST %s " % select._limit - s += select._distinct and "DISTINCT " or "" - return s - - def visit_select(self, select, asfrom=False, parens=True, **kw): - text = compiler.SQLCompiler.visit_select(self, select, asfrom, parens, **kw) - if asfrom and parens and self.dialect.server_version_info < (11,): - #assuming that 11 version doesn't need this, not tested - return "table(multiset" + text + ")" - else: - 
-            return text
-
-    def limit_clause(self, select):
-        return ""
-
-    def visit_function(self, func, **kw):
-        if func.name.lower() == 'current_date':
-            return "today"
-        elif func.name.lower() == 'current_time':
-            return "CURRENT HOUR TO SECOND"
-        elif func.name.lower() in ('current_timestamp', 'now'):
-            return "CURRENT YEAR TO SECOND"
-        else:
-            return compiler.SQLCompiler.visit_function(self, func, **kw)
-
-    def visit_mod(self, binary, **kw):
-        return "MOD(%s, %s)" % (self.process(binary.left), self.process(binary.right))
-
-
-class InfoDDLCompiler(compiler.DDLCompiler):
-
-    def visit_add_constraint(self, create):
-        preparer = self.preparer
-        return "ALTER TABLE %s ADD CONSTRAINT %s" % (
-            self.preparer.format_table(create.element.table),
-            self.process(create.element)
-        )
-
-    def get_column_specification(self, column, **kw):
-        colspec = self.preparer.format_column(column)
-        first = None
-        if column.primary_key and column.autoincrement:
-            try:
-                first = [c for c in column.table.primary_key.columns
-                         if (c.autoincrement and
-                             isinstance(c.type, sqltypes.Integer) and
-                             not c.foreign_keys)].pop(0)
-            except IndexError:
-                pass
-
-        if column is first:
-            colspec += " SERIAL"
-        else:
-            colspec += " " + self.dialect.type_compiler.process(column.type)
-            default = self.get_column_default_string(column)
-            if default is not None:
-                colspec += " DEFAULT " + default
-
-        if not column.nullable:
-            colspec += " NOT NULL"
-
-        return colspec
-
-    def get_column_default_string(self, column):
-        if (isinstance(column.server_default, schema.DefaultClause) and
-            isinstance(column.server_default.arg, basestring)):
-            if isinstance(column.type, (sqltypes.Integer, sqltypes.Numeric)):
-                return self.sql_compiler.process(text(column.server_default.arg))
-
-        return super(InfoDDLCompiler, self).get_column_default_string(column)
-
-    ### Informix wants the constraint name at the end, hence this is copied from sql/compiler.py
-    def visit_primary_key_constraint(self, constraint):
-        if len(constraint) == 0:
-            return ''
-        text = "PRIMARY KEY "
-        text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote)
-                                   for c in constraint)
-        text += self.define_constraint_deferrability(constraint)
-
-        if constraint.name is not None:
-            text += " CONSTRAINT %s" % self.preparer.format_constraint(constraint)
-        return text
-
-    def visit_foreign_key_constraint(self, constraint):
-        preparer = self.dialect.identifier_preparer
-        remote_table = list(constraint._elements.values())[0].column.table
-        text = "FOREIGN KEY (%s) REFERENCES %s (%s)" % (
-            ', '.join(preparer.quote(f.parent.name, f.parent.quote)
-                      for f in constraint._elements.values()),
-            preparer.format_table(remote_table),
-            ', '.join(preparer.quote(f.column.name, f.column.quote)
-                      for f in constraint._elements.values())
-        )
-        text += self.define_constraint_cascades(constraint)
-        text += self.define_constraint_deferrability(constraint)
-
-        if constraint.name is not None:
-            text += " CONSTRAINT %s " % \
-                preparer.format_constraint(constraint)
-        return text
-
-    def visit_unique_constraint(self, constraint):
-        text = "UNIQUE (%s)" % (', '.join(self.preparer.quote(c.name, c.quote) for c in constraint))
-        text += self.define_constraint_deferrability(constraint)
-
-        if constraint.name is not None:
-            text += " CONSTRAINT %s " % self.preparer.format_constraint(constraint)
-        return text
-
-class InformixIdentifierPreparer(compiler.IdentifierPreparer):
-
-    reserved_words = RESERVED_WORDS
-
-
-class InformixDialect(default.DefaultDialect):
-    name = 'informix'
-
-    max_identifier_length = 128 # adjusts at
runtime based on server version - - type_compiler = InfoTypeCompiler - statement_compiler = InfoSQLCompiler - ddl_compiler = InfoDDLCompiler - colspecs = colspecs - ischema_names = ischema_names - preparer = InformixIdentifierPreparer - default_paramstyle = 'qmark' - - def __init__(self, has_transactions=True, *args, **kwargs): - self.has_transactions = has_transactions - default.DefaultDialect.__init__(self, *args, **kwargs) - - def initialize(self, connection): - super(InformixDialect, self).initialize(connection) - - # http://www.querix.com/support/knowledge-base/error_number_message/error_200 - if self.server_version_info < (9, 2): - self.max_identifier_length = 18 - else: - self.max_identifier_length = 128 - - def do_begin(self, connection): - cu = connection.cursor() - cu.execute('SET LOCK MODE TO WAIT') - if self.has_transactions: - cu.execute('SET ISOLATION TO REPEATABLE READ') - - def do_commit(self, connection): - if self.has_transactions: - connection.commit() - - def do_rollback(self, connection): - if self.has_transactions: - connection.rollback() - - def _get_table_names(self, connection, schema, type, **kw): - schema = schema or self.default_schema_name - s = "select tabname, owner from systables where owner=? and tabtype=?" - return [row[0] for row in connection.execute(s, schema, type)] - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - return self._get_table_names(connection, schema, 'T', **kw) - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - return self._get_table_names(connection, schema, 'V', **kw) - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = "select owner from systables" - return [row[0] for row in connection.execute(s)] - - def has_table(self, connection, table_name, schema=None): - schema = schema or self.default_schema_name - cursor = connection.execute( - """select tabname from systables where tabname=? and owner=?""", - table_name, schema) - return cursor.first() is not None - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - schema = schema or self.default_schema_name - c = connection.execute( - """select colname, coltype, collength, t3.default, t1.colno from - syscolumns as t1 , systables as t2 , OUTER sysdefaults as t3 - where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? 
- and t3.tabid = t2.tabid and t3.colno = t1.colno - order by t1.colno""", table_name, schema) - - primary_cols = self.get_primary_keys(connection, table_name, schema, **kw) - - columns = [] - rows = c.fetchall() - for name, colattr, collength, default, colno in rows: - name = name.lower() - - autoincrement = False - primary_key = False - - if name in primary_cols: - primary_key = True - - # in 7.31, coltype = 0x000 - # ^^-- column type - # ^-- 1 not null, 0 null - not_nullable, coltype = divmod(colattr, 256) - if coltype not in (0, 13) and default: - default = default.split()[-1] - - if coltype == 6: # Serial, mark as autoincrement - autoincrement = True - - if coltype == 0 or coltype == 13: # char, varchar - coltype = ischema_names[coltype](collength) - if default: - default = "'%s'" % default - elif coltype == 5: # decimal - precision, scale = (collength & 0xFF00) >> 8, collength & 0xFF - if scale == 255: - scale = 0 - coltype = sqltypes.Numeric(precision, scale) - else: - try: - coltype = ischema_names[coltype] - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (coltype, name)) - coltype = sqltypes.NULLTYPE - - column_info = dict(name=name, type=coltype, nullable=not not_nullable, - default=default, autoincrement=autoincrement, - primary_key=primary_key) - columns.append(column_info) - return columns - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - schema_sel = schema or self.default_schema_name - c = connection.execute( - """select t1.constrname as cons_name, - t4.colname as local_column, t7.tabname as remote_table, - t6.colname as remote_column, t7.owner as remote_owner - from sysconstraints as t1 , systables as t2 , - sysindexes as t3 , syscolumns as t4 , - sysreferences as t5 , syscolumns as t6 , systables as t7 , - sysconstraints as t8 , sysindexes as t9 - where t1.tabid = t2.tabid and t2.tabname=? and t2.owner=? 
-          and t1.constrtype = 'R'
-          and t3.tabid = t2.tabid and t3.idxname = t1.idxname
-          and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
-          t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
-          t3.part11, t3.part12, t3.part13, t3.part14, t3.part15, t3.part16)
-          and t5.constrid = t1.constrid and t8.constrid = t5.primary
-          and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
-          t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
-          t9.part11, t9.part12, t9.part13, t9.part14, t9.part15, t9.part16) and t9.idxname =
-          t8.idxname
-          and t7.tabid = t5.ptabid""", table_name, schema_sel)
-
-
-        def fkey_rec():
-            return {
-                'name' : None,
-                'constrained_columns' : [],
-                'referred_schema' : None,
-                'referred_table' : None,
-                'referred_columns' : []
-            }
-
-        fkeys = util.defaultdict(fkey_rec)
-
-        rows = c.fetchall()
-        for cons_name, local_column, \
-                remote_table, remote_column, remote_owner in rows:
-
-            rec = fkeys[cons_name]
-            rec['name'] = cons_name
-            local_cols, remote_cols = \
-                rec['constrained_columns'], rec['referred_columns']
-
-            if not rec['referred_table']:
-                rec['referred_table'] = remote_table
-                if schema is not None:
-                    rec['referred_schema'] = remote_owner
-
-            if local_column not in local_cols:
-                local_cols.append(local_column)
-            if remote_column not in remote_cols:
-                remote_cols.append(remote_column)
-
-        return fkeys.values()
-
-    @reflection.cache
-    def get_primary_keys(self, connection, table_name, schema=None, **kw):
-        schema = schema or self.default_schema_name
-
-        # Select the column positions from sysindexes for sysconstraints
-        data = connection.execute(
-            """select t2.*
-            from systables as t1, sysindexes as t2, sysconstraints as t3
-            where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
-            and t2.idxname=t3.idxname and t3.constrtype='P'""",
-            table_name, schema
-        ).fetchall()
-
-        colpositions = set()
-
-        for row in data:
-            colpos = set([getattr(row, 'part%d' % x) for x in range(1,17)])
-            colpositions |= colpos
-
-        if not len(colpositions):
-            return []
-
-        # Select the column names using the columnpositions
-        # TODO: Maybe cache a bit of those col infos (eg select all colnames for one table)
-        place_holder = ','.join('?'*len(colpositions))
-        c = connection.execute(
-            """select t1.colname
-            from syscolumns as t1, systables as t2
-            where t2.tabname=? and t1.tabid = t2.tabid and
-            t1.colno in (%s)""" % place_holder,
-            table_name, *colpositions
-        ).fetchall()
-
-        return reduce(lambda x,y: list(x)+list(y), c, [])
-
-    @reflection.cache
-    def get_indexes(self, connection, table_name, schema, **kw):
-        # TODO: schema...
-        c = connection.execute(
-            """select t1.*
-            from sysindexes as t1 , systables as t2
-            where t1.tabid = t2.tabid and t2.tabname=?""",
-            table_name)
-
-        indexes = []
-        for row in c.fetchall():
-            colnames = [getattr(row, 'part%d' % x) for x in range(1,17)]
-            colnames = [x for x in colnames if x]
-            place_holder = ','.join('?'*len(colnames))
-            c = connection.execute(
-                """select t1.colname
-                from syscolumns as t1, systables as t2
-                where t2.tabname=?
and t1.tabid = t2.tabid and - t1.colno in (%s)""" % place_holder, - table_name, *colnames - ).fetchall() - c = reduce(lambda x,y: list(x)+list(y), c, []) - indexes.append({ - 'name': row.idxname, - 'unique': row.idxtype.lower() == 'u', - 'column_names': c - }) - return indexes - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - schema = schema or self.default_schema_name - c = connection.execute( - """select t1.viewtext - from sysviews as t1 , systables as t2 - where t1.tabid=t2.tabid and t2.tabname=? - and t2.owner=? order by seqno""", - view_name, schema).fetchall() - - return ''.join([row[0] for row in c]) - - def _get_default_schema_name(self, connection): - return connection.execute('select CURRENT_ROLE from systables').scalar() diff --git a/libs/sqlalchemy/dialects/informix/informixdb.py b/libs/sqlalchemy/dialects/informix/informixdb.py deleted file mode 100644 index 8b543467..00000000 --- a/libs/sqlalchemy/dialects/informix/informixdb.py +++ /dev/null @@ -1,73 +0,0 @@ -# informix/informixdb.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for the informixdb DBAPI. - -informixdb is available at: - - http://informixdb.sourceforge.net/ - -Connecting -^^^^^^^^^^ - -Sample informix connection:: - - engine = create_engine('informix+informixdb://user:password@host/dbname') - -""" - -import re - -from sqlalchemy.dialects.informix.base import InformixDialect -from sqlalchemy.engine import default - -VERSION_RE = re.compile(r'(\d+)\.(\d+)(.+\d+)') - -class InformixExecutionContext_informixdb(default.DefaultExecutionContext): - def post_exec(self): - if self.isinsert: - self._lastrowid = self.cursor.sqlerrd[1] - - def get_lastrowid(self): - return self._lastrowid - - -class InformixDialect_informixdb(InformixDialect): - driver = 'informixdb' - execution_ctx_cls = InformixExecutionContext_informixdb - - @classmethod - def dbapi(cls): - return __import__('informixdb') - - def create_connect_args(self, url): - if url.host: - dsn = '%s@%s' % (url.database, url.host) - else: - dsn = url.database - - if url.username: - opt = {'user': url.username, 'password': url.password} - else: - opt = {} - - return ([dsn], opt) - - def _get_server_version_info(self, connection): - # http://informixdb.sourceforge.net/manual.html#inspecting-version-numbers - v = VERSION_RE.split(connection.connection.dbms_version) - return (int(v[1]), int(v[2]), v[3]) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.OperationalError): - return 'closed the connection' in str(e) \ - or 'connection not open' in str(e) - else: - return False - - -dialect = InformixDialect_informixdb diff --git a/libs/sqlalchemy/dialects/maxdb/__init__.py b/libs/sqlalchemy/dialects/maxdb/__init__.py deleted file mode 100644 index 9d1d6418..00000000 --- a/libs/sqlalchemy/dialects/maxdb/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# maxdb/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.maxdb import base, sapdb - -base.dialect = sapdb.dialect \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/maxdb/base.py b/libs/sqlalchemy/dialects/maxdb/base.py deleted file mode 100644 index 68ae630e..00000000 --- 
a/libs/sqlalchemy/dialects/maxdb/base.py
+++ /dev/null
@@ -1,1117 +0,0 @@
-# maxdb/base.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Support for the MaxDB database.
-
-.. note::
-
-    The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
-    pending development efforts to bring it up-to-date.
-
-Overview
---------
-
-The ``maxdb`` dialect is **experimental** and has only been tested on 7.6.03.007
-and 7.6.00.037. Of these, **only 7.6.03.007 will work** with SQLAlchemy's ORM.
-The earlier version has severe ``LEFT JOIN`` limitations and will return
-incorrect results from even very simple ORM queries.
-
-Only the native Python DB-API is currently supported. ODBC driver support
-is a future enhancement.
-
-Connecting
-----------
-
-The username is case-sensitive. If you usually connect to the
-database with sqlcli and other tools in lower case, you likely need to
-use upper case for DB-API.
-
-Implementation Notes
---------------------
-
-With the 7.6.00.37 driver and Python 2.5, it seems that all DB-API
-generated exceptions are broken and can cause Python to crash.
-
-For 'somecol.in_([])' to work, the IN operator's generation must be changed
-to cast 'NULL' to a numeric, i.e. NUM(NULL). The DB-API doesn't accept a
-bind parameter there, so that particular generation must inline the NULL value,
-which depends on [ticket:807].
-
-The DB-API is very picky about where bind params may be used in queries.
-
-Bind params for some functions (e.g. MOD) need type information supplied.
-The dialect does not yet do this automatically.
-
-Max will occasionally throw up 'bad sql, compile again' exceptions for
-perfectly valid SQL. The dialect does not currently handle these; more
-research is needed.
-
-MaxDB 7.5 and SAP DB <= 7.4 reportedly do not support schemas. A very
-slightly different version of this dialect would be required to support
-those versions, and can easily be added if there is demand. Some other
-required components such as a Max-aware 'old oracle style' join compiler
-(thetas with (+) outer indicators) are already done and available for
-integration; email the devel list if you're interested in working on
-this.
-
-Versions tested: 7.6.03.07 and 7.6.00.37, native Python DB-API
-
-* MaxDB has severe limitations on OUTER JOINs, which are essential to ORM
-  eager loading. And rather than raise an error if a SELECT can't be serviced,
-  the database simply returns incorrect results.
-* Version 7.6.03.07 seems to JOIN properly; however, the docs do not show the
-  OUTER restrictions being lifted (as of this writing), and no changelog is
-  available to confirm either. If you are using a different server version and
-  your tasks require the ORM or any semi-advanced SQL through the SQL layer,
-  running the SQLAlchemy test suite against your database is HIGHLY
-  recommended before you begin.
-* Version 7.6.00.37 is LHS/RHS sensitive in `FROM lhs LEFT OUTER JOIN rhs ON
-  lhs.col=rhs.col` vs `rhs.col=lhs.col`!
-* Version 7.6.00.37 is confused by `SELECT DISTINCT col as alias FROM t ORDER
-  BY col` - these aliased, DISTINCT, ordered queries need to be re-written to
-  order by the alias name.
-* Version 7.6.x supports creating a SAVEPOINT but not its RELEASE.
-* MaxDB supports autoincrement-style columns (DEFAULT SERIAL) and independent
-  sequences. When including a DEFAULT SERIAL column in an insert, 0 needs to
-  be inserted rather than NULL to generate a value.
-* MaxDB supports ANSI and "old Oracle style" theta joins with (+) outer join
-  indicators.
-* The SQLAlchemy dialect is schema-aware and probably won't function correctly
-  on older, schema-less server versions (pre-7.6?). Support for schema-less
-  server versions could be added if there's call.
-* ORDER BY is not supported in subqueries. LIMIT is not supported in
-  subqueries. In 7.6.00.37, TOP does work in subqueries, but without LIMIT it
-  is not so useful. OFFSET does not work in 7.6 despite being in the docs.
-  Row number tricks in WHERE via ROWNO may be possible but it only seems to
-  allow less-than comparison!
-* Version 7.6.03.07 can't LIMIT if a derived table is in FROM: `SELECT * FROM
-  (SELECT * FROM a) LIMIT 2`
-* MaxDB does not support sql's CAST and can only usefully cast two types.
-  There isn't much implicit type conversion, so be precise when creating
-  `PassiveDefaults` in DDL generation: `'3'` and `3` aren't the same.
-
-sapdb.dbapi
-^^^^^^^^^^^
-
-* As of 2007-10-22 the Python 2.4 and 2.5 compatible versions of the DB-API
-  are no longer available. A forum posting at SAP states that the Python
-  driver will be available again "in the future". The last release from MySQL
-  AB works if you can find it.
-* sequence.NEXTVAL skips every other value!
-* No rowcount for executemany()
-* If an INSERT into a table with a DEFAULT SERIAL column inserts the results
-  of a function `INSERT INTO t VALUES (LENGTH('foo'))`, the cursor won't have
-  the serial id. It needs to be manually yanked from tablename.CURRVAL.
-* Super-duper picky about where bind params can be placed. Not smart about
-  converting Python types for some functions, such as `MOD(5, ?)`.
-* LONG (text, binary) values in result sets are read-once. The dialect uses a
-  caching RowProxy when these types are present.
-* Connection objects seem like they want to be either `close()`d or garbage
-  collected, but not both. There's a warning issued but it seems harmless.
-
-
-"""
-import datetime, itertools, re
-
-from sqlalchemy import exc, schema, sql, util, processors
-from sqlalchemy.sql import operators as sql_operators, expression as sql_expr
-from sqlalchemy.sql import compiler, visitors
-from sqlalchemy.engine import base as engine_base, default, reflection
-from sqlalchemy import types as sqltypes
-
-
-class _StringType(sqltypes.String):
-    _type = None
-
-    def __init__(self, length=None, encoding=None, **kw):
-        super(_StringType, self).__init__(length=length, **kw)
-        self.encoding = encoding
-
-    def bind_processor(self, dialect):
-        if self.encoding == 'unicode':
-            return None
-        else:
-            def process(value):
-                if isinstance(value, unicode):
-                    return value.encode(dialect.encoding)
-                else:
-                    return value
-            return process
-
-    def result_processor(self, dialect, coltype):
-        # XXX: this code is probably very slow and one should try (if at all
-        # possible) to determine the correct code path on a per-connection
-        # basis (i.e., here in result_processor, instead of inside the
-        # processor function itself) and probably also use a few generic
-        # processors, or possibly per query (though there is no mechanism
-        # for that yet).
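-        # The per-value loop below normalizes three cases: unicode values
-        # pass through, byte strings are decoded with the dialect encoding
-        # when unicode conversion is requested, and LONG locators (anything
-        # with a .read() method) are drained and re-checked from the top.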
- def process(value): - while True: - if value is None: - return None - elif isinstance(value, unicode): - return value - elif isinstance(value, str): - if self.convert_unicode or dialect.convert_unicode: - return value.decode(dialect.encoding) - else: - return value - elif hasattr(value, 'read'): - # some sort of LONG, snarf and retry - value = value.read(value.remainingLength()) - continue - else: - # unexpected type, return as-is - return value - return process - - -class MaxString(_StringType): - _type = 'VARCHAR' - - -class MaxUnicode(_StringType): - _type = 'VARCHAR' - - def __init__(self, length=None, **kw): - kw['encoding'] = 'unicode' - super(MaxUnicode, self).__init__(length=length, **kw) - - -class MaxChar(_StringType): - _type = 'CHAR' - - -class MaxText(_StringType): - _type = 'LONG' - - def __init__(self, length=None, **kw): - super(MaxText, self).__init__(length, **kw) - - def get_col_spec(self): - spec = 'LONG' - if self.encoding is not None: - spec = ' '.join((spec, self.encoding)) - elif self.convert_unicode: - spec = ' '.join((spec, 'UNICODE')) - - return spec - - -class MaxNumeric(sqltypes.Numeric): - """The FIXED (also NUMERIC, DECIMAL) data type.""" - - def __init__(self, precision=None, scale=None, **kw): - kw.setdefault('asdecimal', True) - super(MaxNumeric, self).__init__(scale=scale, precision=precision, - **kw) - - def bind_processor(self, dialect): - return None - - -class MaxTimestamp(sqltypes.DateTime): - def bind_processor(self, dialect): - def process(value): - if value is None: - return None - elif isinstance(value, basestring): - return value - elif dialect.datetimeformat == 'internal': - ms = getattr(value, 'microsecond', 0) - return value.strftime("%Y%m%d%H%M%S" + ("%06u" % ms)) - elif dialect.datetimeformat == 'iso': - ms = getattr(value, 'microsecond', 0) - return value.strftime("%Y-%m-%d %H:%M:%S." + ("%06u" % ms)) - else: - raise exc.InvalidRequestError( - "datetimeformat '%s' is not supported." % ( - dialect.datetimeformat,)) - return process - - def result_processor(self, dialect, coltype): - if dialect.datetimeformat == 'internal': - def process(value): - if value is None: - return None - else: - return datetime.datetime( - *[int(v) - for v in (value[0:4], value[4:6], value[6:8], - value[8:10], value[10:12], value[12:14], - value[14:])]) - elif dialect.datetimeformat == 'iso': - def process(value): - if value is None: - return None - else: - return datetime.datetime( - *[int(v) - for v in (value[0:4], value[5:7], value[8:10], - value[11:13], value[14:16], value[17:19], - value[20:])]) - else: - raise exc.InvalidRequestError( - "datetimeformat '%s' is not supported." % - dialect.datetimeformat) - return process - - -class MaxDate(sqltypes.Date): - def bind_processor(self, dialect): - def process(value): - if value is None: - return None - elif isinstance(value, basestring): - return value - elif dialect.datetimeformat == 'internal': - return value.strftime("%Y%m%d") - elif dialect.datetimeformat == 'iso': - return value.strftime("%Y-%m-%d") - else: - raise exc.InvalidRequestError( - "datetimeformat '%s' is not supported." 
% (
-                        dialect.datetimeformat,))
-        return process
-
-    def result_processor(self, dialect, coltype):
-        if dialect.datetimeformat == 'internal':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.date(int(value[0:4]), int(value[4:6]),
-                                         int(value[6:8]))
-        elif dialect.datetimeformat == 'iso':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.date(int(value[0:4]), int(value[5:7]),
-                                         int(value[8:10]))
-        else:
-            raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." %
-                dialect.datetimeformat)
-        return process
-
-
-class MaxTime(sqltypes.Time):
-    def bind_processor(self, dialect):
-        def process(value):
-            if value is None:
-                return None
-            elif isinstance(value, basestring):
-                return value
-            elif dialect.datetimeformat == 'internal':
-                return value.strftime("%H%M%S")
-            elif dialect.datetimeformat == 'iso':
-                return value.strftime("%H-%M-%S")
-            else:
-                raise exc.InvalidRequestError(
-                    "datetimeformat '%s' is not supported." % (
-                        dialect.datetimeformat,))
-        return process
-
-    def result_processor(self, dialect, coltype):
-        # value offsets follow the bind formats above: "HHMMSS" for
-        # 'internal' and "HH-MM-SS" for 'iso'
-        if dialect.datetimeformat == 'internal':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.time(int(value[0:2]), int(value[2:4]),
-                                         int(value[4:6]))
-        elif dialect.datetimeformat == 'iso':
-            def process(value):
-                if value is None:
-                    return None
-                else:
-                    return datetime.time(int(value[0:2]), int(value[3:5]),
-                                         int(value[6:8]))
-        else:
-            raise exc.InvalidRequestError(
-                "datetimeformat '%s' is not supported." %
-                dialect.datetimeformat)
-        return process
-
-
-class MaxBlob(sqltypes.LargeBinary):
-    def bind_processor(self, dialect):
-        return processors.to_str
-
-    def result_processor(self, dialect, coltype):
-        def process(value):
-            if value is None:
-                return None
-            else:
-                return value.read(value.remainingLength())
-        return process
-
-class MaxDBTypeCompiler(compiler.GenericTypeCompiler):
-    def _string_spec(self, string_spec, type_):
-        if type_.length is None:
-            spec = 'LONG'
-        else:
-            spec = '%s(%s)' % (string_spec, type_.length)
-
-        if getattr(type_, 'encoding'):
-            spec = ' '.join([spec, getattr(type_, 'encoding').upper()])
-        return spec
-
-    def visit_text(self, type_):
-        spec = 'LONG'
-        if getattr(type_, 'encoding', None):
-            spec = ' '.join((spec, type_.encoding))
-        elif type_.convert_unicode:
-            spec = ' '.join((spec, 'UNICODE'))
-
-        return spec
-
-    def visit_char(self, type_):
-        return self._string_spec("CHAR", type_)
-
-    def visit_string(self, type_):
-        return self._string_spec("VARCHAR", type_)
-
-    def visit_large_binary(self, type_):
-        return "LONG BYTE"
-
-    def visit_numeric(self, type_):
-        if type_.scale and type_.precision:
-            return 'FIXED(%s, %s)' % (type_.precision, type_.scale)
-        elif type_.precision:
-            return 'FIXED(%s)' % type_.precision
-        else:
-            return 'INTEGER'
-
-    def visit_BOOLEAN(self, type_):
-        return "BOOLEAN"
-
-colspecs = {
-    sqltypes.Numeric: MaxNumeric,
-    sqltypes.DateTime: MaxTimestamp,
-    sqltypes.Date: MaxDate,
-    sqltypes.Time: MaxTime,
-    sqltypes.String: MaxString,
-    sqltypes.Unicode: MaxUnicode,
-    sqltypes.LargeBinary: MaxBlob,
-    sqltypes.Text: MaxText,
-    sqltypes.CHAR: MaxChar,
-    sqltypes.TIMESTAMP: MaxTimestamp,
-    sqltypes.BLOB: MaxBlob,
-    }
-
-ischema_names = {
-    'boolean': sqltypes.BOOLEAN,
-    'char': sqltypes.CHAR,
-    'character': sqltypes.CHAR,
-    'date': sqltypes.DATE,
-    'fixed': sqltypes.Numeric,
-    'float': sqltypes.FLOAT,
-    'int': sqltypes.INT,
-    'integer': sqltypes.INT,
-    'long binary': sqltypes.BLOB,
'long unicode': sqltypes.Text, - 'long': sqltypes.Text, - 'long': sqltypes.Text, - 'smallint': sqltypes.SmallInteger, - 'time': sqltypes.Time, - 'timestamp': sqltypes.TIMESTAMP, - 'varchar': sqltypes.VARCHAR, - } - -# TODO: migrate this to sapdb.py -class MaxDBExecutionContext(default.DefaultExecutionContext): - def post_exec(self): - # DB-API bug: if there were any functions as values, - # then do another select and pull CURRVAL from the - # autoincrement column's implicit sequence... ugh - if self.compiled.isinsert and not self.executemany: - table = self.compiled.statement.table - index, serial_col = _autoserial_column(table) - - if serial_col and (not self.compiled._safeserial or - not(self._last_inserted_ids) or - self._last_inserted_ids[index] in (None, 0)): - if table.schema: - sql = "SELECT %s.CURRVAL FROM DUAL" % ( - self.compiled.preparer.format_table(table)) - else: - sql = "SELECT CURRENT_SCHEMA.%s.CURRVAL FROM DUAL" % ( - self.compiled.preparer.format_table(table)) - - rs = self.cursor.execute(sql) - id = rs.fetchone()[0] - - if not self._last_inserted_ids: - # This shouldn't ever be > 1? Right? - self._last_inserted_ids = \ - [None] * len(table.primary_key.columns) - self._last_inserted_ids[index] = id - - super(MaxDBExecutionContext, self).post_exec() - - def get_result_proxy(self): - if self.cursor.description is not None: - for column in self.cursor.description: - if column[1] in ('Long Binary', 'Long', 'Long Unicode'): - return MaxDBResultProxy(self) - return engine_base.ResultProxy(self) - - @property - def rowcount(self): - if hasattr(self, '_rowcount'): - return self._rowcount - else: - return self.cursor.rowcount - - def fire_sequence(self, seq): - if seq.optional: - return None - return self._execute_scalar("SELECT %s.NEXTVAL FROM DUAL" % ( - self.dialect.identifier_preparer.format_sequence(seq))) - -class MaxDBCachedColumnRow(engine_base.RowProxy): - """A RowProxy that only runs result_processors once per column.""" - - def __init__(self, parent, row): - super(MaxDBCachedColumnRow, self).__init__(parent, row) - self.columns = {} - self._row = row - self._parent = parent - - def _get_col(self, key): - if key not in self.columns: - self.columns[key] = self._parent._get_col(self._row, key) - return self.columns[key] - - def __iter__(self): - for i in xrange(len(self._row)): - yield self._get_col(i) - - def __repr__(self): - return repr(list(self)) - - def __eq__(self, other): - return ((other is self) or - (other == tuple([self._get_col(key) - for key in xrange(len(self._row))]))) - def __getitem__(self, key): - if isinstance(key, slice): - indices = key.indices(len(self._row)) - return tuple([self._get_col(i) for i in xrange(*indices)]) - else: - return self._get_col(key) - - def __getattr__(self, name): - try: - return self._get_col(name) - except KeyError: - raise AttributeError(name) - - -class MaxDBResultProxy(engine_base.ResultProxy): - _process_row = MaxDBCachedColumnRow - -class MaxDBCompiler(compiler.SQLCompiler): - - function_conversion = { - 'CURRENT_DATE': 'DATE', - 'CURRENT_TIME': 'TIME', - 'CURRENT_TIMESTAMP': 'TIMESTAMP', - } - - # These functions must be written without parens when called with no - # parameters. e.g. 
'SELECT DATE FROM DUAL' not 'SELECT DATE() FROM DUAL' - bare_functions = set([ - 'CURRENT_SCHEMA', 'DATE', 'FALSE', 'SYSDBA', 'TIME', 'TIMESTAMP', - 'TIMEZONE', 'TRANSACTION', 'TRUE', 'USER', 'UID', 'USERGROUP', - 'UTCDATE', 'UTCDIFF']) - - def visit_mod(self, binary, **kw): - return "mod(%s, %s)" % \ - (self.process(binary.left), self.process(binary.right)) - - def default_from(self): - return ' FROM DUAL' - - def for_update_clause(self, select): - clause = select.for_update - if clause is True: - return " WITH LOCK EXCLUSIVE" - elif clause is None: - return "" - elif clause == "read": - return " WITH LOCK" - elif clause == "ignore": - return " WITH LOCK (IGNORE) EXCLUSIVE" - elif clause == "nowait": - return " WITH LOCK (NOWAIT) EXCLUSIVE" - elif isinstance(clause, basestring): - return " WITH LOCK %s" % clause.upper() - elif not clause: - return "" - else: - return " WITH LOCK EXCLUSIVE" - - def function_argspec(self, fn, **kw): - if fn.name.upper() in self.bare_functions: - return "" - elif len(fn.clauses) > 0: - return compiler.SQLCompiler.function_argspec(self, fn, **kw) - else: - return "" - - def visit_function(self, fn, **kw): - transform = self.function_conversion.get(fn.name.upper(), None) - if transform: - fn = fn._clone() - fn.name = transform - return super(MaxDBCompiler, self).visit_function(fn, **kw) - - def visit_cast(self, cast, **kwargs): - # MaxDB only supports casts * to NUMERIC, * to VARCHAR or - # date/time to VARCHAR. Casts of LONGs will fail. - if isinstance(cast.type, (sqltypes.Integer, sqltypes.Numeric)): - return "NUM(%s)" % self.process(cast.clause) - elif isinstance(cast.type, sqltypes.String): - return "CHR(%s)" % self.process(cast.clause) - else: - return self.process(cast.clause) - - def visit_sequence(self, sequence): - if sequence.optional: - return None - else: - return ( - self.dialect.identifier_preparer.format_sequence(sequence) + - ".NEXTVAL") - - class ColumnSnagger(visitors.ClauseVisitor): - def __init__(self): - self.count = 0 - self.column = None - def visit_column(self, column): - self.column = column - self.count += 1 - - def _find_labeled_columns(self, columns, use_labels=False): - labels = {} - for column in columns: - if isinstance(column, basestring): - continue - snagger = self.ColumnSnagger() - snagger.traverse(column) - if snagger.count == 1: - if isinstance(column, sql_expr._Label): - labels[unicode(snagger.column)] = column.name - elif use_labels: - labels[unicode(snagger.column)] = column._label - - return labels - - def order_by_clause(self, select, **kw): - order_by = self.process(select._order_by_clause, **kw) - - # ORDER BY clauses in DISTINCT queries must reference aliased - # inner columns by alias name, not true column name. - if order_by and getattr(select, '_distinct', False): - labels = self._find_labeled_columns(select.inner_columns, - select.use_labels) - if labels: - for needs_alias in labels.keys(): - r = re.compile(r'(^| )(%s)(,| |$)' % - re.escape(needs_alias)) - order_by = r.sub((r'\1%s\3' % labels[needs_alias]), - order_by) - - # No ORDER BY in subqueries. - if order_by: - if self.is_subquery(): - # It's safe to simply drop the ORDER BY if there is no - # LIMIT. Right? Other dialects seem to get away with - # dropping order. 
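-                # (when a LIMIT is present, dropping the ORDER BY would
-                # silently change which rows come back, hence the loud
-                # CompileError below)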
- if select._limit: - raise exc.CompileError( - "MaxDB does not support ORDER BY in subqueries") - else: - return "" - return " ORDER BY " + order_by - else: - return "" - - def get_select_precolumns(self, select): - # Convert a subquery's LIMIT to TOP - sql = select._distinct and 'DISTINCT ' or '' - if self.is_subquery() and select._limit: - if select._offset: - raise exc.InvalidRequestError( - 'MaxDB does not support LIMIT with an offset.') - sql += 'TOP %s ' % select._limit - return sql - - def limit_clause(self, select): - # The docs say offsets are supported with LIMIT. But they're not. - # TODO: maybe emulate by adding a ROWNO/ROWNUM predicate? - # TODO: does MaxDB support bind params for LIMIT / TOP ? - if self.is_subquery(): - # sub queries need TOP - return '' - elif select._offset: - raise exc.InvalidRequestError( - 'MaxDB does not support LIMIT with an offset.') - else: - return ' \n LIMIT %s' % (select._limit,) - - def visit_insert(self, insert): - self.isinsert = True - self._safeserial = True - - colparams = self._get_colparams(insert) - for value in (insert.parameters or {}).itervalues(): - if isinstance(value, sql_expr.Function): - self._safeserial = False - break - - return ''.join(('INSERT INTO ', - self.preparer.format_table(insert.table), - ' (', - ', '.join([self.preparer.format_column(c[0]) - for c in colparams]), - ') VALUES (', - ', '.join([c[1] for c in colparams]), - ')')) - - -class MaxDBIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = set([ - 'abs', 'absolute', 'acos', 'adddate', 'addtime', 'all', 'alpha', - 'alter', 'any', 'ascii', 'asin', 'atan', 'atan2', 'avg', 'binary', - 'bit', 'boolean', 'byte', 'case', 'ceil', 'ceiling', 'char', - 'character', 'check', 'chr', 'column', 'concat', 'constraint', 'cos', - 'cosh', 'cot', 'count', 'cross', 'curdate', 'current', 'curtime', - 'database', 'date', 'datediff', 'day', 'dayname', 'dayofmonth', - 'dayofweek', 'dayofyear', 'dec', 'decimal', 'decode', 'default', - 'degrees', 'delete', 'digits', 'distinct', 'double', 'except', - 'exists', 'exp', 'expand', 'first', 'fixed', 'float', 'floor', 'for', - 'from', 'full', 'get_objectname', 'get_schema', 'graphic', 'greatest', - 'group', 'having', 'hex', 'hextoraw', 'hour', 'ifnull', 'ignore', - 'index', 'initcap', 'inner', 'insert', 'int', 'integer', 'internal', - 'intersect', 'into', 'join', 'key', 'last', 'lcase', 'least', 'left', - 'length', 'lfill', 'list', 'ln', 'locate', 'log', 'log10', 'long', - 'longfile', 'lower', 'lpad', 'ltrim', 'makedate', 'maketime', - 'mapchar', 'max', 'mbcs', 'microsecond', 'min', 'minute', 'mod', - 'month', 'monthname', 'natural', 'nchar', 'next', 'no', 'noround', - 'not', 'now', 'null', 'num', 'numeric', 'object', 'of', 'on', - 'order', 'packed', 'pi', 'power', 'prev', 'primary', 'radians', - 'real', 'reject', 'relative', 'replace', 'rfill', 'right', 'round', - 'rowid', 'rowno', 'rpad', 'rtrim', 'second', 'select', 'selupd', - 'serial', 'set', 'show', 'sign', 'sin', 'sinh', 'smallint', 'some', - 'soundex', 'space', 'sqrt', 'stamp', 'statistics', 'stddev', - 'subdate', 'substr', 'substring', 'subtime', 'sum', 'sysdba', - 'table', 'tan', 'tanh', 'time', 'timediff', 'timestamp', 'timezone', - 'to', 'toidentifier', 'transaction', 'translate', 'trim', 'trunc', - 'truncate', 'ucase', 'uid', 'unicode', 'union', 'update', 'upper', - 'user', 'usergroup', 'using', 'utcdate', 'utcdiff', 'value', 'values', - 'varchar', 'vargraphic', 'variance', 'week', 'weekofyear', 'when', - 'where', 'with', 'year', 'zoned' ]) - - def 
_normalize_name(self, name): - if name is None: - return None - if name.isupper(): - lc_name = name.lower() - if not self._requires_quotes(lc_name): - return lc_name - return name - - def _denormalize_name(self, name): - if name is None: - return None - elif (name.islower() and - not self._requires_quotes(name)): - return name.upper() - else: - return name - - def _maybe_quote_identifier(self, name): - if self._requires_quotes(name): - return self.quote_identifier(name) - else: - return name - - -class MaxDBDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kw): - colspec = [self.preparer.format_column(column), - self.dialect.type_compiler.process(column.type)] - - if not column.nullable: - colspec.append('NOT NULL') - - default = column.default - default_str = self.get_column_default_string(column) - - # No DDL default for columns specified with non-optional sequence- - # this defaulting behavior is entirely client-side. (And as a - # consequence, non-reflectable.) - if (default and isinstance(default, schema.Sequence) and - not default.optional): - pass - # Regular default - elif default_str is not None: - colspec.append('DEFAULT %s' % default_str) - # Assign DEFAULT SERIAL heuristically - elif column.primary_key and column.autoincrement: - # For SERIAL on a non-primary key member, use - # DefaultClause(text('SERIAL')) - try: - first = [c for c in column.table.primary_key.columns - if (c.autoincrement and - (isinstance(c.type, sqltypes.Integer) or - (isinstance(c.type, MaxNumeric) and - c.type.precision)) and - not c.foreign_keys)].pop(0) - if column is first: - colspec.append('DEFAULT SERIAL') - except IndexError: - pass - return ' '.join(colspec) - - def get_column_default_string(self, column): - if isinstance(column.server_default, schema.DefaultClause): - if isinstance(column.default.arg, basestring): - if isinstance(column.type, sqltypes.Integer): - return str(column.default.arg) - else: - return "'%s'" % column.default.arg - else: - return unicode(self._compile(column.default.arg, None)) - else: - return None - - def visit_create_sequence(self, create): - """Creates a SEQUENCE. - - TODO: move to module doc? - - start - With an integer value, set the START WITH option. - - increment - An integer value to increment by. Default is the database default. - - maxdb_minvalue - maxdb_maxvalue - With an integer value, sets the corresponding sequence option. - - maxdb_no_minvalue - maxdb_no_maxvalue - Defaults to False. If true, sets the corresponding sequence option. - - maxdb_cycle - Defaults to False. If true, sets the CYCLE option. - - maxdb_cache - With an integer value, sets the CACHE option. - - maxdb_no_cache - Defaults to False. If true, sets NOCACHE. 
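-
-        As an illustrative sketch (the names here are placeholders), a
-        ``Sequence('id_seq', start=100, increment=2)`` carrying a
-        ``maxdb_cache=50`` option would render roughly as
-        ``CREATE SEQUENCE id_seq INCREMENT BY 2 START WITH 100 CACHE 50``.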
- """ - sequence = create.element - - if (not sequence.optional and - (not self.checkfirst or - not self.dialect.has_sequence(self.connection, sequence.name))): - - ddl = ['CREATE SEQUENCE', - self.preparer.format_sequence(sequence)] - - sequence.increment = 1 - - if sequence.increment is not None: - ddl.extend(('INCREMENT BY', str(sequence.increment))) - - if sequence.start is not None: - ddl.extend(('START WITH', str(sequence.start))) - - opts = dict([(pair[0][6:].lower(), pair[1]) - for pair in sequence.kwargs.items() - if pair[0].startswith('maxdb_')]) - - if 'maxvalue' in opts: - ddl.extend(('MAXVALUE', str(opts['maxvalue']))) - elif opts.get('no_maxvalue', False): - ddl.append('NOMAXVALUE') - if 'minvalue' in opts: - ddl.extend(('MINVALUE', str(opts['minvalue']))) - elif opts.get('no_minvalue', False): - ddl.append('NOMINVALUE') - - if opts.get('cycle', False): - ddl.append('CYCLE') - - if 'cache' in opts: - ddl.extend(('CACHE', str(opts['cache']))) - elif opts.get('no_cache', False): - ddl.append('NOCACHE') - - return ' '.join(ddl) - - -class MaxDBDialect(default.DefaultDialect): - name = 'maxdb' - supports_alter = True - supports_unicode_statements = True - max_identifier_length = 32 - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - preparer = MaxDBIdentifierPreparer - statement_compiler = MaxDBCompiler - ddl_compiler = MaxDBDDLCompiler - execution_ctx_cls = MaxDBExecutionContext - - ported_sqla_06 = False - - colspecs = colspecs - ischema_names = ischema_names - - # MaxDB-specific - datetimeformat = 'internal' - - def __init__(self, _raise_known_sql_errors=False, **kw): - super(MaxDBDialect, self).__init__(**kw) - self._raise_known = _raise_known_sql_errors - - if self.dbapi is None: - self.dbapi_type_map = {} - else: - self.dbapi_type_map = { - 'Long Binary': MaxBlob(), - 'Long byte_t': MaxBlob(), - 'Long Unicode': MaxText(), - 'Timestamp': MaxTimestamp(), - 'Date': MaxDate(), - 'Time': MaxTime(), - datetime.datetime: MaxTimestamp(), - datetime.date: MaxDate(), - datetime.time: MaxTime(), - } - - def do_execute(self, cursor, statement, parameters, context=None): - res = cursor.execute(statement, parameters) - if isinstance(res, int) and context is not None: - context._rowcount = res - - def do_release_savepoint(self, connection, name): - # Does MaxDB truly support RELEASE SAVEPOINT ? All my attempts - # produce "SUBTRANS COMMIT/ROLLBACK not allowed without SUBTRANS - # BEGIN SQLSTATE: I7065" - # Note that ROLLBACK TO works fine. In theory, a RELEASE should - # just free up some transactional resources early, before the overall - # COMMIT/ROLLBACK so omitting it should be relatively ok. - pass - - def _get_default_schema_name(self, connection): - return self.identifier_preparer._normalize_name( - connection.execute( - 'SELECT CURRENT_SCHEMA FROM DUAL').scalar()) - - def has_table(self, connection, table_name, schema=None): - denormalize = self.identifier_preparer._denormalize_name - bind = [denormalize(table_name)] - if schema is None: - sql = ("SELECT tablename FROM TABLES " - "WHERE TABLES.TABLENAME=? AND" - " TABLES.SCHEMANAME=CURRENT_SCHEMA ") - else: - sql = ("SELECT tablename FROM TABLES " - "WHERE TABLES.TABLENAME = ? AND" - " TABLES.SCHEMANAME=? 
") - bind.append(denormalize(schema)) - - rp = connection.execute(sql, bind) - return bool(rp.first()) - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is None: - sql = (" SELECT TABLENAME FROM TABLES WHERE " - " SCHEMANAME=CURRENT_SCHEMA ") - rs = connection.execute(sql) - else: - sql = (" SELECT TABLENAME FROM TABLES WHERE " - " SCHEMANAME=? ") - matchname = self.identifier_preparer._denormalize_name(schema) - rs = connection.execute(sql, matchname) - normalize = self.identifier_preparer._normalize_name - return [normalize(row[0]) for row in rs] - - def reflecttable(self, connection, table, include_columns): - denormalize = self.identifier_preparer._denormalize_name - normalize = self.identifier_preparer._normalize_name - - st = ('SELECT COLUMNNAME, MODE, DATATYPE, CODETYPE, LEN, DEC, ' - ' NULLABLE, "DEFAULT", DEFAULTFUNCTION ' - 'FROM COLUMNS ' - 'WHERE TABLENAME=? AND SCHEMANAME=%s ' - 'ORDER BY POS') - - fk = ('SELECT COLUMNNAME, FKEYNAME, ' - ' REFSCHEMANAME, REFTABLENAME, REFCOLUMNNAME, RULE, ' - ' (CASE WHEN REFSCHEMANAME = CURRENT_SCHEMA ' - ' THEN 1 ELSE 0 END) AS in_schema ' - 'FROM FOREIGNKEYCOLUMNS ' - 'WHERE TABLENAME=? AND SCHEMANAME=%s ' - 'ORDER BY FKEYNAME ') - - params = [denormalize(table.name)] - if not table.schema: - st = st % 'CURRENT_SCHEMA' - fk = fk % 'CURRENT_SCHEMA' - else: - st = st % '?' - fk = fk % '?' - params.append(denormalize(table.schema)) - - rows = connection.execute(st, params).fetchall() - if not rows: - raise exc.NoSuchTableError(table.fullname) - - include_columns = set(include_columns or []) - - for row in rows: - (name, mode, col_type, encoding, length, scale, - nullable, constant_def, func_def) = row - - name = normalize(name) - - if include_columns and name not in include_columns: - continue - - type_args, type_kw = [], {} - if col_type == 'FIXED': - type_args = length, scale - # Convert FIXED(10) DEFAULT SERIAL to our Integer - if (scale == 0 and - func_def is not None and func_def.startswith('SERIAL')): - col_type = 'INTEGER' - type_args = length, - elif col_type in 'FLOAT': - type_args = length, - elif col_type in ('CHAR', 'VARCHAR'): - type_args = length, - type_kw['encoding'] = encoding - elif col_type == 'LONG': - type_kw['encoding'] = encoding - - try: - type_cls = ischema_names[col_type.lower()] - type_instance = type_cls(*type_args, **type_kw) - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (col_type, name)) - type_instance = sqltypes.NullType - - col_kw = {'autoincrement': False} - col_kw['nullable'] = (nullable == 'YES') - col_kw['primary_key'] = (mode == 'KEY') - - if func_def is not None: - if func_def.startswith('SERIAL'): - if col_kw['primary_key']: - # No special default- let the standard autoincrement - # support handle SERIAL pk columns. 
- col_kw['autoincrement'] = True - else: - # strip current numbering - col_kw['server_default'] = schema.DefaultClause( - sql.text('SERIAL')) - col_kw['autoincrement'] = True - else: - col_kw['server_default'] = schema.DefaultClause( - sql.text(func_def)) - elif constant_def is not None: - col_kw['server_default'] = schema.DefaultClause(sql.text( - "'%s'" % constant_def.replace("'", "''"))) - - table.append_column(schema.Column(name, type_instance, **col_kw)) - - fk_sets = itertools.groupby(connection.execute(fk, params), - lambda row: row.FKEYNAME) - for fkeyname, fkey in fk_sets: - fkey = list(fkey) - if include_columns: - key_cols = set([r.COLUMNNAME for r in fkey]) - if key_cols != include_columns: - continue - - columns, referants = [], [] - quote = self.identifier_preparer._maybe_quote_identifier - - for row in fkey: - columns.append(normalize(row.COLUMNNAME)) - if table.schema or not row.in_schema: - referants.append('.'.join( - [quote(normalize(row[c])) - for c in ('REFSCHEMANAME', 'REFTABLENAME', - 'REFCOLUMNNAME')])) - else: - referants.append('.'.join( - [quote(normalize(row[c])) - for c in ('REFTABLENAME', 'REFCOLUMNNAME')])) - - constraint_kw = {'name': fkeyname.lower()} - if fkey[0].RULE is not None: - rule = fkey[0].RULE - if rule.startswith('DELETE '): - rule = rule[7:] - constraint_kw['ondelete'] = rule - - table_kw = {} - if table.schema or not row.in_schema: - table_kw['schema'] = normalize(fkey[0].REFSCHEMANAME) - - ref_key = schema._get_table_key(normalize(fkey[0].REFTABLENAME), - table_kw.get('schema')) - if ref_key not in table.metadata.tables: - schema.Table(normalize(fkey[0].REFTABLENAME), - table.metadata, - autoload=True, autoload_with=connection, - **table_kw) - - constraint = schema.ForeignKeyConstraint( - columns, referants, link_to_name=True, - **constraint_kw) - table.append_constraint(constraint) - - def has_sequence(self, connection, name): - # [ticket:726] makes this schema-aware. - denormalize = self.identifier_preparer._denormalize_name - sql = ("SELECT sequence_name FROM SEQUENCES " - "WHERE SEQUENCE_NAME=? 
") - - rp = connection.execute(sql, denormalize(name)) - return bool(rp.first()) - - -def _autoserial_column(table): - """Finds the effective DEFAULT SERIAL column of a Table, if any.""" - - for index, col in enumerate(table.primary_key.columns): - if (isinstance(col.type, (sqltypes.Integer, sqltypes.Numeric)) and - col.autoincrement): - if isinstance(col.default, schema.Sequence): - if col.default.optional: - return index, col - elif (col.default is None or - (not isinstance(col.server_default, schema.DefaultClause))): - return index, col - - return None, None - diff --git a/libs/sqlalchemy/dialects/maxdb/sapdb.py b/libs/sqlalchemy/dialects/maxdb/sapdb.py deleted file mode 100644 index 51f272a2..00000000 --- a/libs/sqlalchemy/dialects/maxdb/sapdb.py +++ /dev/null @@ -1,23 +0,0 @@ -# maxdb/sapdb.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.maxdb.base import MaxDBDialect - -class MaxDBDialect_sapdb(MaxDBDialect): - driver = 'sapdb' - - @classmethod - def dbapi(cls): - from sapdb import dbapi as _dbapi - return _dbapi - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - opts.update(url.query) - return [], opts - - -dialect = MaxDBDialect_sapdb \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/mssql/__init__.py b/libs/sqlalchemy/dialects/mssql/__init__.py deleted file mode 100644 index b3acbf3a..00000000 --- a/libs/sqlalchemy/dialects/mssql/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# mssql/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.mssql import base, pyodbc, adodbapi, \ - pymssql, zxjdbc, mxodbc - -base.dialect = pyodbc.dialect - -from sqlalchemy.dialects.mssql.base import \ - INTEGER, BIGINT, SMALLINT, TINYINT, VARCHAR, NVARCHAR, CHAR, \ - NCHAR, TEXT, NTEXT, DECIMAL, NUMERIC, FLOAT, DATETIME,\ - DATETIME2, DATETIMEOFFSET, DATE, TIME, SMALLDATETIME, \ - BINARY, VARBINARY, BIT, REAL, IMAGE, TIMESTAMP,\ - MONEY, SMALLMONEY, UNIQUEIDENTIFIER, SQL_VARIANT, dialect - - -__all__ = ( - 'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR', - 'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME', - 'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME', - 'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP', - 'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect' -) \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/mssql/adodbapi.py b/libs/sqlalchemy/dialects/mssql/adodbapi.py deleted file mode 100644 index 05ac6d6f..00000000 --- a/libs/sqlalchemy/dialects/mssql/adodbapi.py +++ /dev/null @@ -1,69 +0,0 @@ -# mssql/adodbapi.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -The adodbapi dialect is not implemented for 0.6 at this time. 
- -""" -import datetime -from sqlalchemy import types as sqltypes, util -from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect -import sys - -class MSDateTime_adodbapi(MSDateTime): - def result_processor(self, dialect, coltype): - def process(value): - # adodbapi will return datetimes with empty time - # values as datetime.date() objects. - # Promote them back to full datetime.datetime() - if type(value) is datetime.date: - return datetime.datetime(value.year, value.month, value.day) - return value - return process - - -class MSDialect_adodbapi(MSDialect): - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - supports_unicode = sys.maxunicode == 65535 - supports_unicode_statements = True - driver = 'adodbapi' - - @classmethod - def import_dbapi(cls): - import adodbapi as module - return module - - colspecs = util.update_copy( - MSDialect.colspecs, - { - sqltypes.DateTime:MSDateTime_adodbapi - } - ) - - def create_connect_args(self, url): - keys = url.query - - connectors = ["Provider=SQLOLEDB"] - if 'port' in keys: - connectors.append ("Data Source=%s, %s" % - (keys.get("host"), keys.get("port"))) - else: - connectors.append ("Data Source=%s" % keys.get("host")) - connectors.append ("Initial Catalog=%s" % keys.get("database")) - user = keys.get("user") - if user: - connectors.append("User Id=%s" % user) - connectors.append("Password=%s" % keys.get("password", "")) - else: - connectors.append("Integrated Security=SSPI") - return [[";".join (connectors)], {}] - - def is_disconnect(self, e, connection, cursor): - return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \ - "'connection failure'" in str(e) - -dialect = MSDialect_adodbapi diff --git a/libs/sqlalchemy/dialects/mssql/base.py b/libs/sqlalchemy/dialects/mssql/base.py deleted file mode 100644 index b6e0d881..00000000 --- a/libs/sqlalchemy/dialects/mssql/base.py +++ /dev/null @@ -1,1535 +0,0 @@ -# mssql/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Microsoft SQL Server database. - -Connecting ----------- - -See the individual driver sections below for details on connecting. - -Auto Increment Behavior ------------------------ - -``IDENTITY`` columns are supported by using SQLAlchemy -``schema.Sequence()`` objects. In other words:: - - from sqlalchemy import Table, Integer, Sequence, Column - - Table('test', metadata, - Column('id', Integer, - Sequence('blah',100,10), primary_key=True), - Column('name', String(20)) - ).create(some_engine) - -would yield:: - - CREATE TABLE test ( - id INTEGER NOT NULL IDENTITY(100,10) PRIMARY KEY, - name VARCHAR(20) NULL, - ) - -Note that the ``start`` and ``increment`` values for sequences are -optional and will default to 1,1. - -Implicit ``autoincrement`` behavior works the same in MSSQL as it -does in other dialects and results in an ``IDENTITY`` column. - -* Support for ``SET IDENTITY_INSERT ON`` mode (automagic on / off for - ``INSERT`` s) - -* Support for auto-fetching of ``@@IDENTITY/@@SCOPE_IDENTITY()`` on - ``INSERT`` - -Collation Support ------------------ - -MSSQL specific string types support a collation parameter that -creates a column-level specific collation for the column. The -collation parameter accepts a Windows Collation Name or a SQL -Collation Name. Supported types are MSChar, MSNChar, MSString, -MSNVarchar, MSText, and MSNText. 
For example:: - - from sqlalchemy.dialects.mssql import VARCHAR - Column('login', VARCHAR(32, collation='Latin1_General_CI_AS')) - -When such a column is associated with a :class:`.Table`, the -CREATE TABLE statement for this column will yield:: - - login VARCHAR(32) COLLATE Latin1_General_CI_AS NULL - -LIMIT/OFFSET Support --------------------- - -MSSQL has no support for the LIMIT or OFFSET keywords. LIMIT is -supported directly through the ``TOP`` Transact SQL keyword:: - - select.limit - -will yield:: - - SELECT TOP n - -If using SQL Server 2005 or above, LIMIT with OFFSET -support is available through the ``ROW_NUMBER OVER`` construct. -For versions below 2005, LIMIT with OFFSET usage will fail. - -Nullability ------------ -MSSQL has support for three levels of column nullability. The default -nullability allows nulls and is explicit in the CREATE TABLE -construct:: - - name VARCHAR(20) NULL - -If ``nullable=None`` is specified then no specification is made. In -other words the database's configured default is used. This will -render:: - - name VARCHAR(20) - -If ``nullable`` is ``True`` or ``False`` then the column will be -``NULL`` or ``NOT NULL`` respectively. - -Date / Time Handling --------------------- -DATE and TIME are supported. Bind parameters are converted -to datetime.datetime() objects as required by most MSSQL drivers, -and results are processed from strings if needed. -The DATE and TIME types are not available for MSSQL 2005 and -previous - if a server version below 2008 is detected, DDL -for these types will be issued as DATETIME. - -Compatibility Levels --------------------- -MSSQL supports the notion of setting compatibility levels at the -database level. This allows, for instance, running a database that -is compatible with SQL2000 while running on a SQL2005 database -server. ``server_version_info`` will always return the database -server version information (in this case SQL2005) and not the -compatibility level information. Because of this, if running under -a backwards compatibility mode SQLAlchemy may attempt to use T-SQL -statements that are unable to be parsed by the database server. - -Triggers --------- - -SQLAlchemy by default uses OUTPUT INSERTED to get at newly -generated primary key values via IDENTITY columns or other -server side defaults. MS-SQL does not -allow the usage of OUTPUT INSERTED on tables that have triggers. -To disable the usage of OUTPUT INSERTED on a per-table basis, -specify ``implicit_returning=False`` for each :class:`.Table` -which has triggers:: - - Table('mytable', metadata, - Column('id', Integer, primary_key=True), - # ..., - implicit_returning=False - ) - -Declarative form:: - - class MyClass(Base): - # ... - __table_args__ = {'implicit_returning':False} - - -This option can also be specified engine-wide using the -``implicit_returning=False`` argument on :func:`.create_engine`. - -Enabling Snapshot Isolation ---------------------------- - -Not necessarily specific to SQLAlchemy, SQL Server has a default transaction -isolation mode that locks entire tables, and causes even mildly concurrent -applications to have long held locks and frequent deadlocks. -Enabling snapshot isolation for the database as a whole is recommended -for modern levels of concurrency support.
This is accomplished via the -following ALTER DATABASE commands executed at the SQL prompt:: - - ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON - - ALTER DATABASE MyDatabase SET READ_COMMITTED_SNAPSHOT ON - -Background on SQL Server snapshot isolation is available at -http://msdn.microsoft.com/en-us/library/ms175095.aspx. - -Scalar Select Comparisons -------------------------- - -.. deprecated:: 0.8 - The MSSQL dialect contains a legacy behavior whereby comparing - a scalar select to a value using the ``=`` or ``!=`` operator - will resolve to IN or NOT IN, respectively. This behavior - will be removed in 0.8 - the ``s.in_()``/``~s.in_()`` operators - should be used when IN/NOT IN are desired. - -For the time being, the existing behavior prevents a comparison -between scalar select and another value that actually wants to use ``=``. -To remove this behavior in a forwards-compatible way, apply this -compilation rule by placing the following code at the module import -level:: - - from sqlalchemy.ext.compiler import compiles - from sqlalchemy.sql.expression import _BinaryExpression - from sqlalchemy.sql.compiler import SQLCompiler - - @compiles(_BinaryExpression, 'mssql') - def override_legacy_binary(element, compiler, **kw): - return SQLCompiler.visit_binary(compiler, element, **kw) - -Known Issues ------------- - -* No support for more than one ``IDENTITY`` column per table -* reflection of indexes does not work with versions older than - SQL Server 2005 - -""" -import datetime, operator, re - -from sqlalchemy import sql, schema as sa_schema, exc, util -from sqlalchemy.sql import select, compiler, expression, \ - operators as sql_operators, \ - util as sql_util, cast -from sqlalchemy.engine import default, base, reflection -from sqlalchemy import types as sqltypes -from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, DECIMAL, NUMERIC, \ - FLOAT, TIMESTAMP, DATETIME, DATE, BINARY,\ - VARBINARY, BLOB - - -from sqlalchemy.dialects.mssql import information_schema as ischema - -MS_2008_VERSION = (10,) -MS_2005_VERSION = (9,) -MS_2000_VERSION = (8,) - -RESERVED_WORDS = set( - ['add', 'all', 'alter', 'and', 'any', 'as', 'asc', 'authorization', - 'backup', 'begin', 'between', 'break', 'browse', 'bulk', 'by', 'cascade', - 'case', 'check', 'checkpoint', 'close', 'clustered', 'coalesce', - 'collate', 'column', 'commit', 'compute', 'constraint', 'contains', - 'containstable', 'continue', 'convert', 'create', 'cross', 'current', - 'current_date', 'current_time', 'current_timestamp', 'current_user', - 'cursor', 'database', 'dbcc', 'deallocate', 'declare', 'default', - 'delete', 'deny', 'desc', 'disk', 'distinct', 'distributed', 'double', - 'drop', 'dump', 'else', 'end', 'errlvl', 'escape', 'except', 'exec', - 'execute', 'exists', 'exit', 'external', 'fetch', 'file', 'fillfactor', - 'for', 'foreign', 'freetext', 'freetexttable', 'from', 'full', - 'function', 'goto', 'grant', 'group', 'having', 'holdlock', 'identity', - 'identity_insert', 'identitycol', 'if', 'in', 'index', 'inner', 'insert', - 'intersect', 'into', 'is', 'join', 'key', 'kill', 'left', 'like', - 'lineno', 'load', 'merge', 'national', 'nocheck', 'nonclustered', 'not', - 'null', 'nullif', 'of', 'off', 'offsets', 'on', 'open', 'opendatasource', - 'openquery', 'openrowset', 'openxml', 'option', 'or', 'order', 'outer', - 'over', 'percent', 'pivot', 'plan', 'precision', 'primary', 'print', - 'proc', 'procedure', 'public', 'raiserror', 'read', 'readtext', - 'reconfigure', 'references', 'replication', 'restore', 'restrict', - 
'return', 'revert', 'revoke', 'right', 'rollback', 'rowcount', - 'rowguidcol', 'rule', 'save', 'schema', 'securityaudit', 'select', - 'session_user', 'set', 'setuser', 'shutdown', 'some', 'statistics', - 'system_user', 'table', 'tablesample', 'textsize', 'then', 'to', 'top', - 'tran', 'transaction', 'trigger', 'truncate', 'tsequal', 'union', - 'unique', 'unpivot', 'update', 'updatetext', 'use', 'user', 'values', - 'varying', 'view', 'waitfor', 'when', 'where', 'while', 'with', - 'writetext', - ]) - -class REAL(sqltypes.REAL): - __visit_name__ = 'REAL' - - def __init__(self, **kw): - # REAL is a synonym for FLOAT(24) on SQL server - kw['precision'] = 24 - super(REAL, self).__init__(**kw) - -class TINYINT(sqltypes.Integer): - __visit_name__ = 'TINYINT' - - -# MSSQL DATE/TIME types have varied behavior, sometimes returning -# strings. MSDate/TIME check for everything, and always -# filter bind parameters into datetime objects (required by pyodbc, -# not sure about other dialects). - -class _MSDate(sqltypes.Date): - def bind_processor(self, dialect): - def process(value): - if type(value) == datetime.date: - return datetime.datetime(value.year, value.month, value.day) - else: - return value - return process - - _reg = re.compile(r"(\d+)-(\d+)-(\d+)") - def result_processor(self, dialect, coltype): - def process(value): - if isinstance(value, datetime.datetime): - return value.date() - elif isinstance(value, basestring): - return datetime.date(*[ - int(x or 0) - for x in self._reg.match(value).groups() - ]) - else: - return value - return process - -class TIME(sqltypes.TIME): - def __init__(self, precision=None, **kwargs): - self.precision = precision - super(TIME, self).__init__() - - __zero_date = datetime.date(1900, 1, 1) - - def bind_processor(self, dialect): - def process(value): - if isinstance(value, datetime.datetime): - value = datetime.datetime.combine( - self.__zero_date, value.time()) - elif isinstance(value, datetime.time): - value = datetime.datetime.combine(self.__zero_date, value) - return value - return process - - _reg = re.compile(r"(\d+):(\d+):(\d+)(?:\.(\d{0,6}))?") - def result_processor(self, dialect, coltype): - def process(value): - if isinstance(value, datetime.datetime): - return value.time() - elif isinstance(value, basestring): - return datetime.time(*[ - int(x or 0) - for x in self._reg.match(value).groups()]) - else: - return value - return process - -class _DateTimeBase(object): - def bind_processor(self, dialect): - def process(value): - if type(value) == datetime.date: - return datetime.datetime(value.year, value.month, value.day) - else: - return value - return process - -class _MSDateTime(_DateTimeBase, sqltypes.DateTime): - pass - -class SMALLDATETIME(_DateTimeBase, sqltypes.DateTime): - __visit_name__ = 'SMALLDATETIME' - -class DATETIME2(_DateTimeBase, sqltypes.DateTime): - __visit_name__ = 'DATETIME2' - - def __init__(self, precision=None, **kw): - super(DATETIME2, self).__init__(**kw) - self.precision = precision - - -# TODO: is this not an Interval ? -class DATETIMEOFFSET(sqltypes.TypeEngine): - __visit_name__ = 'DATETIMEOFFSET' - - def __init__(self, precision=None, **kwargs): - self.precision = precision - -class _StringType(object): - """Base for MSSQL string types.""" - - def __init__(self, collation=None): - self.collation = collation - -class TEXT(_StringType, sqltypes.TEXT): - """MSSQL TEXT type, for variable-length text up to 2^31 characters.""" - - def __init__(self, length=None, collation=None, **kw): - """Construct a TEXT. 
- - :param collation: Optional, a column-level collation for this string - value. Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.Text.__init__(self, length, **kw) - -class NTEXT(_StringType, sqltypes.UnicodeText): - """MSSQL NTEXT type, for variable-length unicode text up to 2^30 - characters.""" - - __visit_name__ = 'NTEXT' - - def __init__(self, length=None, collation=None, **kw): - """Construct a NTEXT. - - :param collation: Optional, a column-level collation for this string - value. Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.UnicodeText.__init__(self, length, **kw) - - -class VARCHAR(_StringType, sqltypes.VARCHAR): - """MSSQL VARCHAR type, for variable-length non-Unicode data with a maximum - of 8,000 characters.""" - - def __init__(self, length=None, collation=None, **kw): - """Construct a VARCHAR. - - :param length: Optional, maximum data length, in characters. - - :param convert_unicode: defaults to False. If True, convert - ``unicode`` data sent to the database to a ``str`` - bytestring, and convert bytestrings coming back from the - database into ``unicode``. - - Bytestrings are encoded using the dialect's - :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which - defaults to `utf-8`. - - If False, may be overridden by - :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. - - :param collation: Optional, a column-level collation for this string - value. Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.VARCHAR.__init__(self, length, **kw) - -class NVARCHAR(_StringType, sqltypes.NVARCHAR): - """MSSQL NVARCHAR type. - - For variable-length unicode character data up to 4,000 characters.""" - - def __init__(self, length=None, collation=None, **kw): - """Construct a NVARCHAR. - - :param length: Optional, Maximum data length, in characters. - - :param collation: Optional, a column-level collation for this string - value. Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.NVARCHAR.__init__(self, length, **kw) - -class CHAR(_StringType, sqltypes.CHAR): - """MSSQL CHAR type, for fixed-length non-Unicode data with a maximum - of 8,000 characters.""" - - def __init__(self, length=None, collation=None, **kw): - """Construct a CHAR. - - :param length: Optional, maximum data length, in characters. - - :param convert_unicode: defaults to False. If True, convert - ``unicode`` data sent to the database to a ``str`` - bytestring, and convert bytestrings coming back from the - database into ``unicode``. - - Bytestrings are encoded using the dialect's - :attr:`~sqlalchemy.engine.base.Dialect.encoding`, which - defaults to `utf-8`. - - If False, may be overridden by - :attr:`sqlalchemy.engine.base.Dialect.convert_unicode`. - - :param collation: Optional, a column-level collation for this string - value. Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.CHAR.__init__(self, length, **kw) - -class NCHAR(_StringType, sqltypes.NCHAR): - """MSSQL NCHAR type. - - For fixed-length unicode character data up to 4,000 characters.""" - - def __init__(self, length=None, collation=None, **kw): - """Construct an NCHAR. - - :param length: Optional, Maximum data length, in characters. - - :param collation: Optional, a column-level collation for this string - value.
Accepts a Windows Collation Name or a SQL Collation Name. - - """ - _StringType.__init__(self, collation) - sqltypes.NCHAR.__init__(self, length, **kw) - -class IMAGE(sqltypes.LargeBinary): - __visit_name__ = 'IMAGE' - -class BIT(sqltypes.TypeEngine): - __visit_name__ = 'BIT' - - -class MONEY(sqltypes.TypeEngine): - __visit_name__ = 'MONEY' - -class SMALLMONEY(sqltypes.TypeEngine): - __visit_name__ = 'SMALLMONEY' - -class UNIQUEIDENTIFIER(sqltypes.TypeEngine): - __visit_name__ = "UNIQUEIDENTIFIER" - -class SQL_VARIANT(sqltypes.TypeEngine): - __visit_name__ = 'SQL_VARIANT' - -# old names. -MSDateTime = _MSDateTime -MSDate = _MSDate -MSReal = REAL -MSTinyInteger = TINYINT -MSTime = TIME -MSSmallDateTime = SMALLDATETIME -MSDateTime2 = DATETIME2 -MSDateTimeOffset = DATETIMEOFFSET -MSText = TEXT -MSNText = NTEXT -MSString = VARCHAR -MSNVarchar = NVARCHAR -MSChar = CHAR -MSNChar = NCHAR -MSBinary = BINARY -MSVarBinary = VARBINARY -MSImage = IMAGE -MSBit = BIT -MSMoney = MONEY -MSSmallMoney = SMALLMONEY -MSUniqueIdentifier = UNIQUEIDENTIFIER -MSVariant = SQL_VARIANT - -ischema_names = { - 'int' : INTEGER, - 'bigint': BIGINT, - 'smallint' : SMALLINT, - 'tinyint' : TINYINT, - 'varchar' : VARCHAR, - 'nvarchar' : NVARCHAR, - 'char' : CHAR, - 'nchar' : NCHAR, - 'text' : TEXT, - 'ntext' : NTEXT, - 'decimal' : DECIMAL, - 'numeric' : NUMERIC, - 'float' : FLOAT, - 'datetime' : DATETIME, - 'datetime2' : DATETIME2, - 'datetimeoffset' : DATETIMEOFFSET, - 'date': DATE, - 'time': TIME, - 'smalldatetime' : SMALLDATETIME, - 'binary' : BINARY, - 'varbinary' : VARBINARY, - 'bit': BIT, - 'real' : REAL, - 'image' : IMAGE, - 'timestamp': TIMESTAMP, - 'money': MONEY, - 'smallmoney': SMALLMONEY, - 'uniqueidentifier': UNIQUEIDENTIFIER, - 'sql_variant': SQL_VARIANT, -} - - -class MSTypeCompiler(compiler.GenericTypeCompiler): - def _extend(self, spec, type_, length=None): - """Extend a string-type declaration with standard SQL - COLLATE annotations. 
- - """ - - if getattr(type_, 'collation', None): - collation = 'COLLATE %s' % type_.collation - else: - collation = None - - if not length: - length = type_.length - - if length: - spec = spec + "(%s)" % length - - return ' '.join([c for c in (spec, collation) - if c is not None]) - - def visit_FLOAT(self, type_): - precision = getattr(type_, 'precision', None) - if precision is None: - return "FLOAT" - else: - return "FLOAT(%(precision)s)" % {'precision': precision} - - def visit_TINYINT(self, type_): - return "TINYINT" - - def visit_DATETIMEOFFSET(self, type_): - if type_.precision: - return "DATETIMEOFFSET(%s)" % type_.precision - else: - return "DATETIMEOFFSET" - - def visit_TIME(self, type_): - precision = getattr(type_, 'precision', None) - if precision: - return "TIME(%s)" % precision - else: - return "TIME" - - def visit_DATETIME2(self, type_): - precision = getattr(type_, 'precision', None) - if precision: - return "DATETIME2(%s)" % precision - else: - return "DATETIME2" - - def visit_SMALLDATETIME(self, type_): - return "SMALLDATETIME" - - def visit_unicode(self, type_): - return self.visit_NVARCHAR(type_) - - def visit_unicode_text(self, type_): - return self.visit_NTEXT(type_) - - def visit_NTEXT(self, type_): - return self._extend("NTEXT", type_) - - def visit_TEXT(self, type_): - return self._extend("TEXT", type_) - - def visit_VARCHAR(self, type_): - return self._extend("VARCHAR", type_, - length = type_.length or 'max') - - def visit_CHAR(self, type_): - return self._extend("CHAR", type_) - - def visit_NCHAR(self, type_): - return self._extend("NCHAR", type_) - - def visit_NVARCHAR(self, type_): - return self._extend("NVARCHAR", type_, - length = type_.length or 'max') - - def visit_date(self, type_): - if self.dialect.server_version_info < MS_2008_VERSION: - return self.visit_DATETIME(type_) - else: - return self.visit_DATE(type_) - - def visit_time(self, type_): - if self.dialect.server_version_info < MS_2008_VERSION: - return self.visit_DATETIME(type_) - else: - return self.visit_TIME(type_) - - def visit_large_binary(self, type_): - return self.visit_IMAGE(type_) - - def visit_IMAGE(self, type_): - return "IMAGE" - - def visit_VARBINARY(self, type_): - return self._extend( - "VARBINARY", - type_, - length=type_.length or 'max') - - def visit_boolean(self, type_): - return self.visit_BIT(type_) - - def visit_BIT(self, type_): - return "BIT" - - def visit_MONEY(self, type_): - return "MONEY" - - def visit_SMALLMONEY(self, type_): - return 'SMALLMONEY' - - def visit_UNIQUEIDENTIFIER(self, type_): - return "UNIQUEIDENTIFIER" - - def visit_SQL_VARIANT(self, type_): - return 'SQL_VARIANT' - -class MSExecutionContext(default.DefaultExecutionContext): - _enable_identity_insert = False - _select_lastrowid = False - _result_proxy = None - _lastrowid = None - - def pre_exec(self): - """Activate IDENTITY_INSERT if needed.""" - - if self.isinsert: - tbl = self.compiled.statement.table - seq_column = tbl._autoincrement_column - insert_has_sequence = seq_column is not None - - if insert_has_sequence: - self._enable_identity_insert = \ - seq_column.key in self.compiled_parameters[0] - else: - self._enable_identity_insert = False - - self._select_lastrowid = insert_has_sequence and \ - not self.compiled.returning and \ - not self._enable_identity_insert and \ - not self.executemany - - if self._enable_identity_insert: - self.root_connection._cursor_execute(self.cursor, - "SET IDENTITY_INSERT %s ON" % - self.dialect.identifier_preparer.format_table(tbl), - ()) - - def post_exec(self): - 
"""Disable IDENTITY_INSERT if enabled.""" - - conn = self.root_connection - if self._select_lastrowid: - if self.dialect.use_scope_identity: - conn._cursor_execute(self.cursor, - "SELECT scope_identity() AS lastrowid", ()) - else: - conn._cursor_execute(self.cursor, - "SELECT @@identity AS lastrowid", ()) - # fetchall() ensures the cursor is consumed without closing it - row = self.cursor.fetchall()[0] - self._lastrowid = int(row[0]) - - if (self.isinsert or self.isupdate or self.isdelete) and \ - self.compiled.returning: - self._result_proxy = base.FullyBufferedResultProxy(self) - - if self._enable_identity_insert: - conn._cursor_execute(self.cursor, - "SET IDENTITY_INSERT %s OFF" % - self.dialect.identifier_preparer. - format_table(self.compiled.statement.table), - () - ) - - def get_lastrowid(self): - return self._lastrowid - - def handle_dbapi_exception(self, e): - if self._enable_identity_insert: - try: - self.cursor.execute( - "SET IDENTITY_INSERT %s OFF" % - self.dialect.identifier_preparer.\ - format_table(self.compiled.statement.table) - ) - except: - pass - - def get_result_proxy(self): - if self._result_proxy: - return self._result_proxy - else: - return base.ResultProxy(self) - -class MSSQLCompiler(compiler.SQLCompiler): - returning_precedes_values = True - - extract_map = util.update_copy( - compiler.SQLCompiler.extract_map, - { - 'doy': 'dayofyear', - 'dow': 'weekday', - 'milliseconds': 'millisecond', - 'microseconds': 'microsecond' - }) - - def __init__(self, *args, **kwargs): - self.tablealiases = {} - super(MSSQLCompiler, self).__init__(*args, **kwargs) - - def visit_now_func(self, fn, **kw): - return "CURRENT_TIMESTAMP" - - def visit_current_date_func(self, fn, **kw): - return "GETDATE()" - - def visit_length_func(self, fn, **kw): - return "LEN%s" % self.function_argspec(fn, **kw) - - def visit_char_length_func(self, fn, **kw): - return "LEN%s" % self.function_argspec(fn, **kw) - - def visit_concat_op(self, binary, **kw): - return "%s + %s" % \ - (self.process(binary.left, **kw), - self.process(binary.right, **kw)) - - def visit_match_op(self, binary, **kw): - return "CONTAINS (%s, %s)" % ( - self.process(binary.left, **kw), - self.process(binary.right, **kw)) - - def get_select_precolumns(self, select): - """ MS-SQL puts TOP, it's version of LIMIT here """ - if select._distinct or select._limit is not None: - s = select._distinct and "DISTINCT " or "" - - # ODBC drivers and possibly others - # don't support bind params in the SELECT clause on SQL Server. - # so have to use literal here. - if select._limit is not None: - if not select._offset: - s += "TOP %d " % select._limit - return s - return compiler.SQLCompiler.get_select_precolumns(self, select) - - def get_from_hint_text(self, table, text): - return text - - def get_crud_hint_text(self, table, text): - return text - - def limit_clause(self, select): - # Limit in mssql is after the select keyword - return "" - - def visit_select(self, select, **kwargs): - """Look for ``LIMIT`` and OFFSET in a select statement, and if - so tries to wrap it in a subquery with ``row_number()`` criterion. - - """ - if select._offset and not getattr(select, '_mssql_visit', None): - # to use ROW_NUMBER(), an ORDER BY is required. 
- if not select._order_by_clause.clauses: - raise exc.CompileError('MSSQL requires an order_by when ' - 'using an offset.') - - _offset = select._offset - _limit = select._limit - _order_by_clauses = select._order_by_clause.clauses - select = select._generate() - select._mssql_visit = True - select = select.column( - sql.func.ROW_NUMBER().over(order_by=_order_by_clauses) - .label("mssql_rn") - ).order_by(None).alias() - - mssql_rn = sql.column('mssql_rn') - limitselect = sql.select([c for c in select.c if - c.key != 'mssql_rn']) - limitselect.append_whereclause(mssql_rn > _offset) - if _limit is not None: - limitselect.append_whereclause(mssql_rn <= (_limit + _offset)) - return self.process(limitselect, iswrapper=True, **kwargs) - else: - return compiler.SQLCompiler.visit_select(self, select, **kwargs) - - def _schema_aliased_table(self, table): - if getattr(table, 'schema', None) is not None: - if table not in self.tablealiases: - self.tablealiases[table] = table.alias() - return self.tablealiases[table] - else: - return None - - def visit_table(self, table, mssql_aliased=False, **kwargs): - if mssql_aliased is table: - return super(MSSQLCompiler, self).visit_table(table, **kwargs) - - # alias schema-qualified tables - alias = self._schema_aliased_table(table) - if alias is not None: - return self.process(alias, mssql_aliased=table, **kwargs) - else: - return super(MSSQLCompiler, self).visit_table(table, **kwargs) - - def visit_alias(self, alias, **kwargs): - # translate for schema-qualified table aliases - kwargs['mssql_aliased'] = alias.original - return super(MSSQLCompiler, self).visit_alias(alias, **kwargs) - - def visit_extract(self, extract, **kw): - field = self.extract_map.get(extract.field, extract.field) - return 'DATEPART("%s", %s)' % \ - (field, self.process(extract.expr, **kw)) - - def visit_savepoint(self, savepoint_stmt): - return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt) - - def visit_rollback_to_savepoint(self, savepoint_stmt): - return ("ROLLBACK TRANSACTION %s" - % self.preparer.format_savepoint(savepoint_stmt)) - - def visit_column(self, column, result_map=None, **kwargs): - if column.table is not None and \ - (not self.isupdate and not self.isdelete) or self.is_subquery(): - # translate for schema-qualified table aliases - t = self._schema_aliased_table(column.table) - if t is not None: - converted = expression._corresponding_column_or_error( - t, column) - - if result_map is not None: - result_map[column.name.lower()] = \ - (column.name, (column, column.name, - column.key), - column.type) - - return super(MSSQLCompiler, self).\ - visit_column(converted, - result_map=None, **kwargs) - - return super(MSSQLCompiler, self).visit_column(column, - result_map=result_map, - **kwargs) - - def visit_binary(self, binary, **kwargs): - """Move bind parameters to the right-hand side of an operator, where - possible. 
- - """ - if ( - isinstance(binary.left, expression._BindParamClause) - and binary.operator == operator.eq - and not isinstance(binary.right, expression._BindParamClause) - ): - return self.process( - expression._BinaryExpression(binary.right, - binary.left, - binary.operator), - **kwargs) - else: - if ( - (binary.operator is operator.eq or - binary.operator is operator.ne) - and ( - (isinstance(binary.left, expression._FromGrouping) - and isinstance(binary.left.element, - expression._ScalarSelect)) - or (isinstance(binary.right, expression._FromGrouping) - and isinstance(binary.right.element, - expression._ScalarSelect)) - or isinstance(binary.left, expression._ScalarSelect) - or isinstance(binary.right, expression._ScalarSelect) - ) - ): - op = binary.operator == operator.eq and "IN" or "NOT IN" - util.warn_deprecated("Comparing a scalar select using ``=``/``!=`` will " - "no longer produce IN/NOT IN in 0.8. To remove this " - "behavior immediately, use the recipe at " - "http://www.sqlalchemy.org/docs/07/dialects/mssql.html#scalar-select-comparisons") - return self.process( - expression._BinaryExpression(binary.left, - binary.right, op), - **kwargs) - return super(MSSQLCompiler, self).visit_binary(binary, **kwargs) - - def returning_clause(self, stmt, returning_cols): - - if self.isinsert or self.isupdate: - target = stmt.table.alias("inserted") - else: - target = stmt.table.alias("deleted") - - adapter = sql_util.ClauseAdapter(target) - def col_label(col): - adapted = adapter.traverse(col) - if isinstance(col, expression._Label): - return adapted.label(c.key) - else: - return self.label_select_column(None, adapted, asfrom=False) - - columns = [ - self.process( - col_label(c), - within_columns_clause=True, - result_map=self.result_map - ) - for c in expression._select_iterables(returning_cols) - ] - return 'OUTPUT ' + ', '.join(columns) - - def get_cte_preamble(self, recursive): - # SQL Server finds it too inconvenient to accept - # an entirely optional, SQL standard specified, - # "RECURSIVE" word with their "WITH", - # so here we go - return "WITH" - - def label_select_column(self, select, column, asfrom): - if isinstance(column, expression.Function): - return column.label(None) - else: - return super(MSSQLCompiler, self).\ - label_select_column(select, column, asfrom) - - def for_update_clause(self, select): - # "FOR UPDATE" is only allowed on "DECLARE CURSOR" which - # SQLAlchemy doesn't use - return '' - - def order_by_clause(self, select, **kw): - order_by = self.process(select._order_by_clause, **kw) - - # MSSQL only allows ORDER BY in subqueries if there is a LIMIT - if order_by and (not self.is_subquery() or select._limit): - return " ORDER BY " + order_by - else: - return "" - - def update_from_clause(self, update_stmt, - from_table, extra_froms, - from_hints, - **kw): - """Render the UPDATE..FROM clause specific to MSSQL. - - In MSSQL, if the UPDATE statement involves an alias of the table to - be updated, then the table itself must be added to the FROM list as - well. Otherwise, it is optional. Here, we add it regardless. - - """ - return "FROM " + ', '.join( - t._compiler_dispatch(self, asfrom=True, - fromhints=from_hints, **kw) - for t in [from_table] + extra_froms) - -class MSSQLStrictCompiler(MSSQLCompiler): - """A subclass of MSSQLCompiler which disables the usage of bind - parameters where not allowed natively by MS-SQL. - - A dialect may use this compiler on a platform where native - binds are used. 
- - """ - ansi_bind_rules = True - - def visit_in_op(self, binary, **kw): - kw['literal_binds'] = True - return "%s IN %s" % ( - self.process(binary.left, **kw), - self.process(binary.right, **kw) - ) - - def visit_notin_op(self, binary, **kw): - kw['literal_binds'] = True - return "%s NOT IN %s" % ( - self.process(binary.left, **kw), - self.process(binary.right, **kw) - ) - - def visit_function(self, func, **kw): - kw['literal_binds'] = True - return super(MSSQLStrictCompiler, self).visit_function(func, **kw) - - def render_literal_value(self, value, type_): - """ - For date and datetime values, convert to a string - format acceptable to MSSQL. That seems to be the - so-called ODBC canonical date format which looks - like this: - - yyyy-mm-dd hh:mi:ss.mmm(24h) - - For other data types, call the base class implementation. - """ - # datetime and date are both subclasses of datetime.date - if issubclass(type(value), datetime.date): - # SQL Server wants single quotes around the date string. - return "'" + str(value) + "'" - else: - return super(MSSQLStrictCompiler, self).\ - render_literal_value(value, type_) - -class MSDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): - colspec = (self.preparer.format_column(column) + " " - + self.dialect.type_compiler.process(column.type)) - - if column.nullable is not None: - if not column.nullable or column.primary_key: - colspec += " NOT NULL" - else: - colspec += " NULL" - - if column.table is None: - raise exc.CompileError( - "mssql requires Table-bound columns " - "in order to generate DDL") - - seq_col = column.table._autoincrement_column - - # install a IDENTITY Sequence if we have an implicit IDENTITY column - if seq_col is column: - sequence = isinstance(column.default, sa_schema.Sequence) and \ - column.default - if sequence: - start, increment = sequence.start or 1, \ - sequence.increment or 1 - else: - start, increment = 1, 1 - colspec += " IDENTITY(%s,%s)" % (start, increment) - else: - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - return colspec - - def visit_drop_index(self, drop): - return "\nDROP INDEX %s.%s" % ( - self.preparer.quote_identifier(drop.element.table.name), - self.preparer.quote( - self._index_identifier(drop.element.name), - drop.element.quote) - ) - - -class MSIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = RESERVED_WORDS - - def __init__(self, dialect): - super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[', - final_quote=']') - - def _escape_identifier(self, value): - return value - - def quote_schema(self, schema, force=True): - """Prepare a quoted table and schema name.""" - result = '.'.join([self.quote(x, force) for x in schema.split('.')]) - return result - -class MSDialect(default.DefaultDialect): - name = 'mssql' - supports_default_values = True - supports_empty_insert = False - execution_ctx_cls = MSExecutionContext - use_scope_identity = True - max_identifier_length = 128 - schema_name = "dbo" - - colspecs = { - sqltypes.DateTime : _MSDateTime, - sqltypes.Date : _MSDate, - sqltypes.Time : TIME, - } - - ischema_names = ischema_names - - supports_native_boolean = False - supports_unicode_binds = True - postfetch_lastrowid = True - - server_version_info = () - - statement_compiler = MSSQLCompiler - ddl_compiler = MSDDLCompiler - type_compiler = MSTypeCompiler - preparer = MSIdentifierPreparer - - def __init__(self, - query_timeout=None, - use_scope_identity=True, - 
max_identifier_length=None, - schema_name=u"dbo", **opts): - self.query_timeout = int(query_timeout or 0) - self.schema_name = schema_name - - self.use_scope_identity = use_scope_identity - self.max_identifier_length = int(max_identifier_length or 0) or \ - self.max_identifier_length - super(MSDialect, self).__init__(**opts) - - def do_savepoint(self, connection, name): - # give the DBAPI a push - connection.execute("IF @@TRANCOUNT = 0 BEGIN TRANSACTION") - super(MSDialect, self).do_savepoint(connection, name) - - def do_release_savepoint(self, connection, name): - # SQL Server does not support RELEASE SAVEPOINT - pass - - def initialize(self, connection): - super(MSDialect, self).initialize(connection) - if self.server_version_info[0] not in range(8, 17): - # FreeTDS with version 4.2 seems to report here - # a number like "95.10.255". Don't know what - # that is. So emit warning. - util.warn( - "Unrecognized server version info '%s'. Version specific " - "behaviors may not function properly. If using ODBC " - "with FreeTDS, ensure server version 7.0 or 8.0, not 4.2, " - "is configured in the FreeTDS configuration." % - ".".join(str(x) for x in self.server_version_info) ) - if self.server_version_info >= MS_2005_VERSION and \ - 'implicit_returning' not in self.__dict__: - self.implicit_returning = True - - def _get_default_schema_name(self, connection): - user_name = connection.scalar("SELECT user_name() as user_name;") - if user_name is not None: - # now, get the default schema - query = sql.text(""" - SELECT default_schema_name FROM - sys.database_principals - WHERE name = :name - AND type = 'S' - """) - try: - default_schema_name = connection.scalar(query, name=user_name) - if default_schema_name is not None: - return unicode(default_schema_name) - except: - pass - return self.schema_name - - def _unicode_cast(self, column): - if self.server_version_info >= MS_2005_VERSION: - return cast(column, NVARCHAR(_warn_on_bytestring=False)) - else: - return column - - def has_table(self, connection, tablename, schema=None): - current_schema = schema or self.default_schema_name - columns = ischema.columns - - whereclause = self._unicode_cast(columns.c.table_name)==tablename - if current_schema: - whereclause = sql.and_(whereclause, - columns.c.table_schema==current_schema) - s = sql.select([columns], whereclause) - c = connection.execute(s) - return c.first() is not None - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = sql.select([ischema.schemata.c.schema_name], - order_by=[ischema.schemata.c.schema_name] - ) - schema_names = [r[0] for r in connection.execute(s)] - return schema_names - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - current_schema = schema or self.default_schema_name - tables = ischema.tables - s = sql.select([tables.c.table_name], - sql.and_( - tables.c.table_schema == current_schema, - tables.c.table_type == u'BASE TABLE' - ), - order_by=[tables.c.table_name] - ) - table_names = [r[0] for r in connection.execute(s)] - return table_names - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - current_schema = schema or self.default_schema_name - tables = ischema.tables - s = sql.select([tables.c.table_name], - sql.and_( - tables.c.table_schema == current_schema, - tables.c.table_type == u'VIEW' - ), - order_by=[tables.c.table_name] - ) - view_names = [r[0] for r in connection.execute(s)] - return view_names - - @reflection.cache - def get_indexes(self, connection, tablename, schema=None, 
**kw): - # using system catalogs, don't support index reflection - # below MS 2005 - if self.server_version_info < MS_2005_VERSION: - return [] - - current_schema = schema or self.default_schema_name - full_tname = "%s.%s" % (current_schema, tablename) - - rp = connection.execute( - sql.text("select ind.index_id, ind.is_unique, ind.name " - "from sys.indexes as ind join sys.tables as tab on " - "ind.object_id=tab.object_id " - "join sys.schemas as sch on sch.schema_id=tab.schema_id " - "where tab.name = :tabname " - "and sch.name=:schname " - "and ind.is_primary_key=0", - bindparams=[ - sql.bindparam('tabname', tablename, - sqltypes.String(convert_unicode=True)), - sql.bindparam('schname', current_schema, - sqltypes.String(convert_unicode=True)) - ], - typemap = { - 'name':sqltypes.Unicode() - } - ) - ) - indexes = {} - for row in rp: - indexes[row['index_id']] = { - 'name':row['name'], - 'unique':row['is_unique'] == 1, - 'column_names':[] - } - rp = connection.execute( - sql.text( - "select ind_col.index_id, ind_col.object_id, col.name " - "from sys.columns as col " - "join sys.tables as tab on tab.object_id=col.object_id " - "join sys.index_columns as ind_col on " - "(ind_col.column_id=col.column_id and " - "ind_col.object_id=tab.object_id) " - "join sys.schemas as sch on sch.schema_id=tab.schema_id " - "where tab.name=:tabname " - "and sch.name=:schname", - bindparams=[ - sql.bindparam('tabname', tablename, - sqltypes.String(convert_unicode=True)), - sql.bindparam('schname', current_schema, - sqltypes.String(convert_unicode=True)) - ], - typemap = { - 'name':sqltypes.Unicode() - } - ), - ) - for row in rp: - if row['index_id'] in indexes: - indexes[row['index_id']]['column_names'].append(row['name']) - - return indexes.values() - - @reflection.cache - def get_view_definition(self, connection, viewname, schema=None, **kw): - current_schema = schema or self.default_schema_name - - rp = connection.execute( - sql.text( - "select definition from sys.sql_modules as mod, " - "sys.views as views, " - "sys.schemas as sch" - " where " - "mod.object_id=views.object_id and " - "views.schema_id=sch.schema_id and " - "views.name=:viewname and sch.name=:schname", - bindparams=[ - sql.bindparam('viewname', viewname, - sqltypes.String(convert_unicode=True)), - sql.bindparam('schname', current_schema, - sqltypes.String(convert_unicode=True)) - ] - ) - ) - - if rp: - view_def = rp.scalar() - return view_def - - @reflection.cache - def get_columns(self, connection, tablename, schema=None, **kw): - # Get base columns - current_schema = schema or self.default_schema_name - columns = ischema.columns - if current_schema: - whereclause = sql.and_(columns.c.table_name==tablename, - columns.c.table_schema==current_schema) - else: - whereclause = columns.c.table_name==tablename - s = sql.select([columns], whereclause, - order_by=[columns.c.ordinal_position]) - c = connection.execute(s) - cols = [] - while True: - row = c.fetchone() - if row is None: - break - (name, type, nullable, charlen, - numericprec, numericscale, default, collation) = ( - row[columns.c.column_name], - row[columns.c.data_type], - row[columns.c.is_nullable] == 'YES', - row[columns.c.character_maximum_length], - row[columns.c.numeric_precision], - row[columns.c.numeric_scale], - row[columns.c.column_default], - row[columns.c.collation_name] - ) - coltype = self.ischema_names.get(type, None) - - kwargs = {} - if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText, - MSNText, MSBinary, MSVarBinary, - sqltypes.LargeBinary): - 
kwargs['length'] = charlen - if collation: - kwargs['collation'] = collation - if coltype == MSText or \ - (coltype in (MSString, MSNVarchar) and charlen == -1): - kwargs.pop('length') - - if coltype is None: - util.warn( - "Did not recognize type '%s' of column '%s'" % - (type, name)) - coltype = sqltypes.NULLTYPE - else: - if issubclass(coltype, sqltypes.Numeric) and \ - coltype is not MSReal: - kwargs['scale'] = numericscale - kwargs['precision'] = numericprec - - coltype = coltype(**kwargs) - cdict = { - 'name' : name, - 'type' : coltype, - 'nullable' : nullable, - 'default' : default, - 'autoincrement':False, - } - cols.append(cdict) - # autoincrement and identity - colmap = {} - for col in cols: - colmap[col['name']] = col - # We also run an sp_columns to check for identity columns: - cursor = connection.execute("sp_columns @table_name = '%s', " - "@table_owner = '%s'" - % (tablename, current_schema)) - ic = None - while True: - row = cursor.fetchone() - if row is None: - break - (col_name, type_name) = row[3], row[5] - if type_name.endswith("identity") and col_name in colmap: - ic = col_name - colmap[col_name]['autoincrement'] = True - colmap[col_name]['sequence'] = dict( - name='%s_identity' % col_name) - break - cursor.close() - - if ic is not None and self.server_version_info >= MS_2005_VERSION: - table_fullname = "%s.%s" % (current_schema, tablename) - cursor = connection.execute( - "select ident_seed('%s'), ident_incr('%s')" - % (table_fullname, table_fullname) - ) - - row = cursor.first() - if row is not None and row[0] is not None: - colmap[ic]['sequence'].update({ - 'start' : int(row[0]), - 'increment' : int(row[1]) - }) - return cols - - @reflection.cache - def get_primary_keys(self, connection, tablename, schema=None, **kw): - current_schema = schema or self.default_schema_name - pkeys = [] - # information_schema.referential_constraints - RR = ischema.ref_constraints - # information_schema.table_constraints - TC = ischema.constraints - # information_schema.constraint_column_usage: - # the constrained column - C = ischema.key_constraints.alias('C') - # information_schema.constraint_column_usage: - # the referenced column - R = ischema.key_constraints.alias('R') - - # Primary key constraints - s = sql.select([C.c.column_name, TC.c.constraint_type], - sql.and_(TC.c.constraint_name == C.c.constraint_name, - TC.c.table_schema == C.c.table_schema, - C.c.table_name == tablename, - C.c.table_schema == current_schema) - ) - c = connection.execute(s) - for row in c: - if 'PRIMARY' in row[TC.c.constraint_type.name]: - pkeys.append(row[0]) - return pkeys - - @reflection.cache - def get_foreign_keys(self, connection, tablename, schema=None, **kw): - current_schema = schema or self.default_schema_name - # Add constraints - #information_schema.referential_constraints - RR = ischema.ref_constraints - # information_schema.table_constraints - TC = ischema.constraints - # information_schema.constraint_column_usage: - # the constrained column - C = ischema.key_constraints.alias('C') - # information_schema.constraint_column_usage: - # the referenced column - R = ischema.key_constraints.alias('R') - - # Foreign key constraints - s = sql.select([C.c.column_name, - R.c.table_schema, R.c.table_name, R.c.column_name, - RR.c.constraint_name, RR.c.match_option, - RR.c.update_rule, - RR.c.delete_rule], - sql.and_(C.c.table_name == tablename, - C.c.table_schema == current_schema, - C.c.constraint_name == RR.c.constraint_name, - R.c.constraint_name == - RR.c.unique_constraint_name, - 
C.c.ordinal_position == R.c.ordinal_position - ), - order_by = [ - RR.c.constraint_name, - R.c.ordinal_position]) - - - # group rows by constraint ID, to handle multi-column FKs - fkeys = [] - fknm, scols, rcols = (None, [], []) - - def fkey_rec(): - return { - 'name' : None, - 'constrained_columns' : [], - 'referred_schema' : None, - 'referred_table' : None, - 'referred_columns' : [] - } - - fkeys = util.defaultdict(fkey_rec) - - for r in connection.execute(s).fetchall(): - scol, rschema, rtbl, rcol, rfknm, fkmatch, fkuprule, fkdelrule = r - - rec = fkeys[rfknm] - rec['name'] = rfknm - if not rec['referred_table']: - rec['referred_table'] = rtbl - - if schema is not None or current_schema != rschema: - rec['referred_schema'] = rschema - - local_cols, remote_cols = \ - rec['constrained_columns'],\ - rec['referred_columns'] - - local_cols.append(scol) - remote_cols.append(rcol) - - return fkeys.values() - diff --git a/libs/sqlalchemy/dialects/mssql/information_schema.py b/libs/sqlalchemy/dialects/mssql/information_schema.py deleted file mode 100644 index 0dcddae9..00000000 --- a/libs/sqlalchemy/dialects/mssql/information_schema.py +++ /dev/null @@ -1,98 +0,0 @@ -# mssql/information_schema.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -# TODO: should be using the sys. catalog with SQL Server, not information schema - -from sqlalchemy import Table, MetaData, Column -from sqlalchemy.types import String, Unicode, Integer, TypeDecorator - -ischema = MetaData() - -class CoerceUnicode(TypeDecorator): - impl = Unicode - - def process_bind_param(self, value, dialect): - # Py2K - if isinstance(value, str): - value = value.decode(dialect.encoding) - # end Py2K - return value - -schemata = Table("SCHEMATA", ischema, - Column("CATALOG_NAME", CoerceUnicode, key="catalog_name"), - Column("SCHEMA_NAME", CoerceUnicode, key="schema_name"), - Column("SCHEMA_OWNER", CoerceUnicode, key="schema_owner"), - schema="INFORMATION_SCHEMA") - -tables = Table("TABLES", ischema, - Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("TABLE_TYPE", String(convert_unicode=True), key="table_type"), - schema="INFORMATION_SCHEMA") - -columns = Table("COLUMNS", ischema, - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("COLUMN_NAME", CoerceUnicode, key="column_name"), - Column("IS_NULLABLE", Integer, key="is_nullable"), - Column("DATA_TYPE", String, key="data_type"), - Column("ORDINAL_POSITION", Integer, key="ordinal_position"), - Column("CHARACTER_MAXIMUM_LENGTH", Integer, key="character_maximum_length"), - Column("NUMERIC_PRECISION", Integer, key="numeric_precision"), - Column("NUMERIC_SCALE", Integer, key="numeric_scale"), - Column("COLUMN_DEFAULT", Integer, key="column_default"), - Column("COLLATION_NAME", String, key="collation_name"), - schema="INFORMATION_SCHEMA") - -constraints = Table("TABLE_CONSTRAINTS", ischema, - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), - Column("CONSTRAINT_TYPE", String(convert_unicode=True), key="constraint_type"), - schema="INFORMATION_SCHEMA") - -column_constraints = 
Table("CONSTRAINT_COLUMN_USAGE", ischema, - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("COLUMN_NAME", CoerceUnicode, key="column_name"), - Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), - schema="INFORMATION_SCHEMA") - -key_constraints = Table("KEY_COLUMN_USAGE", ischema, - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("COLUMN_NAME", CoerceUnicode, key="column_name"), - Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), - Column("ORDINAL_POSITION", Integer, key="ordinal_position"), - schema="INFORMATION_SCHEMA") - -ref_constraints = Table("REFERENTIAL_CONSTRAINTS", ischema, - Column("CONSTRAINT_CATALOG", CoerceUnicode, key="constraint_catalog"), - Column("CONSTRAINT_SCHEMA", CoerceUnicode, key="constraint_schema"), - Column("CONSTRAINT_NAME", CoerceUnicode, key="constraint_name"), - # TODO: is CATLOG misspelled ? - Column("UNIQUE_CONSTRAINT_CATLOG", CoerceUnicode, - key="unique_constraint_catalog"), - - Column("UNIQUE_CONSTRAINT_SCHEMA", CoerceUnicode, - key="unique_constraint_schema"), - Column("UNIQUE_CONSTRAINT_NAME", CoerceUnicode, - key="unique_constraint_name"), - Column("MATCH_OPTION", String, key="match_option"), - Column("UPDATE_RULE", String, key="update_rule"), - Column("DELETE_RULE", String, key="delete_rule"), - schema="INFORMATION_SCHEMA") - -views = Table("VIEWS", ischema, - Column("TABLE_CATALOG", CoerceUnicode, key="table_catalog"), - Column("TABLE_SCHEMA", CoerceUnicode, key="table_schema"), - Column("TABLE_NAME", CoerceUnicode, key="table_name"), - Column("VIEW_DEFINITION", CoerceUnicode, key="view_definition"), - Column("CHECK_OPTION", String, key="check_option"), - Column("IS_UPDATABLE", String, key="is_updatable"), - schema="INFORMATION_SCHEMA") - diff --git a/libs/sqlalchemy/dialects/mssql/mxodbc.py b/libs/sqlalchemy/dialects/mssql/mxodbc.py deleted file mode 100644 index 56a72f41..00000000 --- a/libs/sqlalchemy/dialects/mssql/mxodbc.py +++ /dev/null @@ -1,93 +0,0 @@ -# mssql/mxodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for MS-SQL via mxODBC. - -mxODBC is available at: - - http://www.egenix.com/ - -This was tested with mxODBC 3.1.2 and the SQL Server Native -Client connected to MSSQL 2005 and 2008 Express Editions. - -Connecting -~~~~~~~~~~ - -Connection is via DSN:: - - mssql+mxodbc://:@ - -Execution Modes -~~~~~~~~~~~~~~~ - -mxODBC features two styles of statement execution, using the -``cursor.execute()`` and ``cursor.executedirect()`` methods (the second being -an extension to the DBAPI specification). The former makes use of a particular -API call specific to the SQL Server Native Client ODBC driver known -SQLDescribeParam, while the latter does not. - -mxODBC apparently only makes repeated use of a single prepared statement -when SQLDescribeParam is used. The advantage to prepared statement reuse is -one of performance. 
The disadvantage is that SQLDescribeParam has a limited -set of scenarios in which bind parameters are understood, including that they -cannot be placed within the argument lists of function calls, anywhere outside -the FROM, or even within subqueries within the FROM clause - making the usage -of bind parameters within SELECT statements impossible for all but the most -simplistic statements. - -For this reason, the mxODBC dialect uses the "native" mode by default only for -INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for -all other statements. - -This behavior can be controlled via -:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the -``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a -value of ``True`` will unconditionally use native bind parameters and a value -of ``False`` will unconditionally use string-escaped parameters. - -""" - - -from sqlalchemy import types as sqltypes -from sqlalchemy.connectors.mxodbc import MxODBCConnector -from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc -from sqlalchemy.dialects.mssql.base import (MSDialect, - MSSQLStrictCompiler, - _MSDateTime, _MSDate, TIME) - - - -class MSExecutionContext_mxodbc(MSExecutionContext_pyodbc): - """ - The pyodbc execution context is useful for enabling - SELECT SCOPE_IDENTITY in cases where OUTPUT clause - does not work (tables with insert triggers). - """ - #todo - investigate whether the pyodbc execution context - # is really only being used in cases where OUTPUT - # won't work. - -class MSDialect_mxodbc(MxODBCConnector, MSDialect): - - # TODO: may want to use this only if FreeTDS is not in use, - # since FreeTDS doesn't seem to use native binds. - statement_compiler = MSSQLStrictCompiler - execution_ctx_cls = MSExecutionContext_mxodbc - colspecs = { - #sqltypes.Numeric : _MSNumeric, - sqltypes.DateTime : _MSDateTime, - sqltypes.Date : _MSDate, - sqltypes.Time : TIME, - } - - - def __init__(self, description_encoding='latin-1', **params): - super(MSDialect_mxodbc, self).__init__(**params) - self.description_encoding = description_encoding - -dialect = MSDialect_mxodbc - diff --git a/libs/sqlalchemy/dialects/mssql/pymssql.py b/libs/sqlalchemy/dialects/mssql/pymssql.py deleted file mode 100644 index 8229d6ce..00000000 --- a/libs/sqlalchemy/dialects/mssql/pymssql.py +++ /dev/null @@ -1,108 +0,0 @@ -# mssql/pymssql.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for the pymssql dialect. - -This dialect supports pymssql 1.0 and greater. - -pymssql is available at: - - http://pymssql.sourceforge.net/ - -Connecting -^^^^^^^^^^ - -Sample connect string:: - - mssql+pymssql://:@ - -Adding "?charset=utf8" or similar will cause pymssql to return -strings as Python unicode objects. This can potentially improve -performance in some scenarios as decoding of strings is -handled natively. - -Limitations -^^^^^^^^^^^ - -pymssql inherits a lot of limitations from FreeTDS, including: - -* no support for multibyte schema identifiers -* poor support for large decimals -* poor support for binary fields -* poor support for VARCHAR/CHAR fields over 255 characters - -Please consult the pymssql documentation for further information. 
- -""" -from sqlalchemy.dialects.mssql.base import MSDialect -from sqlalchemy import types as sqltypes, util, processors -import re - -class _MSNumeric_pymssql(sqltypes.Numeric): - def result_processor(self, dialect, type_): - if not self.asdecimal: - return processors.to_float - else: - return sqltypes.Numeric.result_processor(self, dialect, type_) - -class MSDialect_pymssql(MSDialect): - supports_sane_rowcount = False - driver = 'pymssql' - - colspecs = util.update_copy( - MSDialect.colspecs, - { - sqltypes.Numeric:_MSNumeric_pymssql, - sqltypes.Float:sqltypes.Float, - } - ) - @classmethod - def dbapi(cls): - module = __import__('pymssql') - # pymmsql doesn't have a Binary method. we use string - # TODO: monkeypatching here is less than ideal - module.Binary = str - - client_ver = tuple(int(x) for x in module.__version__.split(".")) - if client_ver < (1, ): - util.warn("The pymssql dialect expects at least " - "the 1.0 series of the pymssql DBAPI.") - return module - - def __init__(self, **params): - super(MSDialect_pymssql, self).__init__(**params) - self.use_scope_identity = True - - def _get_server_version_info(self, connection): - vers = connection.scalar("select @@version") - m = re.match( - r"Microsoft SQL Server.*? - (\d+).(\d+).(\d+).(\d+)", vers) - if m: - return tuple(int(x) for x in m.group(1, 2, 3, 4)) - else: - return None - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - opts.update(url.query) - port = opts.pop('port', None) - if port and 'host' in opts: - opts['host'] = "%s:%s" % (opts['host'], port) - return [[], opts] - - def is_disconnect(self, e, connection, cursor): - for msg in ( - "Error 10054", - "Not connected to any MS SQL server", - "Connection is closed" - ): - if msg in str(e): - return True - else: - return False - -dialect = MSDialect_pymssql diff --git a/libs/sqlalchemy/dialects/mssql/pyodbc.py b/libs/sqlalchemy/dialects/mssql/pyodbc.py deleted file mode 100644 index 389018c6..00000000 --- a/libs/sqlalchemy/dialects/mssql/pyodbc.py +++ /dev/null @@ -1,254 +0,0 @@ -# mssql/pyodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for MS-SQL via pyodbc. - -pyodbc is available at: - - http://pypi.python.org/pypi/pyodbc/ - -Connecting -^^^^^^^^^^ - -Examples of pyodbc connection string URLs: - -* ``mssql+pyodbc://mydsn`` - connects using the specified DSN named ``mydsn``. - The connection string that is created will appear like:: - - dsn=mydsn;Trusted_Connection=Yes - -* ``mssql+pyodbc://user:pass@mydsn`` - connects using the DSN named - ``mydsn`` passing in the ``UID`` and ``PWD`` information. The - connection string that is created will appear like:: - - dsn=mydsn;UID=user;PWD=pass - -* ``mssql+pyodbc://user:pass@mydsn/?LANGUAGE=us_english`` - connects - using the DSN named ``mydsn`` passing in the ``UID`` and ``PWD`` - information, plus the additional connection configuration option - ``LANGUAGE``. The connection string that is created will appear - like:: - - dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english - -* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection - that would appear like:: - - DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass - -* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection - string which includes the port - information using the comma syntax. 
This will create the following - connection string:: - - DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass - -* ``mssql+pyodbc://user:pass@host/db?port=123`` - connects using a connection - string that includes the port - information as a separate ``port`` keyword. This will create the - following connection string:: - - DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass;port=123 - -* ``mssql+pyodbc://user:pass@host/db?driver=MyDriver`` - connects using a connection - string that includes a custom - ODBC driver name. This will create the following connection string:: - - DRIVER={MyDriver};Server=host;Database=db;UID=user;PWD=pass - -If you require a connection string that is outside the options -presented above, use the ``odbc_connect`` keyword to pass in a -urlencoded connection string. What gets passed in will be urldecoded -and passed directly. - -For example:: - - mssql+pyodbc:///?odbc_connect=dsn%3Dmydsn%3BDatabase%3Ddb - -would create the following connection string:: - - dsn=mydsn;Database=db - -Encoding your connection string can be easily accomplished through -the python shell. For example:: - - >>> import urllib - >>> urllib.quote_plus('dsn=mydsn;Database=db') - 'dsn%3Dmydsn%3BDatabase%3Ddb' - -Unicode Binds -^^^^^^^^^^^^^ - -The current state of PyODBC on a unix backend with FreeTDS and/or -EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC -versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically -alter how strings are received. The PyODBC dialect attempts to use all the information -it knows to determine whether or not a Python unicode literal can be -passed directly to the PyODBC driver or not; while SQLAlchemy can encode -these to bytestrings first, some users have reported that PyODBC mis-handles -bytestrings for certain encodings and requires a Python unicode object, -while the author has observed widespread cases where a Python unicode -is completely misinterpreted by PyODBC, particularly when dealing with -the information schema tables used in table reflection, and the value -must first be encoded to a bytestring. - -It is for this reason that whether or not unicode literals for bound -parameters be sent to PyODBC can be controlled using the -``supports_unicode_binds`` parameter to ``create_engine()``. When -left at its default of ``None``, the PyODBC dialect will use its -best guess as to whether or not the driver deals with unicode literals -well. When ``False``, unicode literals will be encoded first, and when -``True`` unicode literals will be passed straight through. This is an interim -flag that hopefully should not be needed when the unicode situation stabilizes -for unix + PyODBC. - -.. versionadded:: 0.7.7 - ``supports_unicode_binds`` parameter to ``create_engine()``\ . - -""" - -from sqlalchemy.dialects.mssql.base import MSExecutionContext, MSDialect -from sqlalchemy.connectors.pyodbc import PyODBCConnector -from sqlalchemy import types as sqltypes, util -import decimal - -class _MSNumeric_pyodbc(sqltypes.Numeric): - """Turns Decimals with adjusted() < 0 or > 7 into strings. - - This is the only method that is proven to work with Pyodbc+MSSQL - without crashing (floats can be used but seem to cause sporadic - crashes). 
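For reference, ``adjusted()`` above is the standard library
``decimal.Decimal.adjusted()`` method; a small illustration of the two
thresholds (stdlib behavior, not SQLAlchemy code)::

    >>> import decimal
    >>> decimal.Decimal("0.00001").adjusted()    # < 0: converted to string
    -5
    >>> decimal.Decimal("123456789").adjusted()  # > 7: converted to string
    8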
- - """ - - def bind_processor(self, dialect): - - super_process = super(_MSNumeric_pyodbc, self).\ - bind_processor(dialect) - - if not dialect._need_decimal_fix: - return super_process - - def process(value): - if self.asdecimal and \ - isinstance(value, decimal.Decimal): - - adjusted = value.adjusted() - if adjusted < 0: - return self._small_dec_to_string(value) - elif adjusted > 7: - return self._large_dec_to_string(value) - - if super_process: - return super_process(value) - else: - return value - return process - - # these routines needed for older versions of pyodbc. - # as of 2.1.8 this logic is integrated. - - def _small_dec_to_string(self, value): - return "%s0.%s%s" % ( - (value < 0 and '-' or ''), - '0' * (abs(value.adjusted()) - 1), - "".join([str(nint) for nint in value.as_tuple()[1]])) - - def _large_dec_to_string(self, value): - _int = value.as_tuple()[1] - if 'E' in str(value): - result = "%s%s%s" % ( - (value < 0 and '-' or ''), - "".join([str(s) for s in _int]), - "0" * (value.adjusted() - (len(_int)-1))) - else: - if (len(_int) - 1) > value.adjusted(): - result = "%s%s.%s" % ( - (value < 0 and '-' or ''), - "".join( - [str(s) for s in _int][0:value.adjusted() + 1]), - "".join( - [str(s) for s in _int][value.adjusted() + 1:])) - else: - result = "%s%s" % ( - (value < 0 and '-' or ''), - "".join( - [str(s) for s in _int][0:value.adjusted() + 1])) - return result - - -class MSExecutionContext_pyodbc(MSExecutionContext): - _embedded_scope_identity = False - - def pre_exec(self): - """where appropriate, issue "select scope_identity()" in the same - statement. - - Background on why "scope_identity()" is preferable to "@@identity": - http://msdn.microsoft.com/en-us/library/ms190315.aspx - - Background on why we attempt to embed "scope_identity()" into the same - statement as the INSERT: - http://code.google.com/p/pyodbc/wiki/FAQs#How_do_I_retrieve_autogenerated/identity_values? - - """ - - super(MSExecutionContext_pyodbc, self).pre_exec() - - # don't embed the scope_identity select into an - # "INSERT .. DEFAULT VALUES" - if self._select_lastrowid and \ - self.dialect.use_scope_identity and \ - len(self.parameters[0]): - self._embedded_scope_identity = True - - self.statement += "; select scope_identity()" - - def post_exec(self): - if self._embedded_scope_identity: - # Fetch the last inserted id from the manipulated statement - # We may have to skip over a number of result sets with - # no data (due to triggers, etc.) 
- while True: - try: - # fetchall() ensures the cursor is consumed - # without closing it (FreeTDS particularly) - row = self.cursor.fetchall()[0] - break - except self.dialect.dbapi.Error, e: - # no way around this - nextset() consumes the previous set - # so we need to just keep flipping - self.cursor.nextset() - - self._lastrowid = int(row[0]) - else: - super(MSExecutionContext_pyodbc, self).post_exec() - - -class MSDialect_pyodbc(PyODBCConnector, MSDialect): - - execution_ctx_cls = MSExecutionContext_pyodbc - - pyodbc_driver_name = 'SQL Server' - - colspecs = util.update_copy( - MSDialect.colspecs, - { - sqltypes.Numeric:_MSNumeric_pyodbc - } - ) - - def __init__(self, description_encoding='latin-1', **params): - super(MSDialect_pyodbc, self).__init__(**params) - self.description_encoding = description_encoding - self.use_scope_identity = self.use_scope_identity and \ - self.dbapi and \ - hasattr(self.dbapi.Cursor, 'nextset') - self._need_decimal_fix = self.dbapi and \ - self._dbapi_version() < (2, 1, 8) - -dialect = MSDialect_pyodbc diff --git a/libs/sqlalchemy/dialects/mssql/zxjdbc.py b/libs/sqlalchemy/dialects/mssql/zxjdbc.py deleted file mode 100644 index 842225da..00000000 --- a/libs/sqlalchemy/dialects/mssql/zxjdbc.py +++ /dev/null @@ -1,75 +0,0 @@ -# mssql/zxjdbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Microsoft SQL Server database via the zxjdbc JDBC -connector. - -JDBC Driver ------------ - -Requires the jTDS driver, available from: http://jtds.sourceforge.net/ - -Connecting ----------- - -URLs are of the standard form of -``mssql+zxjdbc://user:pass@host:port/dbname[?key=value&key=value...]``. - -Additional arguments which may be specified either as query string -arguments on the URL, or as keyword arguments to -:func:`~sqlalchemy.create_engine()` will be passed as Connection -properties to the underlying JDBC driver. 
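For example, a hypothetical URL carrying one extra driver property
(``loginTimeout`` is assumed here to be a jTDS connection property and is
used only as an illustration; substitute whatever your driver accepts)::

    create_engine(
        'mssql+zxjdbc://scott:tiger@myhost:1433/mydb?loginTimeout=30')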
- -""" -from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.mssql.base import MSDialect, MSExecutionContext -from sqlalchemy.engine import base - -class MSExecutionContext_zxjdbc(MSExecutionContext): - - _embedded_scope_identity = False - - def pre_exec(self): - super(MSExecutionContext_zxjdbc, self).pre_exec() - # scope_identity after the fact returns null in jTDS so we must - # embed it - if self._select_lastrowid and self.dialect.use_scope_identity: - self._embedded_scope_identity = True - self.statement += "; SELECT scope_identity()" - - def post_exec(self): - if self._embedded_scope_identity: - while True: - try: - row = self.cursor.fetchall()[0] - break - except self.dialect.dbapi.Error, e: - self.cursor.nextset() - self._lastrowid = int(row[0]) - - if (self.isinsert or self.isupdate or self.isdelete) and \ - self.compiled.returning: - self._result_proxy = base.FullyBufferedResultProxy(self) - - if self._enable_identity_insert: - table = self.dialect.identifier_preparer.format_table( - self.compiled.statement.table) - self.cursor.execute("SET IDENTITY_INSERT %s OFF" % table) - - -class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect): - jdbc_db_name = 'jtds:sqlserver' - jdbc_driver_name = 'net.sourceforge.jtds.jdbc.Driver' - - execution_ctx_cls = MSExecutionContext_zxjdbc - - def _get_server_version_info(self, connection): - return tuple( - int(x) - for x in connection.connection.dbversion.split('.') - ) - -dialect = MSDialect_zxjdbc diff --git a/libs/sqlalchemy/dialects/mysql/__init__.py b/libs/sqlalchemy/dialects/mysql/__init__.py deleted file mode 100644 index c41dd0b1..00000000 --- a/libs/sqlalchemy/dialects/mysql/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -# mysql/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.mysql import base, mysqldb, oursql, \ - pyodbc, zxjdbc, mysqlconnector, pymysql,\ - gaerdbms - -# default dialect -base.dialect = mysqldb.dialect - -from sqlalchemy.dialects.mysql.base import \ - BIGINT, BINARY, BIT, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \ - DECIMAL, DOUBLE, ENUM, DECIMAL,\ - FLOAT, INTEGER, INTEGER, LONGBLOB, LONGTEXT, MEDIUMBLOB, \ - MEDIUMINT, MEDIUMTEXT, NCHAR, \ - NVARCHAR, NUMERIC, SET, SMALLINT, REAL, TEXT, TIME, TIMESTAMP, \ - TINYBLOB, TINYINT, TINYTEXT,\ - VARBINARY, VARCHAR, YEAR, dialect - -__all__ = ( -'BIGINT', 'BINARY', 'BIT', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE', -'ENUM', 'DECIMAL', 'FLOAT', 'INTEGER', 'INTEGER', 'LONGBLOB', 'LONGTEXT', 'MEDIUMBLOB', 'MEDIUMINT', -'MEDIUMTEXT', 'NCHAR', 'NVARCHAR', 'NUMERIC', 'SET', 'SMALLINT', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP', -'TINYBLOB', 'TINYINT', 'TINYTEXT', 'VARBINARY', 'VARCHAR', 'YEAR', 'dialect' -) diff --git a/libs/sqlalchemy/dialects/mysql/base.py b/libs/sqlalchemy/dialects/mysql/base.py deleted file mode 100644 index ea180eee..00000000 --- a/libs/sqlalchemy/dialects/mysql/base.py +++ /dev/null @@ -1,2759 +0,0 @@ -# mysql/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the MySQL database. - -Supported Versions and Features -------------------------------- - -SQLAlchemy supports MySQL starting with version 4.1 through modern releases. 
-However, no heroic measures are taken to work around major missing -SQL features - if your server version does not support sub-selects, for -example, they won't work in SQLAlchemy either. - -See the official MySQL documentation for detailed information about features -supported in any given server release. - -Connecting ----------- - -See the API documentation on individual drivers for details on connecting. - -Connection Timeouts -------------------- - -MySQL features an automatic connection close behavior, for connections that have -been idle for eight hours or more. To circumvent having this issue, use the -``pool_recycle`` option which controls the maximum age of any connection:: - - engine = create_engine('mysql+mysqldb://...', pool_recycle=3600) - -.. _mysql_storage_engines: - -Storage Engines ---------------- - -Most MySQL server installations have a default table type of ``MyISAM``, a -non-transactional table type. During a transaction, non-transactional storage -engines do not participate and continue to store table changes in autocommit -mode. For fully atomic transactions as well as support for foreign key -constraints, all participating tables must use a -transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, `PBXT`, etc. - -Storage engines can be elected when creating tables in SQLAlchemy by supplying -a ``mysql_engine='whatever'`` to the ``Table`` constructor. Any MySQL table -creation option can be specified in this syntax:: - - Table('mytable', metadata, - Column('data', String(32)), - mysql_engine='InnoDB', - mysql_charset='utf8' - ) - -.. seealso:: - - `The InnoDB Storage Engine `_ - on the MySQL website. - -Case Sensitivity and Table Reflection -------------------------------------- - -MySQL has inconsistent support for case-sensitive identifier -names, basing support on specific details of the underlying -operating system. However, it has been observed that no matter -what case sensitivity behavior is present, the names of tables in -foreign key declarations are *always* received from the database -as all-lower case, making it impossible to accurately reflect a -schema where inter-related tables use mixed-case identifier names. - -Therefore it is strongly advised that table names be declared as -all lower case both within SQLAlchemy as well as on the MySQL -database itself, especially if database reflection features are -to be used. - -Transaction Isolation Level ---------------------------- - -:func:`.create_engine` accepts an ``isolation_level`` -parameter which results in the command ``SET SESSION -TRANSACTION ISOLATION LEVEL `` being invoked for -every new connection. Valid values for this parameter are -``READ COMMITTED``, ``READ UNCOMMITTED``, -``REPEATABLE READ``, and ``SERIALIZABLE``:: - - engine = create_engine( - "mysql://scott:tiger@localhost/test", - isolation_level="READ UNCOMMITTED" - ) - -.. versionadded:: 0.7.6 - -Keys ----- - -Not all MySQL storage engines support foreign keys. For ``MyISAM`` and -similar engines, the information loaded by table reflection will not include -foreign keys. For these tables, you may supply a -:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time:: - - Table('mytable', metadata, - ForeignKeyConstraint(['other_id'], ['othertable.other_id']), - autoload=True - ) - -When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on -an integer primary key column:: - - >>> t = Table('mytable', metadata, - ... Column('mytable_id', Integer, primary_key=True) - ... 
) - >>> t.create() - CREATE TABLE mytable ( - id INTEGER NOT NULL AUTO_INCREMENT, - PRIMARY KEY (id) - ) - -You can disable this behavior by supplying ``autoincrement=False`` to the -:class:`~sqlalchemy.Column`. This flag can also be used to enable -auto-increment on a secondary column in a multi-column key for some storage -engines:: - - Table('mytable', metadata, - Column('gid', Integer, primary_key=True, autoincrement=False), - Column('id', Integer, primary_key=True) - ) - -SQL Mode --------- - -MySQL SQL modes are supported. Modes that enable ``ANSI_QUOTES`` (such as -``ANSI``) require an engine option to modify SQLAlchemy's quoting style. -When using an ANSI-quoting mode, supply ``use_ansiquotes=True`` when -creating your ``Engine``:: - - create_engine('mysql://localhost/test', use_ansiquotes=True) - -This is an engine-wide option and is not toggleable on a per-connection basis. -SQLAlchemy does not presume to ``SET sql_mode`` for you with this option. For -the best performance, set the quoting style server-wide in ``my.cnf`` or by -supplying ``--sql-mode`` to ``mysqld``. You can also use a -:class:`sqlalchemy.pool.Pool` listener hook to issue a ``SET SESSION -sql_mode='...'`` on connect to configure each connection. - -If you do not specify ``use_ansiquotes``, the regular MySQL quoting style is -used by default. - -If you do issue a ``SET sql_mode`` through SQLAlchemy, the dialect must be -updated if the quoting style is changed. Again, this change will affect all -connections:: - - connection.execute('SET sql_mode="ansi"') - connection.dialect.use_ansiquotes = True - -MySQL SQL Extensions --------------------- - -Many of the MySQL SQL extensions are handled through SQLAlchemy's generic -function and operator support:: - - table.select(table.c.password==func.md5('plaintext')) - table.select(table.c.username.op('regexp')('^[a-d]')) - -And of course any valid MySQL statement can be executed as a string as well. - -Some limited direct support for MySQL extensions to SQL is currently -available. - -* SELECT pragma:: - - select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT']) - -* UPDATE with LIMIT:: - - update(..., mysql_limit=10) - -rowcount Support ----------------- - -SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the -usual definition of "number of rows matched by an UPDATE or DELETE" statement. -This is in contradiction to the default setting on most MySQL DBAPI drivers, -which is "number of rows actually modified/deleted". For this reason, the -SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag, -or whatever is equivalent for the DBAPI in use, on connect, unless the flag value -is overridden using DBAPI-specific options -(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the -OurSQL driver). - -See also: - -:attr:`.ResultProxy.rowcount` - - -CAST Support ------------- - -MySQL documents the CAST operator as available in version 4.0.2. When using the -SQLAlchemy :func:`.cast` function, SQLAlchemy -will not render the CAST token on MySQL before this version, based on server version -detection, instead rendering the internal expression directly. - -CAST may still not be desirable on an early MySQL version post-4.0.2, as it didn't -add all datatype support until 4.1.1. 
If your application falls into this -narrow area, the behavior of CAST can be controlled using the :ref:`sqlalchemy.ext.compiler_toplevel` -system, as per the recipe below:: - - from sqlalchemy.sql.expression import _Cast - from sqlalchemy.ext.compiler import compiles - - @compiles(_Cast, 'mysql') - def _check_mysql_version(element, compiler, **kw): - if compiler.dialect.server_version_info < (4, 1, 0): - return compiler.process(element.clause, **kw) - else: - return compiler.visit_cast(element, **kw) - -The above function, which only needs to be declared once -within an application, overrides the compilation of the -:func:`.cast` construct to check for version 4.1.0 before -fully rendering CAST; else the internal element of the -construct is rendered directly. - - -.. _mysql_indexes: - -MySQL Specific Index Options ----------------------------- - -MySQL-specific extensions to the :class:`.Index` construct are available. - -Index Length -~~~~~~~~~~~~~ - -MySQL provides an option to create index entries with a certain length, where -"length" refers to the number of characters or bytes in each value which will -become part of the index. SQLAlchemy provides this feature via the -``mysql_length`` parameter:: - - Index('my_index', my_table.c.data, mysql_length=10) - -Prefix lengths are given in characters for nonbinary string types and in bytes -for binary string types. The value passed to the keyword argument will be -simply passed through to the underlying CREATE INDEX command, so it *must* be -an integer. MySQL only allows a length for an index if it is for a CHAR, -VARCHAR, TEXT, BINARY, VARBINARY and BLOB. - -Index Types -~~~~~~~~~~~~~ - -Some MySQL storage engines permit you to specify an index type when creating -an index or primary key constraint. SQLAlchemy provides this feature via the -``mysql_using`` parameter on :class:`.Index`:: - - Index('my_index', my_table.c.data, mysql_using='hash') - -As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`:: - - PrimaryKeyConstraint("data", mysql_using='hash') - -The value passed to the keyword argument will be simply passed through to the -underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index -type for your MySQL storage engine. 
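Both options may be combined on a single index; a sketch (the table and
column names are illustrative only)::

    from sqlalchemy import MetaData, Table, Column, Integer, String, Index

    metadata = MetaData()
    pages = Table('pages', metadata,
        Column('page_id', Integer, primary_key=True),
        Column('path', String(255)),
        mysql_engine='MyISAM',
    )

    # prefix index on the first 50 characters, built as a BTREE
    Index('ix_pages_path', pages.c.path, mysql_length=50, mysql_using='btree')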
- -More information can be found at: - -http://dev.mysql.com/doc/refman/5.0/en/create-index.html - -http://dev.mysql.com/doc/refman/5.0/en/create-table.html - -""" - -import datetime, inspect, re, sys - -from sqlalchemy import schema as sa_schema -from sqlalchemy import exc, log, sql, util -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy.sql import functions as sql_functions -from sqlalchemy.sql import compiler -from array import array as _array - -from sqlalchemy.engine import reflection -from sqlalchemy.engine import base as engine_base, default -from sqlalchemy import types as sqltypes -from sqlalchemy.util import topological -from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \ - BLOB, BINARY, VARBINARY - -RESERVED_WORDS = set( - ['accessible', 'add', 'all', 'alter', 'analyze','and', 'as', 'asc', - 'asensitive', 'before', 'between', 'bigint', 'binary', 'blob', 'both', - 'by', 'call', 'cascade', 'case', 'change', 'char', 'character', 'check', - 'collate', 'column', 'condition', 'constraint', 'continue', 'convert', - 'create', 'cross', 'current_date', 'current_time', 'current_timestamp', - 'current_user', 'cursor', 'database', 'databases', 'day_hour', - 'day_microsecond', 'day_minute', 'day_second', 'dec', 'decimal', - 'declare', 'default', 'delayed', 'delete', 'desc', 'describe', - 'deterministic', 'distinct', 'distinctrow', 'div', 'double', 'drop', - 'dual', 'each', 'else', 'elseif', 'enclosed', 'escaped', 'exists', - 'exit', 'explain', 'false', 'fetch', 'float', 'float4', 'float8', - 'for', 'force', 'foreign', 'from', 'fulltext', 'grant', 'group', 'having', - 'high_priority', 'hour_microsecond', 'hour_minute', 'hour_second', 'if', - 'ignore', 'in', 'index', 'infile', 'inner', 'inout', 'insensitive', - 'insert', 'int', 'int1', 'int2', 'int3', 'int4', 'int8', 'integer', - 'interval', 'into', 'is', 'iterate', 'join', 'key', 'keys', 'kill', - 'leading', 'leave', 'left', 'like', 'limit', 'linear', 'lines', 'load', - 'localtime', 'localtimestamp', 'lock', 'long', 'longblob', 'longtext', - 'loop', 'low_priority', 'master_ssl_verify_server_cert', 'match', - 'mediumblob', 'mediumint', 'mediumtext', 'middleint', - 'minute_microsecond', 'minute_second', 'mod', 'modifies', 'natural', - 'not', 'no_write_to_binlog', 'null', 'numeric', 'on', 'optimize', - 'option', 'optionally', 'or', 'order', 'out', 'outer', 'outfile', - 'precision', 'primary', 'procedure', 'purge', 'range', 'read', 'reads', - 'read_only', 'read_write', 'real', 'references', 'regexp', 'release', - 'rename', 'repeat', 'replace', 'require', 'restrict', 'return', - 'revoke', 'right', 'rlike', 'schema', 'schemas', 'second_microsecond', - 'select', 'sensitive', 'separator', 'set', 'show', 'smallint', 'spatial', - 'specific', 'sql', 'sqlexception', 'sqlstate', 'sqlwarning', - 'sql_big_result', 'sql_calc_found_rows', 'sql_small_result', 'ssl', - 'starting', 'straight_join', 'table', 'terminated', 'then', 'tinyblob', - 'tinyint', 'tinytext', 'to', 'trailing', 'trigger', 'true', 'undo', - 'union', 'unique', 'unlock', 'unsigned', 'update', 'usage', 'use', - 'using', 'utc_date', 'utc_time', 'utc_timestamp', 'values', 'varbinary', - 'varchar', 'varcharacter', 'varying', 'when', 'where', 'while', 'with', - 'write', 'x509', 'xor', 'year_month', 'zerofill', # 5.0 - 'columns', 'fields', 'privileges', 'soname', 'tables', # 4.1 - 'accessible', 'linear', 'master_ssl_verify_server_cert', 'range', - 'read_only', 'read_write', # 5.1 - ]) - -AUTOCOMMIT_RE = re.compile( - r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER|LOAD 
+DATA|REPLACE)', - re.I | re.UNICODE) -SET_RE = re.compile( - r'\s*SET\s+(?:(?:GLOBAL|SESSION)\s+)?\w', - re.I | re.UNICODE) - - -class _NumericType(object): - """Base for MySQL numeric types.""" - - def __init__(self, unsigned=False, zerofill=False, **kw): - self.unsigned = unsigned - self.zerofill = zerofill - super(_NumericType, self).__init__(**kw) - -class _FloatType(_NumericType, sqltypes.Float): - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - if isinstance(self, (REAL, DOUBLE)) and \ - ( - (precision is None and scale is not None) or - (precision is not None and scale is None) - ): - raise exc.ArgumentError( - "You must specify both precision and scale or omit " - "both altogether.") - - super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw) - self.scale = scale - -class _IntegerType(_NumericType, sqltypes.Integer): - def __init__(self, display_width=None, **kw): - self.display_width = display_width - super(_IntegerType, self).__init__(**kw) - -class _StringType(sqltypes.String): - """Base for MySQL string types.""" - - def __init__(self, charset=None, collation=None, - ascii=False, binary=False, - national=False, **kw): - self.charset = charset - # allow collate= or collation= - self.collation = kw.pop('collate', collation) - self.ascii = ascii - # We have to munge the 'unicode' param strictly as a dict - # otherwise 2to3 will turn it into str. - self.__dict__['unicode'] = kw.get('unicode', False) - # sqltypes.String does not accept the 'unicode' arg at all. - if 'unicode' in kw: - del kw['unicode'] - self.binary = binary - self.national = national - super(_StringType, self).__init__(**kw) - - def __repr__(self): - attributes = inspect.getargspec(self.__init__)[0][1:] - attributes.extend(inspect.getargspec(_StringType.__init__)[0][1:]) - - params = {} - for attr in attributes: - val = getattr(self, attr) - if val is not None and val is not False: - params[attr] = val - - return "%s(%s)" % (self.__class__.__name__, - ', '.join(['%s=%r' % (k, params[k]) for k in params])) - - -class NUMERIC(_NumericType, sqltypes.NUMERIC): - """MySQL NUMERIC type.""" - - __visit_name__ = 'NUMERIC' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a NUMERIC. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw) - - -class DECIMAL(_NumericType, sqltypes.DECIMAL): - """MySQL DECIMAL type.""" - - __visit_name__ = 'DECIMAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DECIMAL. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. 
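For example, combining these flags (rendered by the MySQL type compiler
as ``DECIMAL(10, 2) UNSIGNED ZEROFILL``)::

    Column('price', DECIMAL(precision=10, scale=2,
                            unsigned=True, zerofill=True))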
- - """ - super(DECIMAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - -class DOUBLE(_FloatType): - """MySQL DOUBLE type.""" - - __visit_name__ = 'DOUBLE' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a DOUBLE. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(DOUBLE, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - -class REAL(_FloatType, sqltypes.REAL): - """MySQL REAL type.""" - - __visit_name__ = 'REAL' - - def __init__(self, precision=None, scale=None, asdecimal=True, **kw): - """Construct a REAL. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(REAL, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - -class FLOAT(_FloatType, sqltypes.FLOAT): - """MySQL FLOAT type.""" - - __visit_name__ = 'FLOAT' - - def __init__(self, precision=None, scale=None, asdecimal=False, **kw): - """Construct a FLOAT. - - :param precision: Total digits in this number. If scale and precision - are both None, values are stored to limits allowed by the server. - - :param scale: The number of digits after the decimal point. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(FLOAT, self).__init__(precision=precision, scale=scale, - asdecimal=asdecimal, **kw) - - def bind_processor(self, dialect): - return None - -class INTEGER(_IntegerType, sqltypes.INTEGER): - """MySQL INTEGER type.""" - - __visit_name__ = 'INTEGER' - - def __init__(self, display_width=None, **kw): - """Construct an INTEGER. - - :param display_width: Optional, maximum display width for this number. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(INTEGER, self).__init__(display_width=display_width, **kw) - -class BIGINT(_IntegerType, sqltypes.BIGINT): - """MySQL BIGINTEGER type.""" - - __visit_name__ = 'BIGINT' - - def __init__(self, display_width=None, **kw): - """Construct a BIGINTEGER. - - :param display_width: Optional, maximum display width for this number. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. 
- - """ - super(BIGINT, self).__init__(display_width=display_width, **kw) - -class MEDIUMINT(_IntegerType): - """MySQL MEDIUMINTEGER type.""" - - __visit_name__ = 'MEDIUMINT' - - def __init__(self, display_width=None, **kw): - """Construct a MEDIUMINTEGER - - :param display_width: Optional, maximum display width for this number. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(MEDIUMINT, self).__init__(display_width=display_width, **kw) - -class TINYINT(_IntegerType): - """MySQL TINYINT type.""" - - __visit_name__ = 'TINYINT' - - def __init__(self, display_width=None, **kw): - """Construct a TINYINT. - - Note: following the usual MySQL conventions, TINYINT(1) columns - reflected during Table(..., autoload=True) are treated as - Boolean columns. - - :param display_width: Optional, maximum display width for this number. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(TINYINT, self).__init__(display_width=display_width, **kw) - -class SMALLINT(_IntegerType, sqltypes.SMALLINT): - """MySQL SMALLINTEGER type.""" - - __visit_name__ = 'SMALLINT' - - def __init__(self, display_width=None, **kw): - """Construct a SMALLINTEGER. - - :param display_width: Optional, maximum display width for this number. - - :param unsigned: a boolean, optional. - - :param zerofill: Optional. If true, values will be stored as strings - left-padded with zeros. Note that this does not effect the values - returned by the underlying database API, which continue to be - numeric. - - """ - super(SMALLINT, self).__init__(display_width=display_width, **kw) - -class BIT(sqltypes.TypeEngine): - """MySQL BIT type. - - This type is for MySQL 5.0.3 or greater for MyISAM, and 5.0.5 or greater for - MyISAM, MEMORY, InnoDB and BDB. For older versions, use a MSTinyInteger() - type. - - """ - - __visit_name__ = 'BIT' - - def __init__(self, length=None): - """Construct a BIT. - - :param length: Optional, number of bits. - - """ - self.length = length - - def result_processor(self, dialect, coltype): - """Convert a MySQL's 64 bit, variable length binary string to a long. - - TODO: this is MySQL-db, pyodbc specific. OurSQL and mysqlconnector - already do this, so this logic should be moved to those dialects. 
- - """ - - def process(value): - if value is not None: - v = 0L - for i in map(ord, value): - v = v << 8 | i - return v - return value - return process - -class _MSTime(sqltypes.Time): - """MySQL TIME type.""" - - __visit_name__ = 'TIME' - - def result_processor(self, dialect, coltype): - time = datetime.time - def process(value): - # convert from a timedelta value - if value is not None: - seconds = value.seconds - minutes = seconds / 60 - return time(minutes / 60, minutes % 60, seconds - minutes * 60) - else: - return None - return process - -class TIMESTAMP(sqltypes.TIMESTAMP): - """MySQL TIMESTAMP type.""" - __visit_name__ = 'TIMESTAMP' - -class YEAR(sqltypes.TypeEngine): - """MySQL YEAR type, for single byte storage of years 1901-2155.""" - - __visit_name__ = 'YEAR' - - def __init__(self, display_width=None): - self.display_width = display_width - -class TEXT(_StringType, sqltypes.TEXT): - """MySQL TEXT type, for text up to 2^16 characters.""" - - __visit_name__ = 'TEXT' - - def __init__(self, length=None, **kw): - """Construct a TEXT. - - :param length: Optional, if provided the server may optimize storage - by substituting the smallest TEXT type sufficient to store - ``length`` characters. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param national: Optional. If true, use the server's configured - national character set. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - super(TEXT, self).__init__(length=length, **kw) - -class TINYTEXT(_StringType): - """MySQL TINYTEXT type, for text up to 2^8 characters.""" - - __visit_name__ = 'TINYTEXT' - - def __init__(self, **kwargs): - """Construct a TINYTEXT. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param national: Optional. If true, use the server's configured - national character set. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - super(TINYTEXT, self).__init__(**kwargs) - -class MEDIUMTEXT(_StringType): - """MySQL MEDIUMTEXT type, for text up to 2^24 characters.""" - - __visit_name__ = 'MEDIUMTEXT' - - def __init__(self, **kwargs): - """Construct a MEDIUMTEXT. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. 
- - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param national: Optional. If true, use the server's configured - national character set. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - super(MEDIUMTEXT, self).__init__(**kwargs) - -class LONGTEXT(_StringType): - """MySQL LONGTEXT type, for text up to 2^32 characters.""" - - __visit_name__ = 'LONGTEXT' - - def __init__(self, **kwargs): - """Construct a LONGTEXT. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param national: Optional. If true, use the server's configured - national character set. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - super(LONGTEXT, self).__init__(**kwargs) - - -class VARCHAR(_StringType, sqltypes.VARCHAR): - """MySQL VARCHAR type, for variable-length character data.""" - - __visit_name__ = 'VARCHAR' - - def __init__(self, length=None, **kwargs): - """Construct a VARCHAR. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param national: Optional. If true, use the server's configured - national character set. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - """ - super(VARCHAR, self).__init__(length=length, **kwargs) - -class CHAR(_StringType, sqltypes.CHAR): - """MySQL CHAR type, for fixed-length character data.""" - - __visit_name__ = 'CHAR' - - def __init__(self, length=None, **kwargs): - """Construct a CHAR. - - :param length: Maximum data length, in characters. - - :param binary: Optional, use the default binary collation for the - national character set. This does not affect the type of data - stored, use a BINARY type for binary data. - - :param collation: Optional, request a particular collation. Must be - compatible with the national character set. 
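For example, an explicit character set plus collation (a sketch; per the
type compiler later in this module this renders as
``CHAR(2) CHARACTER SET utf8 COLLATE utf8_bin``)::

    Column('country_code', CHAR(2, charset='utf8', collation='utf8_bin'))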
- - """ - super(CHAR, self).__init__(length=length, **kwargs) - -class NVARCHAR(_StringType, sqltypes.NVARCHAR): - """MySQL NVARCHAR type. - - For variable-length character data in the server's configured national - character set. - """ - - __visit_name__ = 'NVARCHAR' - - def __init__(self, length=None, **kwargs): - """Construct an NVARCHAR. - - :param length: Maximum data length, in characters. - - :param binary: Optional, use the default binary collation for the - national character set. This does not affect the type of data - stored, use a BINARY type for binary data. - - :param collation: Optional, request a particular collation. Must be - compatible with the national character set. - - """ - kwargs['national'] = True - super(NVARCHAR, self).__init__(length=length, **kwargs) - - -class NCHAR(_StringType, sqltypes.NCHAR): - """MySQL NCHAR type. - - For fixed-length character data in the server's configured national - character set. - """ - - __visit_name__ = 'NCHAR' - - def __init__(self, length=None, **kwargs): - """Construct an NCHAR. - - :param length: Maximum data length, in characters. - - :param binary: Optional, use the default binary collation for the - national character set. This does not affect the type of data - stored, use a BINARY type for binary data. - - :param collation: Optional, request a particular collation. Must be - compatible with the national character set. - - """ - kwargs['national'] = True - super(NCHAR, self).__init__(length=length, **kwargs) - - - - -class TINYBLOB(sqltypes._Binary): - """MySQL TINYBLOB type, for binary data up to 2^8 bytes.""" - - __visit_name__ = 'TINYBLOB' - -class MEDIUMBLOB(sqltypes._Binary): - """MySQL MEDIUMBLOB type, for binary data up to 2^24 bytes.""" - - __visit_name__ = 'MEDIUMBLOB' - -class LONGBLOB(sqltypes._Binary): - """MySQL LONGBLOB type, for binary data up to 2^32 bytes.""" - - __visit_name__ = 'LONGBLOB' - -class ENUM(sqltypes.Enum, _StringType): - """MySQL ENUM type.""" - - __visit_name__ = 'ENUM' - - def __init__(self, *enums, **kw): - """Construct an ENUM. - - Example: - - Column('myenum', MSEnum("foo", "bar", "baz")) - - :param enums: The range of valid values for this ENUM. Values will be - quoted when generating the schema according to the quoting flag (see - below). - - :param strict: Defaults to False: ensure that a given value is in this - ENUM's range of permissible values when inserting or updating rows. - Note that MySQL will not raise a fatal error if you attempt to store - an out of range value- an alternate value will be stored instead. - (See MySQL ENUM documentation.) - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. - - :param quoting: Defaults to 'auto': automatically determine enum value - quoting. If all enum values are surrounded by the same quoting - character, then use 'quoted' mode. Otherwise, use 'unquoted' mode. 
- - 'quoted': values in enums are already quoted, they will be used - directly when generating the schema - this usage is deprecated. - - 'unquoted': values in enums are not quoted, they will be escaped and - surrounded by single quotes when generating the schema. - - Previous versions of this type always required manually quoted - values to be supplied; future versions will always quote the string - literals for you. This is a transitional option. - - """ - self.quoting = kw.pop('quoting', 'auto') - - if self.quoting == 'auto' and len(enums): - # What quoting character are we using? - q = None - for e in enums: - if len(e) == 0: - self.quoting = 'unquoted' - break - elif q is None: - q = e[0] - - if e[0] != q or e[-1] != q: - self.quoting = 'unquoted' - break - else: - self.quoting = 'quoted' - - if self.quoting == 'quoted': - util.warn_deprecated( - 'Manually quoting ENUM value literals is deprecated. Supply ' - 'unquoted values and use the quoting= option in cases of ' - 'ambiguity.') - enums = self._strip_enums(enums) - - self.strict = kw.pop('strict', False) - length = max([len(v) for v in enums] + [0]) - kw.pop('metadata', None) - kw.pop('schema', None) - kw.pop('name', None) - kw.pop('quote', None) - kw.pop('native_enum', None) - _StringType.__init__(self, length=length, **kw) - sqltypes.Enum.__init__(self, *enums) - - @classmethod - def _strip_enums(cls, enums): - strip_enums = [] - for a in enums: - if a[0:1] == '"' or a[0:1] == "'": - # strip enclosing quotes and unquote interior - a = a[1:-1].replace(a[0] * 2, a[0]) - strip_enums.append(a) - return strip_enums - - def bind_processor(self, dialect): - super_convert = super(ENUM, self).bind_processor(dialect) - def process(value): - if self.strict and value is not None and value not in self.enums: - raise exc.InvalidRequestError('"%s" not a valid value for ' - 'this enum' % value) - if super_convert: - return super_convert(value) - else: - return value - return process - - def adapt(self, impltype, **kw): - kw['strict'] = self.strict - return sqltypes.Enum.adapt(self, impltype, **kw) - -class SET(_StringType): - """MySQL SET type.""" - - __visit_name__ = 'SET' - - def __init__(self, *values, **kw): - """Construct a SET. - - Example:: - - Column('myset', MSSet("'foo'", "'bar'", "'baz'")) - - :param values: The range of valid values for this SET. Values will be - used exactly as they appear when generating schemas. Strings must - be quoted, as in the example above. Single-quotes are suggested for - ANSI compatibility and are required for portability to servers with - ANSI_QUOTES enabled. - - :param charset: Optional, a column-level character set for this string - value. Takes precedence to 'ascii' or 'unicode' short-hand. - - :param collation: Optional, a column-level collation for this string - value. Takes precedence to 'binary' short-hand. - - :param ascii: Defaults to False: short-hand for the ``latin1`` - character set, generates ASCII in schema. - - :param unicode: Defaults to False: short-hand for the ``ucs2`` - character set, generates UNICODE in schema. - - :param binary: Defaults to False: short-hand, pick the binary - collation type that matches the column's character set. Generates - BINARY in schema. This does not affect the type of data stored, - only the collation of character data. 
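A fuller sketch of declaring a SET column and the round trip implied by
the processors below (table and column names are illustrative)::

    Table('documents', metadata,
        Column('doc_id', Integer, primary_key=True),
        Column('perms', SET("'read'", "'write'", "'admin'")),
    )

    # a Python set may be bound directly; the bind processor joins it as
    # 'read,write' on the way in, and the result processor splits it back
    # into a set on the way out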
- - """ - self._ddl_values = values - - strip_values = [] - for a in values: - if a[0:1] == '"' or a[0:1] == "'": - # strip enclosing quotes and unquote interior - a = a[1:-1].replace(a[0] * 2, a[0]) - strip_values.append(a) - - self.values = strip_values - kw.setdefault('length', max([len(v) for v in strip_values] + [0])) - super(SET, self).__init__(**kw) - - def result_processor(self, dialect, coltype): - def process(value): - # The good news: - # No ',' quoting issues- commas aren't allowed in SET values - # The bad news: - # Plenty of driver inconsistencies here. - if isinstance(value, util.set_types): - # ..some versions convert '' to an empty set - if not value: - value.add('') - # ..some return sets.Set, even for pythons that have __builtin__.set - if not isinstance(value, set): - value = set(value) - return value - # ...and some versions return strings - if value is not None: - return set(value.split(',')) - else: - return value - return process - - def bind_processor(self, dialect): - super_convert = super(SET, self).bind_processor(dialect) - def process(value): - if value is None or isinstance(value, (int, long, basestring)): - pass - else: - if None in value: - value = set(value) - value.remove(None) - value.add('') - value = ','.join(value) - if super_convert: - return super_convert(value) - else: - return value - return process - -# old names -MSTime = _MSTime -MSSet = SET -MSEnum = ENUM -MSLongBlob = LONGBLOB -MSMediumBlob = MEDIUMBLOB -MSTinyBlob = TINYBLOB -MSBlob = BLOB -MSBinary = BINARY -MSVarBinary = VARBINARY -MSNChar = NCHAR -MSNVarChar = NVARCHAR -MSChar = CHAR -MSString = VARCHAR -MSLongText = LONGTEXT -MSMediumText = MEDIUMTEXT -MSTinyText = TINYTEXT -MSText = TEXT -MSYear = YEAR -MSTimeStamp = TIMESTAMP -MSBit = BIT -MSSmallInteger = SMALLINT -MSTinyInteger = TINYINT -MSMediumInteger = MEDIUMINT -MSBigInteger = BIGINT -MSNumeric = NUMERIC -MSDecimal = DECIMAL -MSDouble = DOUBLE -MSReal = REAL -MSFloat = FLOAT -MSInteger = INTEGER - -colspecs = { - sqltypes.Numeric: NUMERIC, - sqltypes.Float: FLOAT, - sqltypes.Time: _MSTime, - sqltypes.Enum: ENUM, -} - -# Everything 3.23 through 5.1 excepting OpenGIS types. 
-ischema_names = { - 'bigint': BIGINT, - 'binary': BINARY, - 'bit': BIT, - 'blob': BLOB, - 'boolean': BOOLEAN, - 'char': CHAR, - 'date': DATE, - 'datetime': DATETIME, - 'decimal': DECIMAL, - 'double': DOUBLE, - 'enum': ENUM, - 'fixed': DECIMAL, - 'float': FLOAT, - 'int': INTEGER, - 'integer': INTEGER, - 'longblob': LONGBLOB, - 'longtext': LONGTEXT, - 'mediumblob': MEDIUMBLOB, - 'mediumint': MEDIUMINT, - 'mediumtext': MEDIUMTEXT, - 'nchar': NCHAR, - 'nvarchar': NVARCHAR, - 'numeric': NUMERIC, - 'set': SET, - 'smallint': SMALLINT, - 'text': TEXT, - 'time': TIME, - 'timestamp': TIMESTAMP, - 'tinyblob': TINYBLOB, - 'tinyint': TINYINT, - 'tinytext': TINYTEXT, - 'varbinary': VARBINARY, - 'varchar': VARCHAR, - 'year': YEAR, -} - -class MySQLExecutionContext(default.DefaultExecutionContext): - - def should_autocommit_text(self, statement): - return AUTOCOMMIT_RE.match(statement) - -class MySQLCompiler(compiler.SQLCompiler): - - render_table_with_column_in_update_from = True - """Overridden from base SQLCompiler value""" - - extract_map = compiler.SQLCompiler.extract_map.copy() - extract_map.update ({ - 'milliseconds': 'millisecond', - }) - - def visit_random_func(self, fn, **kw): - return "rand%s" % self.function_argspec(fn) - - def visit_utc_timestamp_func(self, fn, **kw): - return "UTC_TIMESTAMP" - - def visit_sysdate_func(self, fn, **kw): - return "SYSDATE()" - - def visit_concat_op(self, binary, **kw): - return "concat(%s, %s)" % (self.process(binary.left), self.process(binary.right)) - - def visit_match_op(self, binary, **kw): - return "MATCH (%s) AGAINST (%s IN BOOLEAN MODE)" % (self.process(binary.left), self.process(binary.right)) - - def get_from_hint_text(self, table, text): - return text - - def visit_typeclause(self, typeclause): - type_ = typeclause.type.dialect_impl(self.dialect) - if isinstance(type_, sqltypes.Integer): - if getattr(type_, 'unsigned', False): - return 'UNSIGNED INTEGER' - else: - return 'SIGNED INTEGER' - elif isinstance(type_, sqltypes.TIMESTAMP): - return 'DATETIME' - elif isinstance(type_, (sqltypes.DECIMAL, sqltypes.DateTime, sqltypes.Date, sqltypes.Time)): - return self.dialect.type_compiler.process(type_) - elif isinstance(type_, sqltypes.Text): - return 'CHAR' - elif (isinstance(type_, sqltypes.String) and not - isinstance(type_, (ENUM, SET))): - if getattr(type_, 'length'): - return 'CHAR(%s)' % type_.length - else: - return 'CHAR' - elif isinstance(type_, sqltypes._Binary): - return 'BINARY' - elif isinstance(type_, sqltypes.NUMERIC): - return self.dialect.type_compiler.process(type_).replace('NUMERIC', 'DECIMAL') - else: - return None - - def visit_cast(self, cast, **kwargs): - # No cast until 4, no decimals until 5. - if not self.dialect._supports_cast: - return self.process(cast.clause.self_group()) - - type_ = self.process(cast.typeclause) - if type_ is None: - return self.process(cast.clause.self_group()) - - return 'CAST(%s AS %s)' % (self.process(cast.clause), type_) - - def render_literal_value(self, value, type_): - value = super(MySQLCompiler, self).render_literal_value(value, type_) - if self.dialect._backslash_escapes: - value = value.replace('\\', '\\\\') - return value - - def get_select_precolumns(self, select): - """Add special MySQL keywords in place of DISTINCT. - - .. note:: - - this usage is deprecated. :meth:`.Select.prefix_with` - should be used for special keywords at the start - of a SELECT. 
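The recommended modern spelling, for comparison (``select`` here is
``sqlalchemy.sql.select``)::

    select([table]).prefix_with('HIGH_PRIORITY', 'SQL_SMALL_RESULT')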
- - """ - if isinstance(select._distinct, basestring): - return select._distinct.upper() + " " - elif select._distinct: - return "DISTINCT " - else: - return "" - - def visit_join(self, join, asfrom=False, **kwargs): - # 'JOIN ... ON ...' for inner joins isn't available until 4.0. - # Apparently < 3.23.17 requires theta joins for inner joins - # (but not outer). Not generating these currently, but - # support can be added, preferably after dialects are - # refactored to be version-sensitive. - return ''.join( - (self.process(join.left, asfrom=True, **kwargs), - (join.isouter and " LEFT OUTER JOIN " or " INNER JOIN "), - self.process(join.right, asfrom=True, **kwargs), - " ON ", - self.process(join.onclause, **kwargs))) - - def for_update_clause(self, select): - if select.for_update == 'read': - return ' LOCK IN SHARE MODE' - else: - return super(MySQLCompiler, self).for_update_clause(select) - - def limit_clause(self, select): - # MySQL supports: - # LIMIT - # LIMIT , - # and in server versions > 3.3: - # LIMIT OFFSET - # The latter is more readable for offsets but we're stuck with the - # former until we can refine dialects by server revision. - - limit, offset = select._limit, select._offset - - if (limit, offset) == (None, None): - return '' - elif offset is not None: - # As suggested by the MySQL docs, need to apply an - # artificial limit if one wasn't provided - # http://dev.mysql.com/doc/refman/5.0/en/select.html - if limit is None: - # hardwire the upper limit. Currently - # needed by OurSQL with Python 3 - # (https://bugs.launchpad.net/oursql/+bug/686232), - # but also is consistent with the usage of the upper - # bound as part of MySQL's "syntax" for OFFSET with - # no LIMIT - return ' \n LIMIT %s, %s' % ( - self.process(sql.literal(offset)), - "18446744073709551615") - else: - return ' \n LIMIT %s, %s' % ( - self.process(sql.literal(offset)), - self.process(sql.literal(limit))) - else: - # No offset provided, so just use the limit - return ' \n LIMIT %s' % (self.process(sql.literal(limit)),) - - def update_limit_clause(self, update_stmt): - limit = update_stmt.kwargs.get('%s_limit' % self.dialect.name, None) - if limit: - return "LIMIT %s" % limit - else: - return None - - def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw): - return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw) - for t in [from_table] + list(extra_froms)) - - def update_from_clause(self, update_stmt, from_table, - extra_froms, from_hints, **kw): - return None - - -# ug. "InnoDB needs indexes on foreign keys and referenced keys [...]. -# Starting with MySQL 4.1.2, these indexes are created automatically. -# In older versions, the indexes must be created explicitly or the -# creation of foreign key constraints fails." 
- -class MySQLDDLCompiler(compiler.DDLCompiler): - def create_table_constraints(self, table): - """Get table constraints.""" - constraint_string = super(MySQLDDLCompiler, self).create_table_constraints(table) - - engine_key = '%s_engine' % self.dialect.name - is_innodb = table.kwargs.has_key(engine_key) and \ - table.kwargs[engine_key].lower() == 'innodb' - - auto_inc_column = table._autoincrement_column - - if is_innodb and \ - auto_inc_column is not None and \ - auto_inc_column is not list(table.primary_key)[0]: - if constraint_string: - constraint_string += ", \n\t" - constraint_string += "KEY %s (%s)" % ( - self.preparer.quote( - "idx_autoinc_%s" % auto_inc_column.name, None - ), - self.preparer.format_column(auto_inc_column) - ) - - return constraint_string - - - def get_column_specification(self, column, **kw): - """Builds column DDL.""" - - colspec = [self.preparer.format_column(column), - self.dialect.type_compiler.process(column.type) - ] - - default = self.get_column_default_string(column) - if default is not None: - colspec.append('DEFAULT ' + default) - - is_timestamp = isinstance(column.type, sqltypes.TIMESTAMP) - if not column.nullable and not is_timestamp: - colspec.append('NOT NULL') - - elif column.nullable and is_timestamp and default is None: - colspec.append('NULL') - - if column is column.table._autoincrement_column and column.server_default is None: - colspec.append('AUTO_INCREMENT') - - return ' '.join(colspec) - - def post_create_table(self, table): - """Build table-level CREATE options like ENGINE and COLLATE.""" - - table_opts = [] - - opts = dict( - ( - k[len(self.dialect.name)+1:].upper(), - v - ) - for k, v in table.kwargs.items() - if k.startswith('%s_' % self.dialect.name) - ) - - for opt in topological.sort([ - ('DEFAULT_CHARSET', 'COLLATE'), - ('DEFAULT_CHARACTER_SET', 'COLLATE') - ], opts): - arg = opts[opt] - if opt in _options_of_type_string: - arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''") - - if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY', - 'DEFAULT_CHARACTER_SET', 'CHARACTER_SET', - 'DEFAULT_CHARSET', - 'DEFAULT_COLLATE'): - opt = opt.replace('_', ' ') - - joiner = '=' - if opt in ('TABLESPACE', 'DEFAULT CHARACTER SET', - 'CHARACTER SET', 'COLLATE'): - joiner = ' ' - - table_opts.append(joiner.join((opt, arg))) - return ' '.join(table_opts) - - - def visit_create_index(self, create): - index = create.element - preparer = self.preparer - table = preparer.format_table(index.table) - columns = [preparer.quote(c.name, c.quote) for c in index.columns] - name = preparer.quote( - self._index_identifier(index.name), - index.quote) - - text = "CREATE " - if index.unique: - text += "UNIQUE " - text += "INDEX %s ON %s " % (name, table) - - columns = ', '.join(columns) - if 'mysql_length' in index.kwargs: - length = index.kwargs['mysql_length'] - text += "(%s(%d))" % (columns, length) - else: - text += "(%s)" % (columns) - - if 'mysql_using' in index.kwargs: - using = index.kwargs['mysql_using'] - text += " USING %s" % (preparer.quote(using, index.quote)) - - return text - - def visit_primary_key_constraint(self, constraint): - text = super(MySQLDDLCompiler, self).\ - visit_primary_key_constraint(constraint) - if "mysql_using" in constraint.kwargs: - using = constraint.kwargs['mysql_using'] - text += " USING %s" % ( - self.preparer.quote(using, constraint.quote)) - return text - - def visit_drop_index(self, drop): - index = drop.element - - return "\nDROP INDEX %s ON %s" % \ - (self.preparer.quote( - self._index_identifier(index.name), index.quote - 
), - self.preparer.format_table(index.table)) - - def visit_drop_constraint(self, drop): - constraint = drop.element - if isinstance(constraint, sa_schema.ForeignKeyConstraint): - qual = "FOREIGN KEY " - const = self.preparer.format_constraint(constraint) - elif isinstance(constraint, sa_schema.PrimaryKeyConstraint): - qual = "PRIMARY KEY " - const = "" - elif isinstance(constraint, sa_schema.UniqueConstraint): - qual = "INDEX " - const = self.preparer.format_constraint(constraint) - else: - qual = "" - const = self.preparer.format_constraint(constraint) - return "ALTER TABLE %s DROP %s%s" % \ - (self.preparer.format_table(constraint.table), - qual, const) - -class MySQLTypeCompiler(compiler.GenericTypeCompiler): - def _extend_numeric(self, type_, spec): - "Extend a numeric-type declaration with MySQL specific extensions." - - if not self._mysql_type(type_): - return spec - - if type_.unsigned: - spec += ' UNSIGNED' - if type_.zerofill: - spec += ' ZEROFILL' - return spec - - def _extend_string(self, type_, defaults, spec): - """Extend a string-type declaration with standard SQL CHARACTER SET / - COLLATE annotations and MySQL specific extensions. - - """ - - def attr(name): - return getattr(type_, name, defaults.get(name)) - - if attr('charset'): - charset = 'CHARACTER SET %s' % attr('charset') - elif attr('ascii'): - charset = 'ASCII' - elif attr('unicode'): - charset = 'UNICODE' - else: - charset = None - - if attr('collation'): - collation = 'COLLATE %s' % type_.collation - elif attr('binary'): - collation = 'BINARY' - else: - collation = None - - if attr('national'): - # NATIONAL (aka NCHAR/NVARCHAR) trumps charsets. - return ' '.join([c for c in ('NATIONAL', spec, collation) - if c is not None]) - return ' '.join([c for c in (spec, charset, collation) - if c is not None]) - - def _mysql_type(self, type_): - return isinstance(type_, (_StringType, _NumericType)) - - def visit_NUMERIC(self, type_): - if type_.precision is None: - return self._extend_numeric(type_, "NUMERIC") - elif type_.scale is None: - return self._extend_numeric(type_, - "NUMERIC(%(precision)s)" % - {'precision': type_.precision}) - else: - return self._extend_numeric(type_, - "NUMERIC(%(precision)s, %(scale)s)" % - {'precision': type_.precision, 'scale' : type_.scale}) - - def visit_DECIMAL(self, type_): - if type_.precision is None: - return self._extend_numeric(type_, "DECIMAL") - elif type_.scale is None: - return self._extend_numeric(type_, - "DECIMAL(%(precision)s)" % - {'precision': type_.precision}) - else: - return self._extend_numeric(type_, - "DECIMAL(%(precision)s, %(scale)s)" % - {'precision': type_.precision, 'scale' : type_.scale}) - - def visit_DOUBLE(self, type_): - if type_.precision is not None and type_.scale is not None: - return self._extend_numeric(type_, "DOUBLE(%(precision)s, %(scale)s)" % - {'precision': type_.precision, - 'scale' : type_.scale}) - else: - return self._extend_numeric(type_, 'DOUBLE') - - def visit_REAL(self, type_): - if type_.precision is not None and type_.scale is not None: - return self._extend_numeric(type_, "REAL(%(precision)s, %(scale)s)" % - {'precision': type_.precision, - 'scale' : type_.scale}) - else: - return self._extend_numeric(type_, 'REAL') - - def visit_FLOAT(self, type_): - if self._mysql_type(type_) and \ - type_.scale is not None and \ - type_.precision is not None: - return self._extend_numeric(type_, - "FLOAT(%s, %s)" % (type_.precision, type_.scale)) - elif type_.precision is not None: - return self._extend_numeric(type_, "FLOAT(%s)" % 
(type_.precision,)) - else: - return self._extend_numeric(type_, "FLOAT") - - def visit_INTEGER(self, type_): - if self._mysql_type(type_) and type_.display_width is not None: - return self._extend_numeric(type_, - "INTEGER(%(display_width)s)" % - {'display_width': type_.display_width}) - else: - return self._extend_numeric(type_, "INTEGER") - - def visit_BIGINT(self, type_): - if self._mysql_type(type_) and type_.display_width is not None: - return self._extend_numeric(type_, - "BIGINT(%(display_width)s)" % - {'display_width': type_.display_width}) - else: - return self._extend_numeric(type_, "BIGINT") - - def visit_MEDIUMINT(self, type_): - if self._mysql_type(type_) and type_.display_width is not None: - return self._extend_numeric(type_, - "MEDIUMINT(%(display_width)s)" % - {'display_width': type_.display_width}) - else: - return self._extend_numeric(type_, "MEDIUMINT") - - def visit_TINYINT(self, type_): - if self._mysql_type(type_) and type_.display_width is not None: - return self._extend_numeric(type_, "TINYINT(%s)" % type_.display_width) - else: - return self._extend_numeric(type_, "TINYINT") - - def visit_SMALLINT(self, type_): - if self._mysql_type(type_) and type_.display_width is not None: - return self._extend_numeric(type_, - "SMALLINT(%(display_width)s)" % - {'display_width': type_.display_width} - ) - else: - return self._extend_numeric(type_, "SMALLINT") - - def visit_BIT(self, type_): - if type_.length is not None: - return "BIT(%s)" % type_.length - else: - return "BIT" - - def visit_DATETIME(self, type_): - return "DATETIME" - - def visit_DATE(self, type_): - return "DATE" - - def visit_TIME(self, type_): - return "TIME" - - def visit_TIMESTAMP(self, type_): - return 'TIMESTAMP' - - def visit_YEAR(self, type_): - if type_.display_width is None: - return "YEAR" - else: - return "YEAR(%s)" % type_.display_width - - def visit_TEXT(self, type_): - if type_.length: - return self._extend_string(type_, {}, "TEXT(%d)" % type_.length) - else: - return self._extend_string(type_, {}, "TEXT") - - def visit_TINYTEXT(self, type_): - return self._extend_string(type_, {}, "TINYTEXT") - - def visit_MEDIUMTEXT(self, type_): - return self._extend_string(type_, {}, "MEDIUMTEXT") - - def visit_LONGTEXT(self, type_): - return self._extend_string(type_, {}, "LONGTEXT") - - def visit_VARCHAR(self, type_): - if type_.length: - return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length) - else: - raise exc.CompileError( - "VARCHAR requires a length on dialect %s" % - self.dialect.name) - - def visit_CHAR(self, type_): - if type_.length: - return self._extend_string(type_, {}, "CHAR(%(length)s)" % {'length' : type_.length}) - else: - return self._extend_string(type_, {}, "CHAR") - - def visit_NVARCHAR(self, type_): - # We'll actually generate the equiv. "NATIONAL VARCHAR" instead - # of "NVARCHAR". - if type_.length: - return self._extend_string(type_, {'national':True}, "VARCHAR(%(length)s)" % {'length': type_.length}) - else: - raise exc.CompileError( - "NVARCHAR requires a length on dialect %s" % - self.dialect.name) - - def visit_NCHAR(self, type_): - # We'll actually generate the equiv. "NATIONAL CHAR" instead of "NCHAR". 
- if type_.length: - return self._extend_string(type_, {'national':True}, "CHAR(%(length)s)" % {'length': type_.length}) - else: - return self._extend_string(type_, {'national':True}, "CHAR") - - def visit_VARBINARY(self, type_): - return "VARBINARY(%d)" % type_.length - - def visit_large_binary(self, type_): - return self.visit_BLOB(type_) - - def visit_enum(self, type_): - if not type_.native_enum: - return super(MySQLTypeCompiler, self).visit_enum(type_) - else: - return self.visit_ENUM(type_) - - def visit_BLOB(self, type_): - if type_.length: - return "BLOB(%d)" % type_.length - else: - return "BLOB" - - def visit_TINYBLOB(self, type_): - return "TINYBLOB" - - def visit_MEDIUMBLOB(self, type_): - return "MEDIUMBLOB" - - def visit_LONGBLOB(self, type_): - return "LONGBLOB" - - def visit_ENUM(self, type_): - quoted_enums = [] - for e in type_.enums: - quoted_enums.append("'%s'" % e.replace("'", "''")) - return self._extend_string(type_, {}, "ENUM(%s)" % ",".join(quoted_enums)) - - def visit_SET(self, type_): - return self._extend_string(type_, {}, "SET(%s)" % ",".join(type_._ddl_values)) - - def visit_BOOLEAN(self, type): - return "BOOL" - - -class MySQLIdentifierPreparer(compiler.IdentifierPreparer): - - reserved_words = RESERVED_WORDS - - def __init__(self, dialect, server_ansiquotes=False, **kw): - if not server_ansiquotes: - quote = "`" - else: - quote = '"' - - super(MySQLIdentifierPreparer, self).__init__( - dialect, - initial_quote=quote, - escape_quote=quote) - - def _quote_free_identifiers(self, *ids): - """Unilaterally identifier-quote any number of strings.""" - - return tuple([self.quote_identifier(i) for i in ids if i is not None]) - -class MySQLDialect(default.DefaultDialect): - """Details of the MySQL dialect. Not used directly in application code.""" - - name = 'mysql' - supports_alter = True - - # identifiers are 64, however aliases can be 255... - max_identifier_length = 255 - max_index_name_length = 64 - - supports_native_enum = True - - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - default_paramstyle = 'format' - colspecs = colspecs - - statement_compiler = MySQLCompiler - ddl_compiler = MySQLDDLCompiler - type_compiler = MySQLTypeCompiler - ischema_names = ischema_names - preparer = MySQLIdentifierPreparer - - # default SQL compilation settings - - # these are modified upon initialize(), - # i.e. first connect - _backslash_escapes = True - _server_ansiquotes = False - - def __init__(self, use_ansiquotes=None, isolation_level=None, **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.isolation_level = isolation_level - - def on_connect(self): - if self.isolation_level is not None: - def connect(conn): - self.set_isolation_level(conn, self.isolation_level) - return connect - else: - return None - - _isolation_lookup = set(['SERIALIZABLE', - 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) - - def set_isolation_level(self, connection, level): - level = level.replace('_', ' ') - if level not in self._isolation_lookup: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. 
" - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - cursor = connection.cursor() - cursor.execute("SET SESSION TRANSACTION ISOLATION LEVEL %s" % level) - cursor.execute("COMMIT") - cursor.close() - - def get_isolation_level(self, connection): - cursor = connection.cursor() - cursor.execute('SELECT @@tx_isolation') - val = cursor.fetchone()[0] - cursor.close() - return val.upper().replace("-", " ") - - def do_commit(self, connection): - """Execute a COMMIT.""" - - # COMMIT/ROLLBACK were introduced in 3.23.15. - # Yes, we have at least one user who has to talk to these old versions! - # - # Ignore commit/rollback if support isn't present, otherwise even basic - # operations via autocommit fail. - try: - connection.commit() - except: - if self.server_version_info < (3, 23, 15): - args = sys.exc_info()[1].args - if args and args[0] == 1064: - return - raise - - def do_rollback(self, connection): - """Execute a ROLLBACK.""" - - try: - connection.rollback() - except: - if self.server_version_info < (3, 23, 15): - args = sys.exc_info()[1].args - if args and args[0] == 1064: - return - raise - - def do_begin_twophase(self, connection, xid): - connection.execute(sql.text("XA BEGIN :xid"), xid=xid) - - def do_prepare_twophase(self, connection, xid): - connection.execute(sql.text("XA END :xid"), xid=xid) - connection.execute(sql.text("XA PREPARE :xid"), xid=xid) - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - connection.execute(sql.text("XA END :xid"), xid=xid) - connection.execute(sql.text("XA ROLLBACK :xid"), xid=xid) - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self.do_prepare_twophase(connection, xid) - connection.execute(sql.text("XA COMMIT :xid"), xid=xid) - - def do_recover_twophase(self, connection): - resultset = connection.execute("XA RECOVER") - return [row['data'][0:row['gtrid_length']] for row in resultset] - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.OperationalError): - return self._extract_error_code(e) in \ - (2006, 2013, 2014, 2045, 2055) - elif isinstance(e, self.dbapi.InterfaceError): - # if underlying connection is closed, - # this is the error you get - return "(0, '')" in str(e) - else: - return False - - def _compat_fetchall(self, rp, charset=None): - """Proxy result rows to smooth over MySQL-Python driver inconsistencies.""" - - return [_DecodingRowProxy(row, charset) for row in rp.fetchall()] - - def _compat_fetchone(self, rp, charset=None): - """Proxy a result row to smooth over MySQL-Python driver inconsistencies.""" - - return _DecodingRowProxy(rp.fetchone(), charset) - - def _compat_first(self, rp, charset=None): - """Proxy a result row to smooth over MySQL-Python driver inconsistencies.""" - - return _DecodingRowProxy(rp.first(), charset) - - def _extract_error_code(self, exception): - raise NotImplementedError() - - def _get_default_schema_name(self, connection): - return connection.execute('SELECT DATABASE()').scalar() - - - def has_table(self, connection, table_name, schema=None): - # SHOW TABLE STATUS LIKE and SHOW TABLES LIKE do not function properly - # on macosx (and maybe win?) with multibyte table names. - # - # TODO: if this is not a problem on win, make the strategy swappable - # based on platform. DESCRIBE is slower. 
- - # [ticket:726] - # full_name = self.identifier_preparer.format_table(table, - # use_schema=True) - - - full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( - schema, table_name)) - - st = "DESCRIBE %s" % full_name - rs = None - try: - try: - rs = connection.execute(st) - have = rs.rowcount > 0 - rs.close() - return have - except exc.DBAPIError, e: - if self._extract_error_code(e.orig) == 1146: - return False - raise - finally: - if rs: - rs.close() - - def initialize(self, connection): - default.DefaultDialect.initialize(self, connection) - self._connection_charset = self._detect_charset(connection) - self._server_casing = self._detect_casing(connection) - self._server_collations = self._detect_collations(connection) - self._detect_ansiquotes(connection) - if self._server_ansiquotes: - # if ansiquotes == True, build a new IdentifierPreparer - # with the new setting - self.identifier_preparer = self.preparer(self, - server_ansiquotes=self._server_ansiquotes) - - @property - def _supports_cast(self): - return self.server_version_info is None or \ - self.server_version_info >= (4, 0, 2) - - @reflection.cache - def get_schema_names(self, connection, **kw): - rp = connection.execute("SHOW schemas") - return [r[0] for r in rp] - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - """Return a Unicode SHOW TABLES from a given schema.""" - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - charset = self._connection_charset - if self.server_version_info < (5, 0, 2): - rp = connection.execute("SHOW TABLES FROM %s" % - self.identifier_preparer.quote_identifier(current_schema)) - return [row[0] for row in self._compat_fetchall(rp, charset=charset)] - else: - rp = connection.execute("SHOW FULL TABLES FROM %s" % - self.identifier_preparer.quote_identifier(current_schema)) - - return [row[0] for row in self._compat_fetchall(rp, charset=charset)\ - if row[1] == 'BASE TABLE'] - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if self.server_version_info < (5, 0, 2): - raise NotImplementedError - if schema is None: - schema = self.default_schema_name - if self.server_version_info < (5, 0, 2): - return self.get_table_names(connection, schema) - charset = self._connection_charset - rp = connection.execute("SHOW FULL TABLES FROM %s" % - self.identifier_preparer.quote_identifier(schema)) - return [row[0] for row in self._compat_fetchall(rp, charset=charset)\ - if row[1] in ('VIEW', 'SYSTEM VIEW')] - - @reflection.cache - def get_table_options(self, connection, table_name, schema=None, **kw): - - parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw) - return parsed_state.table_options - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw) - return parsed_state.columns - - @reflection.cache - def get_primary_keys(self, connection, table_name, schema=None, **kw): - parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw) - for key in parsed_state.keys: - if key['type'] == 'PRIMARY': - # There can be only one. 
- ##raise Exception, str(key) - return [s[0] for s in key['columns']] - return [] - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - - parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw) - default_schema = None - - fkeys = [] - - for spec in parsed_state.constraints: - # only FOREIGN KEYs - ref_name = spec['table'][-1] - ref_schema = len(spec['table']) > 1 and spec['table'][-2] or schema - - if not ref_schema: - if default_schema is None: - default_schema = \ - connection.dialect.default_schema_name - if schema == default_schema: - ref_schema = schema - - loc_names = spec['local'] - ref_names = spec['foreign'] - - con_kw = {} - for opt in ('name', 'onupdate', 'ondelete'): - if spec.get(opt, False): - con_kw[opt] = spec[opt] - - fkey_d = { - 'name' : spec['name'], - 'constrained_columns' : loc_names, - 'referred_schema' : ref_schema, - 'referred_table' : ref_name, - 'referred_columns' : ref_names, - 'options' : con_kw - } - fkeys.append(fkey_d) - return fkeys - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, **kw): - - parsed_state = self._parsed_state_or_create(connection, table_name, schema, **kw) - - indexes = [] - for spec in parsed_state.keys: - unique = False - flavor = spec['type'] - if flavor == 'PRIMARY': - continue - if flavor == 'UNIQUE': - unique = True - elif flavor in (None, 'FULLTEXT', 'SPATIAL'): - pass - else: - self.logger.info( - "Converting unknown KEY type %s to a plain KEY" % flavor) - pass - index_d = {} - index_d['name'] = spec['name'] - index_d['column_names'] = [s[0] for s in spec['columns']] - index_d['unique'] = unique - index_d['type'] = flavor - indexes.append(index_d) - return indexes - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - - charset = self._connection_charset - full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( - schema, view_name)) - sql = self._show_create_table(connection, None, charset, - full_name=full_name) - return sql - - def _parsed_state_or_create(self, connection, table_name, schema=None, **kw): - return self._setup_parser( - connection, - table_name, - schema, - info_cache=kw.get('info_cache', None) - ) - - @util.memoized_property - def _tabledef_parser(self): - """return the MySQLTableDefinitionParser, generate if needed. - - The deferred creation ensures that the dialect has - retrieved server version information first. - - """ - if (self.server_version_info < (4, 1) and self._server_ansiquotes): - # ANSI_QUOTES doesn't affect SHOW CREATE TABLE on < 4.1 - preparer = self.preparer(self, server_ansiquotes=False) - else: - preparer = self.identifier_preparer - return MySQLTableDefinitionParser(self, preparer) - - @reflection.cache - def _setup_parser(self, connection, table_name, schema=None, **kw): - charset = self._connection_charset - parser = self._tabledef_parser - full_name = '.'.join(self.identifier_preparer._quote_free_identifiers( - schema, table_name)) - sql = self._show_create_table(connection, None, charset, - full_name=full_name) - if sql.startswith('CREATE ALGORITHM'): - # Adapt views to something table-like. - columns = self._describe_table(connection, None, charset, - full_name=full_name) - sql = parser._describe_to_create(table_name, columns) - return parser.parse(sql, charset) - - def _detect_charset(self, connection): - raise NotImplementedError() - - def _detect_casing(self, connection): - """Sniff out identifier case sensitivity. 
- - Cached per-connection. This value can not change without a server - restart. - - """ - # http://dev.mysql.com/doc/refman/5.0/en/name-case-sensitivity.html - - charset = self._connection_charset - row = self._compat_first(connection.execute( - "SHOW VARIABLES LIKE 'lower_case_table_names'"), - charset=charset) - if not row: - cs = 0 - else: - # 4.0.15 returns OFF or ON according to [ticket:489] - # 3.23 doesn't, 4.0.27 doesn't.. - if row[1] == 'OFF': - cs = 0 - elif row[1] == 'ON': - cs = 1 - else: - cs = int(row[1]) - return cs - - def _detect_collations(self, connection): - """Pull the active COLLATIONS list from the server. - - Cached per-connection. - """ - - collations = {} - if self.server_version_info < (4, 1, 0): - pass - else: - charset = self._connection_charset - rs = connection.execute('SHOW COLLATION') - for row in self._compat_fetchall(rs, charset): - collations[row[0]] = row[1] - return collations - - def _detect_ansiquotes(self, connection): - """Detect and adjust for the ANSI_QUOTES sql mode.""" - - row = self._compat_first( - connection.execute("SHOW VARIABLES LIKE 'sql_mode'"), - charset=self._connection_charset) - - if not row: - mode = '' - else: - mode = row[1] or '' - # 4.0 - if mode.isdigit(): - mode_no = int(mode) - mode = (mode_no | 4 == mode_no) and 'ANSI_QUOTES' or '' - - self._server_ansiquotes = 'ANSI_QUOTES' in mode - - # as of MySQL 5.0.1 - self._backslash_escapes = 'NO_BACKSLASH_ESCAPES' not in mode - - def _show_create_table(self, connection, table, charset=None, - full_name=None): - """Run SHOW CREATE TABLE for a ``Table``.""" - - if full_name is None: - full_name = self.identifier_preparer.format_table(table) - st = "SHOW CREATE TABLE %s" % full_name - - rp = None - try: - rp = connection.execute(st) - except exc.DBAPIError, e: - if self._extract_error_code(e.orig) == 1146: - raise exc.NoSuchTableError(full_name) - else: - raise - row = self._compat_first(rp, charset=charset) - if not row: - raise exc.NoSuchTableError(full_name) - return row[1].strip() - - return sql - - def _describe_table(self, connection, table, charset=None, - full_name=None): - """Run DESCRIBE for a ``Table`` and return processed rows.""" - - if full_name is None: - full_name = self.identifier_preparer.format_table(table) - st = "DESCRIBE %s" % full_name - - rp, rows = None, None - try: - try: - rp = connection.execute(st) - except exc.DBAPIError, e: - if self._extract_error_code(e.orig) == 1146: - raise exc.NoSuchTableError(full_name) - else: - raise - rows = self._compat_fetchall(rp, charset=charset) - finally: - if rp: - rp.close() - return rows - -class ReflectedState(object): - """Stores raw information about a SHOW CREATE TABLE statement.""" - - def __init__(self): - self.columns = [] - self.table_options = {} - self.table_name = None - self.keys = [] - self.constraints = [] - -class MySQLTableDefinitionParser(object): - """Parses the results of a SHOW CREATE TABLE statement.""" - - def __init__(self, dialect, preparer): - self.dialect = dialect - self.preparer = preparer - self._prep_regexes() - - def parse(self, show_create, charset): - state = ReflectedState() - state.charset = charset - for line in re.split(r'\r?\n', show_create): - if line.startswith(' ' + self.preparer.initial_quote): - self._parse_column(line, state) - # a regular table options line - elif line.startswith(') '): - self._parse_table_options(line, state) - # an ANSI-mode table options line - elif line == ')': - pass - elif line.startswith('CREATE '): - self._parse_table_name(line, state) - # Not 
present in real reflection, but may be if loading from a file. - elif not line: - pass - else: - type_, spec = self._parse_constraints(line) - if type_ is None: - util.warn("Unknown schema content: %r" % line) - elif type_ == 'key': - state.keys.append(spec) - elif type_ == 'constraint': - state.constraints.append(spec) - else: - pass - - return state - - def _parse_constraints(self, line): - """Parse a KEY or CONSTRAINT line. - - :param line: A line of SHOW CREATE TABLE output - """ - - # KEY - m = self._re_key.match(line) - if m: - spec = m.groupdict() - # convert columns into name, length pairs - spec['columns'] = self._parse_keyexprs(spec['columns']) - return 'key', spec - - # CONSTRAINT - m = self._re_constraint.match(line) - if m: - spec = m.groupdict() - spec['table'] = \ - self.preparer.unformat_identifiers(spec['table']) - spec['local'] = [c[0] - for c in self._parse_keyexprs(spec['local'])] - spec['foreign'] = [c[0] - for c in self._parse_keyexprs(spec['foreign'])] - return 'constraint', spec - - # PARTITION and SUBPARTITION - m = self._re_partition.match(line) - if m: - # Punt! - return 'partition', line - - # No match. - return (None, line) - - def _parse_table_name(self, line, state): - """Extract the table name. - - :param line: The first line of SHOW CREATE TABLE - """ - - regex, cleanup = self._pr_name - m = regex.match(line) - if m: - state.table_name = cleanup(m.group('name')) - - def _parse_table_options(self, line, state): - """Build a dictionary of all reflected table-level options. - - :param line: The final line of SHOW CREATE TABLE output. - """ - - options = {} - - if not line or line == ')': - pass - - else: - rest_of_line = line[:] - for regex, cleanup in self._pr_options: - m = regex.search(rest_of_line) - if not m: - continue - directive, value = m.group('directive'), m.group('val') - if cleanup: - value = cleanup(value) - options[directive.lower()] = value - rest_of_line = regex.sub('', rest_of_line) - - for nope in ('auto_increment', 'data directory', 'index directory'): - options.pop(nope, None) - - for opt, val in options.items(): - state.table_options['%s_%s' % (self.dialect.name, opt)] = val - - def _parse_column(self, line, state): - """Extract column details. - - Falls back to a 'minimal support' variant if full parse fails. - - :param line: Any column-bearing line from SHOW CREATE TABLE - """ - - spec = None - m = self._re_column.match(line) - if m: - spec = m.groupdict() - spec['full'] = True - else: - m = self._re_column_loose.match(line) - if m: - spec = m.groupdict() - spec['full'] = False - if not spec: - util.warn("Unknown column definition %r" % line) - return - if not spec['full']: - util.warn("Incomplete reflection of column definition %r" % line) - - name, type_, args, notnull = \ - spec['name'], spec['coltype'], spec['arg'], spec['notnull'] - - try: - col_type = self.dialect.ischema_names[type_] - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (type_, name)) - col_type = sqltypes.NullType - - # Column type positional arguments eg. 
varchar(32)
-        if args is None or args == '':
-            type_args = []
-        elif args[0] == "'" and args[-1] == "'":
-            type_args = self._re_csv_str.findall(args)
-        else:
-            type_args = [int(v) for v in self._re_csv_int.findall(args)]
-
-        # Column type keyword options
-        type_kw = {}
-        for kw in ('unsigned', 'zerofill'):
-            if spec.get(kw, False):
-                type_kw[kw] = True
-        for kw in ('charset', 'collate'):
-            if spec.get(kw, False):
-                type_kw[kw] = spec[kw]
-
-        if type_ == 'enum':
-            type_args = ENUM._strip_enums(type_args)
-
-        type_instance = col_type(*type_args, **type_kw)
-
-        col_args, col_kw = [], {}
-
-        # NOT NULL
-        col_kw['nullable'] = True
-        if spec.get('notnull', False):
-            col_kw['nullable'] = False
-
-        # AUTO_INCREMENT
-        if spec.get('autoincr', False):
-            col_kw['autoincrement'] = True
-        elif issubclass(col_type, sqltypes.Integer):
-            col_kw['autoincrement'] = False
-
-        # DEFAULT
-        default = spec.get('default', None)
-
-        if default == 'NULL':
-            # eliminates the need to deal with this later.
-            default = None
-
-        col_d = dict(name=name, type=type_instance, default=default)
-        col_d.update(col_kw)
-        state.columns.append(col_d)
-
-    def _describe_to_create(self, table_name, columns):
-        """Re-format DESCRIBE output as a SHOW CREATE TABLE string.
-
-        DESCRIBE is a much simpler reflection and is sufficient for
-        reflecting views for runtime use.  This method formats DDL
-        for columns only; keys are omitted.
-
-        :param columns: A sequence of DESCRIBE or SHOW COLUMNS 6-tuples.
-          SHOW FULL COLUMNS FROM rows must be rearranged for use with
-          this function.
-        """
-
-        buffer = []
-        for row in columns:
-            (name, col_type, nullable, default, extra) = \
-                [row[i] for i in (0, 1, 2, 4, 5)]
-
-            line = [' ']
-            line.append(self.preparer.quote_identifier(name))
-            line.append(col_type)
-            if not nullable:
-                line.append('NOT NULL')
-            if default:
-                if 'auto_increment' in default:
-                    pass
-                elif (col_type.startswith('timestamp') and
-                      default.startswith('C')):
-                    line.append('DEFAULT')
-                    line.append(default)
-                elif default == 'NULL':
-                    line.append('DEFAULT')
-                    line.append(default)
-                else:
-                    line.append('DEFAULT')
-                    line.append("'%s'" % default.replace("'", "''"))
-            if extra:
-                line.append(extra)
-
-            buffer.append(' '.join(line))
-
-        return ''.join([('CREATE TABLE %s (\n' %
-                         self.preparer.quote_identifier(table_name)),
-                        ',\n'.join(buffer),
-                        '\n) '])
-
-    def _parse_keyexprs(self, identifiers):
-        """Unpack '"col"(2),"col" ASC'-ish strings into components."""
-
-        return self._re_keyexprs.findall(identifiers)
-
-    def _prep_regexes(self):
-        """Pre-compile regular expressions."""
-
-        self._re_columns = []
-        self._pr_options = []
-
-        _final = self.preparer.final_quote
-
-        quotes = dict(zip(('iq', 'fq', 'esc_fq'),
-                          [re.escape(s) for s in
-                           (self.preparer.initial_quote,
-                            _final,
-                            self.preparer._escape_identifier(_final))]))
-
-        self._pr_name = _pr_compile(
-            r'^CREATE (?:\w+ +)?TABLE +'
-            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +\($' % quotes,
-            self.preparer._unescape_identifier)
-
-        # `col`,`col2`(32),`col3`(15) DESC
-        #
-        # Note: ASC and DESC aren't reflected, so we'll punt...
-        self._re_keyexprs = _re_compile(
-            r'(?:'
-            r'(?:%(iq)s((?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)'
-            r'(?:\((\d+)\))?(?=\,|$))+' % quotes)
-
-        # 'foo' or 'foo','bar' or 'fo,o','ba''a''r'
-        self._re_csv_str = _re_compile(r'\x27(?:\x27\x27|[^\x27])*\x27')
-
-        # 123 or 123,456
-        self._re_csv_int = _re_compile(r'\d+')
-
-        # `colname` <type> [type opts]
-        #  (NOT NULL | NULL)
-        #   DEFAULT ('value' | CURRENT_TIMESTAMP...)
-        #   COMMENT 'comment'
-        #  COLUMN_FORMAT (FIXED|DYNAMIC|DEFAULT)
-        #  STORAGE (DISK|MEMORY)
-        self._re_column = _re_compile(
-            r'  '
-            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
-            r'(?P<coltype>\w+)'
-            r'(?:\((?P<arg>(?:\d+|\d+,\d+|'
-            r'(?:\x27(?:\x27\x27|[^\x27])*\x27,?)+))\))?'
-            r'(?: +(?P<unsigned>UNSIGNED))?'
-            r'(?: +(?P<zerofill>ZEROFILL))?'
-            r'(?: +CHARACTER SET +(?P<charset>[\w_]+))?'
-            r'(?: +COLLATE +(?P<collate>[\w_]+))?'
-            r'(?: +(?P<notnull>NOT NULL))?'
-            r'(?: +DEFAULT +(?P<default>'
-            r'(?:NULL|\x27(?:\x27\x27|[^\x27])*\x27|\w+'
-            r'(?: +ON UPDATE \w+)?)'
-            r'))?'
-            r'(?: +(?P<autoincr>AUTO_INCREMENT))?'
-            r'(?: +COMMENT +(?P<comment>(?:\x27\x27|[^\x27])+))?'
-            r'(?: +COLUMN_FORMAT +(?P<colfmt>\w+))?'
-            r'(?: +STORAGE +(?P<storage>\w+))?'
-            r'(?: +(?P<extra>.*))?'
-            r',?$'
-            % quotes
-            )
-
-        # Fallback, try to parse as little as possible
-        self._re_column_loose = _re_compile(
-            r'  '
-            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
-            r'(?P<coltype>\w+)'
-            r'(?:\((?P<arg>(?:\d+|\d+,\d+|\x27(?:\x27\x27|[^\x27])+\x27))\))?'
-            r'.*?(?P<notnull>NOT NULL)?'
-            % quotes
-            )
-
-        # (PRIMARY|UNIQUE|FULLTEXT|SPATIAL) INDEX `name` (USING (BTREE|HASH))?
-        # (`col` (ASC|DESC)?, `col` (ASC|DESC)?)
-        # KEY_BLOCK_SIZE size | WITH PARSER name
-        self._re_key = _re_compile(
-            r'  '
-            r'(?:(?P<type>\S+) )?KEY'
-            r'(?: +%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s)?'
-            r'(?: +USING +(?P<using_pre>\S+))?'
-            r' +\((?P<columns>.+?)\)'
-            r'(?: +USING +(?P<using_post>\S+))?'
-            r'(?: +KEY_BLOCK_SIZE +(?P<keyblock>\S+))?'
-            r'(?: +WITH PARSER +(?P<parser>\S+))?'
-            r',?$'
-            % quotes
-            )
-
-        # CONSTRAINT `name` FOREIGN KEY (`local_col`)
-        # REFERENCES `remote` (`remote_col`)
-        # MATCH FULL | MATCH PARTIAL | MATCH SIMPLE
-        # ON DELETE CASCADE ON UPDATE RESTRICT
-        #
-        # unique constraints come back as KEYs
-        kw = quotes.copy()
-        kw['on'] = 'RESTRICT|CASCADE|SET NULL|NO ACTION'
-        self._re_constraint = _re_compile(
-            r'  '
-            r'CONSTRAINT +'
-            r'%(iq)s(?P<name>(?:%(esc_fq)s|[^%(fq)s])+)%(fq)s +'
-            r'FOREIGN KEY +'
-            r'\((?P<local>[^\)]+?)\) REFERENCES +'
-            r'(?P<table>%(iq)s[^%(fq)s]+%(fq)s(?:\.%(iq)s[^%(fq)s]+%(fq)s)?) +'
-            r'\((?P<foreign>[^\)]+?)\)'
-            r'(?: +(?P<match>MATCH \w+))?'
-            r'(?: +ON DELETE (?P<ondelete>%(on)s))?'
-            r'(?: +ON UPDATE (?P<onupdate>%(on)s))?'
-            % kw
-            )
-
-        # PARTITION
-        #
-        # punt!
-        self._re_partition = _re_compile(r'(?:.*)(?:SUB)?PARTITION(?:.*)')
-
-        # Table-level options (COLLATE, ENGINE, etc.)
-        # Do the string options first, since they have quoted strings
-        # we need to get rid of.
-        for option in _options_of_type_string:
-            self._add_option_string(option)
-
-        for option in ('ENGINE', 'TYPE', 'AUTO_INCREMENT',
-                       'AVG_ROW_LENGTH', 'CHARACTER SET',
-                       'DEFAULT CHARSET', 'CHECKSUM',
-                       'COLLATE', 'DELAY_KEY_WRITE', 'INSERT_METHOD',
-                       'MAX_ROWS', 'MIN_ROWS', 'PACK_KEYS', 'ROW_FORMAT',
-                       'KEY_BLOCK_SIZE'):
-            self._add_option_word(option)
-
-        self._add_option_regex('UNION', r'\([^\)]+\)')
-        self._add_option_regex('TABLESPACE', r'.*? STORAGE DISK')
-        self._add_option_regex('RAID_TYPE',
-            r'\w+\s+RAID_CHUNKS\s*\=\s*\w+RAID_CHUNKSIZE\s*=\s*\w+')
-
-    _optional_equals = r'(?:\s*(?:=\s*)|\s+)'
-
-    def _add_option_string(self, directive):
-        regex = (r'(?P<directive>%s)%s'
-                 r"'(?P<val>(?:[^']|'')*?)'(?!')" %
-                 (re.escape(directive), self._optional_equals))
-        self._pr_options.append(
-            _pr_compile(regex, lambda v: v.replace("\\\\", "\\").replace("''", "'")))
-
-    def _add_option_word(self, directive):
-        regex = (r'(?P<directive>%s)%s'
-                 r'(?P<val>\w+)' %
-                 (re.escape(directive), self._optional_equals))
-        self._pr_options.append(_pr_compile(regex))
-
-    def _add_option_regex(self, directive, regex):
-        regex = (r'(?P<directive>%s)%s'
-                 r'(?P<val>%s)' %
-                 (re.escape(directive), self._optional_equals, regex))
-        self._pr_options.append(_pr_compile(regex))
-
-_options_of_type_string = ('COMMENT', 'DATA DIRECTORY', 'INDEX DIRECTORY',
-                           'PASSWORD', 'CONNECTION')
-
-log.class_logger(MySQLTableDefinitionParser)
-log.class_logger(MySQLDialect)
-
-
-class _DecodingRowProxy(object):
-    """Return unicode-decoded values based on type inspection.
-
-    Smooth over data type issues (esp. with alpha driver versions) and
-    normalize strings as Unicode regardless of user-configured driver
-    encoding settings.
-
-    """
-
-    # Some MySQL-python versions can return some columns as
-    # sets.Set(['value']) (seriously) but thankfully that doesn't
-    # seem to come up in DDL queries.
-
-    def __init__(self, rowproxy, charset):
-        self.rowproxy = rowproxy
-        self.charset = charset
-
-    def __getitem__(self, index):
-        item = self.rowproxy[index]
-        if isinstance(item, _array):
-            item = item.tostring()
-        # Py2K
-        if self.charset and isinstance(item, str):
-        # end Py2K
-        # Py3K
-        #if self.charset and isinstance(item, bytes):
-            return item.decode(self.charset)
-        else:
-            return item
-
-    def __getattr__(self, attr):
-        item = getattr(self.rowproxy, attr)
-        if isinstance(item, _array):
-            item = item.tostring()
-        # Py2K
-        if self.charset and isinstance(item, str):
-        # end Py2K
-        # Py3K
-        #if self.charset and isinstance(item, bytes):
-            return item.decode(self.charset)
-        else:
-            return item
-
-
-def _pr_compile(regex, cleanup=None):
-    """Prepare a 2-tuple of compiled regex and callable."""
-
-    return (_re_compile(regex), cleanup)
-
-def _re_compile(regex):
-    """Compile a string to regex, I and UNICODE."""
-
-    return re.compile(regex, re.I | re.UNICODE)
-
diff --git a/libs/sqlalchemy/dialects/mysql/gaerdbms.py b/libs/sqlalchemy/dialects/mysql/gaerdbms.py
deleted file mode 100644
index 2203504f..00000000
--- a/libs/sqlalchemy/dialects/mysql/gaerdbms.py
+++ /dev/null
@@ -1,84 +0,0 @@
-# mysql/gaerdbms.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-"""Support for Google Cloud SQL on Google App Engine.
-
-This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal
-changes.
-
-.. versionadded:: 0.7.8
-
-Connecting
-----------
-
-Connect string format::
-
-    mysql+gaerdbms:///<dbname>
-
-E.g.::
-
-    create_engine('mysql+gaerdbms:///mydb',
-                  connect_args={"instance":"instancename"})
-
-Pooling
--------
-
-Google App Engine connections appear to be randomly recycled,
-so the dialect does not pool connections.  The :class:`.NullPool`
-implementation is installed within the :class:`.Engine` by
-default.
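For illustration, a minimal engine setup is sketched below; the database name
and the instance string are placeholder values, and the explicit ``poolclass``
only makes the default visible, since the dialect's ``get_pool_class()``
already selects :class:`.NullPool` on its own::

    from sqlalchemy import create_engine
    from sqlalchemy.pool import NullPool

    # 'mydb' and 'myproject:myinstance' are hypothetical names
    engine = create_engine(
        'mysql+gaerdbms:///mydb',
        connect_args={'instance': 'myproject:myinstance'},
        poolclass=NullPool,  # redundant; the dialect installs NullPool itself
    )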
- -""" - -from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb -from sqlalchemy.pool import NullPool -import re - - -class MySQLDialect_gaerdbms(MySQLDialect_mysqldb): - - @classmethod - def dbapi(cls): - # from django: - # http://code.google.com/p/googleappengine/source/ - # browse/trunk/python/google/storage/speckle/ - # python/django/backend/base.py#118 - # see also [ticket:2649] - # see also http://stackoverflow.com/q/14224679/34549 - from google.appengine.api import apiproxy_stub_map - - if apiproxy_stub_map.apiproxy.GetStub('rdbms'): - from google.storage.speckle.python.api import rdbms_apiproxy - return rdbms_apiproxy - else: - from google.storage.speckle.python.api import rdbms_googleapi - return rdbms_googleapi - - @classmethod - def get_pool_class(cls, url): - # Cloud SQL connections die at any moment - return NullPool - - def create_connect_args(self, url): - opts = url.translate_connect_args() - # 'dsn' and 'instance' are because we are skipping - # the traditional google.api.rdbms wrapper - - opts['dsn'] = '' - opts['instance'] = url.query['instance'] - return [], opts - - def _extract_error_code(self, exception): - match = re.compile(r"^(\d+):").match(str(exception)) - # The rdbms api will wrap then re-raise some types of errors - # making this regex return no matches. - if match: - code = match.group(1) - else: - code = None - if code: - return int(code) - -dialect = MySQLDialect_gaerdbms diff --git a/libs/sqlalchemy/dialects/mysql/mysqlconnector.py b/libs/sqlalchemy/dialects/mysql/mysqlconnector.py deleted file mode 100644 index bd8ee013..00000000 --- a/libs/sqlalchemy/dialects/mysql/mysqlconnector.py +++ /dev/null @@ -1,131 +0,0 @@ -# mysql/mysqlconnector.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the MySQL database via the MySQL Connector/Python adapter. 
- -MySQL Connector/Python is available at: - - https://launchpad.net/myconnpy - -Connecting ------------ - -Connect string format:: - - mysql+mysqlconnector://:@[:]/ - -""" - -import re - -from sqlalchemy.dialects.mysql.base import (MySQLDialect, - MySQLExecutionContext, MySQLCompiler, MySQLIdentifierPreparer, - BIT) - -from sqlalchemy.engine import base as engine_base, default -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy import exc, log, schema, sql, types as sqltypes, util -from sqlalchemy import processors - -class MySQLExecutionContext_mysqlconnector(MySQLExecutionContext): - - def get_lastrowid(self): - return self.cursor.lastrowid - - -class MySQLCompiler_mysqlconnector(MySQLCompiler): - def visit_mod(self, binary, **kw): - return self.process(binary.left) + " %% " + self.process(binary.right) - - def post_process_text(self, text): - return text.replace('%', '%%') - -class MySQLIdentifierPreparer_mysqlconnector(MySQLIdentifierPreparer): - - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace("%", "%%") - -class _myconnpyBIT(BIT): - def result_processor(self, dialect, coltype): - """MySQL-connector already converts mysql bits, so.""" - - return None - -class MySQLDialect_mysqlconnector(MySQLDialect): - driver = 'mysqlconnector' - supports_unicode_statements = True - supports_unicode_binds = True - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - - supports_native_decimal = True - - default_paramstyle = 'format' - execution_ctx_cls = MySQLExecutionContext_mysqlconnector - statement_compiler = MySQLCompiler_mysqlconnector - - preparer = MySQLIdentifierPreparer_mysqlconnector - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - BIT: _myconnpyBIT, - } - ) - - @classmethod - def dbapi(cls): - from mysql import connector - return connector - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - opts.update(url.query) - - util.coerce_kw_type(opts, 'buffered', bool) - util.coerce_kw_type(opts, 'raise_on_warnings', bool) - opts['buffered'] = True - opts['raise_on_warnings'] = True - - # FOUND_ROWS must be set in ClientFlag to enable - # supports_sane_rowcount. 
-        if self.dbapi is not None:
-            try:
-                from mysql.connector.constants import ClientFlag
-                client_flags = opts.get('client_flags', ClientFlag.get_default())
-                client_flags |= ClientFlag.FOUND_ROWS
-                opts['client_flags'] = client_flags
-            except:
-                pass
-        return [[], opts]
-
-    def _get_server_version_info(self, connection):
-        dbapi_con = connection.connection
-        version = dbapi_con.get_server_version()
-        return tuple(version)
-
-    def _detect_charset(self, connection):
-        return connection.connection.charset
-
-    def _extract_error_code(self, exception):
-        return exception.errno
-
-    def is_disconnect(self, e, connection, cursor):
-        errnos = (2006, 2013, 2014, 2045, 2055, 2048)
-        exceptions = (self.dbapi.OperationalError, self.dbapi.InterfaceError)
-        if isinstance(e, exceptions):
-            return e.errno in errnos
-        else:
-            return False
-
-    def _compat_fetchall(self, rp, charset=None):
-        return rp.fetchall()
-
-    def _compat_fetchone(self, rp, charset=None):
-        return rp.fetchone()
-
-dialect = MySQLDialect_mysqlconnector
diff --git a/libs/sqlalchemy/dialects/mysql/mysqldb.py b/libs/sqlalchemy/dialects/mysql/mysqldb.py
deleted file mode 100644
index c6ae5333..00000000
--- a/libs/sqlalchemy/dialects/mysql/mysqldb.py
+++ /dev/null
@@ -1,82 +0,0 @@
-# mysql/mysqldb.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Support for the MySQL database via the MySQL-python adapter.
-
-MySQL-Python is available at:
-
-    http://sourceforge.net/projects/mysql-python
-
-At least version 1.2.1 or 1.2.2 should be used.
-
-Connecting
------------
-
-Connect string format::
-
-    mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
-
-Unicode
--------
-
-MySQLdb will accommodate Python ``unicode`` objects if the
-``use_unicode=1`` parameter, or the ``charset`` parameter,
-is passed as a connection argument.
-
-Without this setting, many MySQL server installations default to
-a ``latin1`` encoding for client connections, which has the effect
-of all data being converted into ``latin1``, even if you have ``utf8``
-or another character set configured on your tables
-and columns.  With versions 4.1 and higher, you can change the connection
-character set either through server configuration or by including the
-``charset`` parameter.  The ``charset``
-parameter as received by MySQL-Python also has the side-effect of
-enabling ``use_unicode=1``::
-
-    # set client encoding to utf8; all strings come back as unicode
-    create_engine('mysql+mysqldb:///mydb?charset=utf8')
-
-Manually configuring ``use_unicode=0`` will cause MySQL-python to
-return encoded strings::
-
-    # set client encoding to utf8; all strings come back as utf8 str
-    create_engine('mysql+mysqldb:///mydb?charset=utf8&use_unicode=0')
-
-Known Issues
--------------
-
-MySQL-python version 1.2.2 has a serious memory leak related
-to unicode conversion, a feature which is disabled via ``use_unicode=0``.
-It is strongly advised to use the latest version of MySQL-Python.
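As a quick sanity check for the settings above (a sketch; the
``scott:tiger@localhost/test`` URL is only the conventional example and must
be replaced with a real server), the client character set the connection
actually negotiated can be read back directly::

    from sqlalchemy import create_engine

    engine = create_engine('mysql+mysqldb://scott:tiger@localhost/test?charset=utf8')
    conn = engine.connect()
    # expect ('character_set_client', 'utf8') if the charset argument took effect
    print conn.execute("SHOW VARIABLES LIKE 'character_set_client'").fetchone()
    conn.close()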
- -""" - -from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext, - MySQLCompiler, MySQLIdentifierPreparer) -from sqlalchemy.connectors.mysqldb import ( - MySQLDBExecutionContext, - MySQLDBCompiler, - MySQLDBIdentifierPreparer, - MySQLDBConnector - ) - -class MySQLExecutionContext_mysqldb(MySQLDBExecutionContext, MySQLExecutionContext): - pass - - -class MySQLCompiler_mysqldb(MySQLDBCompiler, MySQLCompiler): - pass - - -class MySQLIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer, MySQLIdentifierPreparer): - pass - -class MySQLDialect_mysqldb(MySQLDBConnector, MySQLDialect): - execution_ctx_cls = MySQLExecutionContext_mysqldb - statement_compiler = MySQLCompiler_mysqldb - preparer = MySQLIdentifierPreparer_mysqldb - -dialect = MySQLDialect_mysqldb diff --git a/libs/sqlalchemy/dialects/mysql/oursql.py b/libs/sqlalchemy/dialects/mysql/oursql.py deleted file mode 100644 index d6d8e9ff..00000000 --- a/libs/sqlalchemy/dialects/mysql/oursql.py +++ /dev/null @@ -1,268 +0,0 @@ -# mysql/oursql.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the MySQL database via the oursql adapter. - -OurSQL is available at: - - http://packages.python.org/oursql/ - -Connecting ------------ - -Connect string format:: - - mysql+oursql://:@[:]/ - -Unicode -------- - -oursql defaults to using ``utf8`` as the connection charset, but other -encodings may be used instead. Like the MySQL-Python driver, unicode support -can be completely disabled:: - - # oursql sets the connection charset to utf8 automatically; all strings come - # back as utf8 str - create_engine('mysql+oursql:///mydb?use_unicode=0') - -To not automatically use ``utf8`` and instead use whatever the connection -defaults to, there is a separate parameter:: - - # use the default connection charset; all strings come back as unicode - create_engine('mysql+oursql:///mydb?default_charset=1') - - # use latin1 as the connection charset; all strings come back as unicode - create_engine('mysql+oursql:///mydb?charset=latin1') -""" - -import re - -from sqlalchemy.dialects.mysql.base import (BIT, MySQLDialect, MySQLExecutionContext, - MySQLCompiler, MySQLIdentifierPreparer) -from sqlalchemy.engine import base as engine_base, default -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy import exc, log, schema, sql, types as sqltypes, util -from sqlalchemy import processors - - - -class _oursqlBIT(BIT): - def result_processor(self, dialect, coltype): - """oursql already converts mysql bits, so.""" - - return None - - -class MySQLExecutionContext_oursql(MySQLExecutionContext): - - @property - def plain_query(self): - return self.execution_options.get('_oursql_plain_query', False) - -class MySQLDialect_oursql(MySQLDialect): - driver = 'oursql' -# Py2K - supports_unicode_binds = True - supports_unicode_statements = True -# end Py2K - - supports_native_decimal = True - - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - execution_ctx_cls = MySQLExecutionContext_oursql - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - sqltypes.Time: sqltypes.Time, - BIT: _oursqlBIT, - } - ) - - @classmethod - def dbapi(cls): - return __import__('oursql') - - def do_execute(self, cursor, statement, parameters, context=None): - """Provide an implementation of *cursor.execute(statement, parameters)*.""" - - if context and context.plain_query: - 
cursor.execute(statement, plain_query=True) - else: - cursor.execute(statement, parameters) - - def do_begin(self, connection): - connection.cursor().execute('BEGIN', plain_query=True) - - def _xa_query(self, connection, query, xid): -# Py2K - arg = connection.connection._escape_string(xid) -# end Py2K -# Py3K -# charset = self._connection_charset -# arg = connection.connection._escape_string(xid.encode(charset)).decode(charset) - arg = "'%s'" % arg - connection.execution_options(_oursql_plain_query=True).execute(query % arg) - - # Because mysql is bad, these methods have to be - # reimplemented to use _PlainQuery. Basically, some queries - # refuse to return any data if they're run through - # the parameterized query API, or refuse to be parameterized - # in the first place. - def do_begin_twophase(self, connection, xid): - self._xa_query(connection, 'XA BEGIN %s', xid) - - def do_prepare_twophase(self, connection, xid): - self._xa_query(connection, 'XA END %s', xid) - self._xa_query(connection, 'XA PREPARE %s', xid) - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self._xa_query(connection, 'XA END %s', xid) - self._xa_query(connection, 'XA ROLLBACK %s', xid) - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self.do_prepare_twophase(connection, xid) - self._xa_query(connection, 'XA COMMIT %s', xid) - - # Q: why didn't we need all these "plain_query" overrides earlier ? - # am i on a newer/older version of OurSQL ? - def has_table(self, connection, table_name, schema=None): - return MySQLDialect.has_table(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - table_name, schema) - - def get_table_options(self, connection, table_name, schema=None, **kw): - return MySQLDialect.get_table_options(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - table_name, - schema = schema, - **kw - ) - - - def get_columns(self, connection, table_name, schema=None, **kw): - return MySQLDialect.get_columns(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - table_name, - schema=schema, - **kw - ) - - def get_view_names(self, connection, schema=None, **kw): - return MySQLDialect.get_view_names(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - schema=schema, - **kw - ) - - def get_table_names(self, connection, schema=None, **kw): - return MySQLDialect.get_table_names(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - schema - ) - - def get_schema_names(self, connection, **kw): - return MySQLDialect.get_schema_names(self, - connection.connect().\ - execution_options(_oursql_plain_query=True), - **kw - ) - - def initialize(self, connection): - return MySQLDialect.initialize( - self, - connection.execution_options(_oursql_plain_query=True) - ) - - def _show_create_table(self, connection, table, charset=None, - full_name=None): - return MySQLDialect._show_create_table(self, - connection.contextual_connect(close_with_result=True). 
-                                execution_options(_oursql_plain_query=True),
-                                table, charset, full_name)
-
-    def is_disconnect(self, e, connection, cursor):
-        if isinstance(e, self.dbapi.ProgrammingError):
-            return e.errno is None and 'cursor' not in e.args[1] and e.args[1].endswith('closed')
-        else:
-            return e.errno in (2006, 2013, 2014, 2045, 2055)
-
-    def create_connect_args(self, url):
-        opts = url.translate_connect_args(database='db', username='user',
-                                          password='passwd')
-        opts.update(url.query)
-
-        util.coerce_kw_type(opts, 'port', int)
-        util.coerce_kw_type(opts, 'compress', bool)
-        util.coerce_kw_type(opts, 'autoping', bool)
-        util.coerce_kw_type(opts, 'raise_on_warnings', bool)
-
-        util.coerce_kw_type(opts, 'default_charset', bool)
-        if opts.pop('default_charset', False):
-            opts['charset'] = None
-        else:
-            util.coerce_kw_type(opts, 'charset', str)
-        opts['use_unicode'] = opts.get('use_unicode', True)
-        util.coerce_kw_type(opts, 'use_unicode', bool)
-
-        # FOUND_ROWS must be set in CLIENT_FLAGS to enable
-        # supports_sane_rowcount.
-        opts.setdefault('found_rows', True)
-
-        ssl = {}
-        for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
-                    'ssl_capath', 'ssl_cipher']:
-            if key in opts:
-                ssl[key[4:]] = opts[key]
-                util.coerce_kw_type(ssl, key[4:], str)
-                del opts[key]
-        if ssl:
-            opts['ssl'] = ssl
-
-        return [[], opts]
-
-    def _get_server_version_info(self, connection):
-        dbapi_con = connection.connection
-        version = []
-        r = re.compile('[.\-]')
-        for n in r.split(dbapi_con.server_info):
-            try:
-                version.append(int(n))
-            except ValueError:
-                version.append(n)
-        return tuple(version)
-
-    def _extract_error_code(self, exception):
-        return exception.errno
-
-    def _detect_charset(self, connection):
-        """Sniff out the character set in use for connection results."""
-
-        return connection.connection.charset
-
-    def _compat_fetchall(self, rp, charset=None):
-        """oursql isn't super-broken like MySQLdb, yaaay."""
-        return rp.fetchall()
-
-    def _compat_fetchone(self, rp, charset=None):
-        """oursql isn't super-broken like MySQLdb, yaaay."""
-        return rp.fetchone()
-
-    def _compat_first(self, rp, charset=None):
-        return rp.first()
-
-
-dialect = MySQLDialect_oursql
diff --git a/libs/sqlalchemy/dialects/mysql/pymysql.py b/libs/sqlalchemy/dialects/mysql/pymysql.py
deleted file mode 100644
index f5aaa122..00000000
--- a/libs/sqlalchemy/dialects/mysql/pymysql.py
+++ /dev/null
@@ -1,39 +0,0 @@
-# mysql/pymysql.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Support for the MySQL database via the pymysql adapter.
-
-pymysql is available at:
-
-    http://code.google.com/p/pymysql/
-
-Connecting
-----------
-
-Connect string::
-
-    mysql+pymysql://<username>:<password>@<host>/<dbname>[?<options>]
-
-MySQL-Python Compatibility
---------------------------
-
-The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
-and targets 100% compatibility.  Most behavioral notes for MySQL-python apply to
-the pymysql driver as well.
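Because of that compatibility goal, moving between the two drivers is, in
principle, a URL-only change; the host and credentials below are placeholders::

    from sqlalchemy import create_engine

    # identical arguments; only the driver name in the URL differs
    engine_mysqldb = create_engine('mysql+mysqldb://scott:tiger@localhost/test?charset=utf8')
    engine_pymysql = create_engine('mysql+pymysql://scott:tiger@localhost/test?charset=utf8')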
- -""" - -from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb - -class MySQLDialect_pymysql(MySQLDialect_mysqldb): - driver = 'pymysql' - - description_encoding = None - @classmethod - def dbapi(cls): - return __import__('pymysql') - -dialect = MySQLDialect_pymysql \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/mysql/pyodbc.py b/libs/sqlalchemy/dialects/mysql/pyodbc.py deleted file mode 100644 index 5d631afb..00000000 --- a/libs/sqlalchemy/dialects/mysql/pyodbc.py +++ /dev/null @@ -1,82 +0,0 @@ -# mysql/pyodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the MySQL database via the pyodbc adapter. - -pyodbc is available at: - - http://pypi.python.org/pypi/pyodbc/ - -Connecting ----------- - -Connect string:: - - mysql+pyodbc://:@ - -Limitations ------------ - -The mysql-pyodbc dialect is subject to unresolved character encoding issues -which exist within the current ODBC drivers available. -(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage -of OurSQL, MySQLdb, or MySQL-connector/Python. - -""" - -from sqlalchemy.dialects.mysql.base import MySQLDialect, MySQLExecutionContext -from sqlalchemy.connectors.pyodbc import PyODBCConnector -from sqlalchemy.engine import base as engine_base -from sqlalchemy import util -import re - -class MySQLExecutionContext_pyodbc(MySQLExecutionContext): - - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT LAST_INSERT_ID()") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - -class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect): - supports_unicode_statements = False - execution_ctx_cls = MySQLExecutionContext_pyodbc - - pyodbc_driver_name = "MySQL" - - def __init__(self, **kw): - # deal with http://code.google.com/p/pyodbc/issues/detail?id=25 - kw.setdefault('convert_unicode', True) - super(MySQLDialect_pyodbc, self).__init__(**kw) - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - - # Prefer 'character_set_results' for the current connection over the - # value in the driver. SET NAMES or individual variable SETs will - # change the charset without updating the driver's view of the world. - # - # If it's decided that issuing that sort of SQL leaves you SOL, then - # this can prefer the driver value. - rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") - opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)]) - for key in ('character_set_connection', 'character_set'): - if opts.get(key, None): - return opts[key] - - util.warn("Could not detect the connection character set. 
Assuming latin1.") - return 'latin1' - - def _extract_error_code(self, exception): - m = re.compile(r"\((\d+)\)").search(str(exception.args)) - c = m.group(1) - if c: - return int(c) - else: - return None - -dialect = MySQLDialect_pyodbc diff --git a/libs/sqlalchemy/dialects/mysql/zxjdbc.py b/libs/sqlalchemy/dialects/mysql/zxjdbc.py deleted file mode 100644 index df479043..00000000 --- a/libs/sqlalchemy/dialects/mysql/zxjdbc.py +++ /dev/null @@ -1,117 +0,0 @@ -# mysql/zxjdbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the MySQL database via Jython's zxjdbc JDBC connector. - -JDBC Driver ------------ - -The official MySQL JDBC driver is at -http://dev.mysql.com/downloads/connector/j/. - -Connecting ----------- - -Connect string format: - - mysql+zxjdbc://:@[:]/ - -Character Sets --------------- - -SQLAlchemy zxjdbc dialects pass unicode straight through to the -zxjdbc/JDBC layer. To allow multiple character sets to be sent from the -MySQL Connector/J JDBC driver, by default SQLAlchemy sets its -``characterEncoding`` connection property to ``UTF-8``. It may be -overriden via a ``create_engine`` URL parameter. - -""" -import re - -from sqlalchemy import types as sqltypes, util -from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.mysql.base import BIT, MySQLDialect, MySQLExecutionContext - -class _ZxJDBCBit(BIT): - def result_processor(self, dialect, coltype): - """Converts boolean or byte arrays from MySQL Connector/J to longs.""" - def process(value): - if value is None: - return value - if isinstance(value, bool): - return int(value) - v = 0L - for i in value: - v = v << 8 | (i & 0xff) - value = v - return value - return process - - -class MySQLExecutionContext_zxjdbc(MySQLExecutionContext): - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT LAST_INSERT_ID()") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - - -class MySQLDialect_zxjdbc(ZxJDBCConnector, MySQLDialect): - jdbc_db_name = 'mysql' - jdbc_driver_name = 'com.mysql.jdbc.Driver' - - execution_ctx_cls = MySQLExecutionContext_zxjdbc - - colspecs = util.update_copy( - MySQLDialect.colspecs, - { - sqltypes.Time: sqltypes.Time, - BIT: _ZxJDBCBit - } - ) - - def _detect_charset(self, connection): - """Sniff out the character set in use for connection results.""" - # Prefer 'character_set_results' for the current connection over the - # value in the driver. SET NAMES or individual variable SETs will - # change the charset without updating the driver's view of the world. - # - # If it's decided that issuing that sort of SQL leaves you SOL, then - # this can prefer the driver value. - rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'") - opts = dict((row[0], row[1]) for row in self._compat_fetchall(rs)) - for key in ('character_set_connection', 'character_set'): - if opts.get(key, None): - return opts[key] - - util.warn("Could not detect the connection character set. 
Assuming latin1.") - return 'latin1' - - def _driver_kwargs(self): - """return kw arg dict to be sent to connect().""" - return dict(characterEncoding='UTF-8', yearIsDateType='false') - - def _extract_error_code(self, exception): - # e.g.: DBAPIError: (Error) Table 'test.u2' doesn't exist - # [SQLCode: 1146], [SQLState: 42S02] 'DESCRIBE `u2`' () - m = re.compile(r"\[SQLCode\: (\d+)\]").search(str(exception.args)) - c = m.group(1) - if c: - return int(c) - - def _get_server_version_info(self,connection): - dbapi_con = connection.connection - version = [] - r = re.compile('[.\-]') - for n in r.split(dbapi_con.dbversion): - try: - version.append(int(n)) - except ValueError: - version.append(n) - return tuple(version) - -dialect = MySQLDialect_zxjdbc diff --git a/libs/sqlalchemy/dialects/oracle/__init__.py b/libs/sqlalchemy/dialects/oracle/__init__.py deleted file mode 100644 index a1e2a8dd..00000000 --- a/libs/sqlalchemy/dialects/oracle/__init__.py +++ /dev/null @@ -1,23 +0,0 @@ -# oracle/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.oracle import base, cx_oracle, zxjdbc - -base.dialect = cx_oracle.dialect - -from sqlalchemy.dialects.oracle.base import \ - VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER,\ - BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\ - FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\ - VARCHAR2, NVARCHAR2, ROWID - - -__all__ = ( -'VARCHAR', 'NVARCHAR', 'CHAR', 'DATE', 'DATETIME', 'NUMBER', -'BLOB', 'BFILE', 'CLOB', 'NCLOB', 'TIMESTAMP', 'RAW', -'FLOAT', 'DOUBLE_PRECISION', 'LONG', 'dialect', 'INTERVAL', -'VARCHAR2', 'NVARCHAR2', 'ROWID' -) diff --git a/libs/sqlalchemy/dialects/oracle/base.py b/libs/sqlalchemy/dialects/oracle/base.py deleted file mode 100644 index f82991bc..00000000 --- a/libs/sqlalchemy/dialects/oracle/base.py +++ /dev/null @@ -1,1162 +0,0 @@ -# oracle/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Oracle database. - -Oracle version 8 through current (11g at the time of this writing) are supported. - -For information on connecting via specific drivers, see the documentation -for that driver. - -Connect Arguments ------------------ - -The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which -affect the behavior of the dialect regardless of driver in use. - -* *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults - to ``True``. If ``False``, Oracle-8 compatible constructs are used for joins. - -* *optimize_limits* - defaults to ``False``. see the section on LIMIT/OFFSET. - -* *use_binds_for_limits* - defaults to ``True``. see the section on LIMIT/OFFSET. - -Auto Increment Behavior ------------------------ - -SQLAlchemy Table objects which include integer primary keys are usually assumed to have -"autoincrementing" behavior, meaning they can generate their own primary key values upon -INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences -to produce these values. With the Oracle dialect, *a sequence must always be explicitly -specified to enable autoincrement*. This is divergent with the majority of documentation -examples which assume the usage of an autoincrement-capable database. 
To specify sequences, -use the sqlalchemy.schema.Sequence object which is passed to a Column construct:: - - t = Table('mytable', metadata, - Column('id', Integer, Sequence('id_seq'), primary_key=True), - Column(...), ... - ) - -This step is also required when using table reflection, i.e. autoload=True:: - - t = Table('mytable', metadata, - Column('id', Integer, Sequence('id_seq'), primary_key=True), - autoload=True - ) - -Identifier Casing ------------------ - -In Oracle, the data dictionary represents all case insensitive identifier names -using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier -name to be case insensitive. The Oracle dialect converts all case insensitive identifiers -to and from those two formats during schema level communication, such as reflection of -tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a -case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches -against data dictionary data received from Oracle, so unless identifier names have been -truly created as case sensitive (i.e. using quoted names), all lowercase names should be -used on the SQLAlchemy side. - -Unicode -------- - -.. versionchanged:: 0.6 - SQLAlchemy uses the "native unicode" mode provided as of cx_oracle 5. - cx_oracle 5.0.2 or greater is recommended for support of NCLOB. - If not using cx_oracle 5, the NLS_LANG environment variable needs - to be set in order for the oracle client library to use proper encoding, - such as "AMERICAN_AMERICA.UTF8". - -Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types. -When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used -within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still -requires NLS_LANG to be set. - -LIMIT/OFFSET Support --------------------- - -Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses -a wrapped subquery approach in conjunction with ROWNUM. The exact methodology -is taken from -http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html . - -There are two options which affect its behavior: - -* the "FIRST ROWS()" optimization keyword is not used by default. To enable the usage of this - optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`. -* the values passed for the limit/offset are sent as bound parameters. Some users have observed - that Oracle produces a poor query plan when the values are sent as binds and not - rendered literally. To render the limit/offset values literally within the SQL - statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`. - -Some users have reported better performance when the entirely different approach of a -window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note -that the majority of users don't observe this). To suit this case the -method used for LIMIT/OFFSET can be replaced entirely. See the recipe at -http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault -which installs a select compiler that overrides the generation of limit/offset with -a window function. - -ON UPDATE CASCADE ------------------ - -Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution -is available at http://asktom.oracle.com/tkyte/update_cascade/index.html . 
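The wrapped-subquery approach in the LIMIT/OFFSET section above can be
sketched concretely in a few lines of plain string handling (hypothetical
table and column names; this mirrors, but does not reproduce, the
compiler's exact output)::

    limit, offset = 10, 20
    inner = "SELECT id, title FROM mytable ORDER BY id"
    paged = (
        "SELECT * FROM ("
        " SELECT inner_q.*, ROWNUM AS ora_rn FROM (%s) inner_q"
        " WHERE ROWNUM <= %d"
        ") WHERE ora_rn > %d"
    ) % (inner, limit + offset, offset)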
- -When using the SQLAlchemy ORM, the ORM has limited ability to manually issue -cascading updates - specify ForeignKey objects using the -"deferrable=True, initially='deferred'" keyword arguments, -and specify "passive_updates=False" on each relationship(). - -Oracle 8 Compatibility ----------------------- - -When Oracle 8 is detected, the dialect internally configures itself to the following -behaviors: - -* the use_ansi flag is set to False. This has the effect of converting all - JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN - makes use of Oracle's (+) operator. - -* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when - the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued - instead. This because these types don't seem to work correctly on Oracle 8 - even though they are available. The :class:`~sqlalchemy.types.NVARCHAR` - and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB. - -* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy - encodes all Python unicode objects to "string" before passing in as bind parameters. - -Synonym/DBLINK Reflection -------------------------- - -When using reflection with Table objects, the dialect can optionally search for tables -indicated by synonyms that reference DBLINK-ed tables by passing the flag -oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK -is not in use this flag should be left off. - -""" - -import random, re - -from sqlalchemy import schema as sa_schema -from sqlalchemy import util, sql, log -from sqlalchemy.engine import default, base, reflection -from sqlalchemy.sql import compiler, visitors, expression -from sqlalchemy.sql import operators as sql_operators, functions as sql_functions -from sqlalchemy import types as sqltypes -from sqlalchemy.types import VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, \ - BLOB, CLOB, TIMESTAMP, FLOAT - -RESERVED_WORDS = \ - set('SHARE RAW DROP BETWEEN FROM DESC OPTION PRIOR LONG THEN '\ - 'DEFAULT ALTER IS INTO MINUS INTEGER NUMBER GRANT IDENTIFIED '\ - 'ALL TO ORDER ON FLOAT DATE HAVING CLUSTER NOWAIT RESOURCE '\ - 'ANY TABLE INDEX FOR UPDATE WHERE CHECK SMALLINT WITH DELETE '\ - 'BY ASC REVOKE LIKE SIZE RENAME NOCOMPRESS NULL GROUP VALUES '\ - 'AS IN VIEW EXCLUSIVE COMPRESS SYNONYM SELECT INSERT EXISTS '\ - 'NOT TRIGGER ELSE CREATE INTERSECT PCTFREE DISTINCT USER '\ - 'CONNECT SET MODE OF UNIQUE VARCHAR2 VARCHAR LOCK OR CHAR '\ - 'DECIMAL UNION PUBLIC AND START UID COMMENT CURRENT LEVEL'.split()) - -NO_ARG_FNS = set('UID CURRENT_DATE SYSDATE USER ' - 'CURRENT_TIME CURRENT_TIMESTAMP'.split()) - -class RAW(sqltypes._Binary): - __visit_name__ = 'RAW' -OracleRaw = RAW - -class NCLOB(sqltypes.Text): - __visit_name__ = 'NCLOB' - -class VARCHAR2(VARCHAR): - __visit_name__ = 'VARCHAR2' - -NVARCHAR2 = NVARCHAR - -class NUMBER(sqltypes.Numeric, sqltypes.Integer): - __visit_name__ = 'NUMBER' - - def __init__(self, precision=None, scale=None, asdecimal=None): - if asdecimal is None: - asdecimal = bool(scale and scale > 0) - - super(NUMBER, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal) - - def adapt(self, impltype): - ret = super(NUMBER, self).adapt(impltype) - # leave a hint for the DBAPI handler - ret._is_oracle_number = True - return ret - - @property - def _type_affinity(self): - if bool(self.scale and self.scale > 0): - return sqltypes.Numeric - else: - return sqltypes.Integer - - -class DOUBLE_PRECISION(sqltypes.Numeric): - 
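    # Contrast with NUMBER above, whose asdecimal default depends on a
    # positive scale: DOUBLE_PRECISION defaults asdecimal to False in
    # __init__ below, so values come back as Python floats.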
__visit_name__ = 'DOUBLE_PRECISION' - def __init__(self, precision=None, scale=None, asdecimal=None): - if asdecimal is None: - asdecimal = False - - super(DOUBLE_PRECISION, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal) - -class BFILE(sqltypes.LargeBinary): - __visit_name__ = 'BFILE' - -class LONG(sqltypes.Text): - __visit_name__ = 'LONG' - -class INTERVAL(sqltypes.TypeEngine): - __visit_name__ = 'INTERVAL' - - def __init__(self, - day_precision=None, - second_precision=None): - """Construct an INTERVAL. - - Note that only DAY TO SECOND intervals are currently supported. - This is due to a lack of support for YEAR TO MONTH intervals - within available DBAPIs (cx_oracle and zxjdbc). - - :param day_precision: the day precision value. this is the number of digits - to store for the day field. Defaults to "2" - :param second_precision: the second precision value. this is the number of digits - to store for the fractional seconds field. Defaults to "6". - - """ - self.day_precision = day_precision - self.second_precision = second_precision - - @classmethod - def _adapt_from_generic_interval(cls, interval): - return INTERVAL(day_precision=interval.day_precision, - second_precision=interval.second_precision) - - @property - def _type_affinity(self): - return sqltypes.Interval - -class ROWID(sqltypes.TypeEngine): - """Oracle ROWID type. - - When used in a cast() or similar, generates ROWID. - - """ - __visit_name__ = 'ROWID' - - - -class _OracleBoolean(sqltypes.Boolean): - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - -colspecs = { - sqltypes.Boolean : _OracleBoolean, - sqltypes.Interval : INTERVAL, -} - -ischema_names = { - 'VARCHAR2' : VARCHAR, - 'NVARCHAR2' : NVARCHAR, - 'CHAR' : CHAR, - 'DATE' : DATE, - 'NUMBER' : NUMBER, - 'BLOB' : BLOB, - 'BFILE' : BFILE, - 'CLOB' : CLOB, - 'NCLOB' : NCLOB, - 'TIMESTAMP' : TIMESTAMP, - 'TIMESTAMP WITH TIME ZONE' : TIMESTAMP, - 'INTERVAL DAY TO SECOND' : INTERVAL, - 'RAW' : RAW, - 'FLOAT' : FLOAT, - 'DOUBLE PRECISION' : DOUBLE_PRECISION, - 'LONG' : LONG, -} - - -class OracleTypeCompiler(compiler.GenericTypeCompiler): - # Note: - # Oracle DATE == DATETIME - # Oracle does not allow milliseconds in DATE - # Oracle does not support TIME columns - - def visit_datetime(self, type_): - return self.visit_DATE(type_) - - def visit_float(self, type_): - return self.visit_FLOAT(type_) - - def visit_unicode(self, type_): - if self.dialect._supports_nchar: - return self.visit_NVARCHAR2(type_) - else: - return self.visit_VARCHAR2(type_) - - def visit_INTERVAL(self, type_): - return "INTERVAL DAY%s TO SECOND%s" % ( - type_.day_precision is not None and - "(%d)" % type_.day_precision or - "", - type_.second_precision is not None and - "(%d)" % type_.second_precision or - "", - ) - - def visit_LONG(self, type_): - return "LONG" - - def visit_TIMESTAMP(self, type_): - if type_.timezone: - return "TIMESTAMP WITH TIME ZONE" - else: - return "TIMESTAMP" - - def visit_DOUBLE_PRECISION(self, type_): - return self._generate_numeric(type_, "DOUBLE PRECISION") - - def visit_NUMBER(self, type_, **kw): - return self._generate_numeric(type_, "NUMBER", **kw) - - def _generate_numeric(self, type_, name, precision=None, scale=None): - if precision is None: - precision = type_.precision - - if scale is None: - scale = getattr(type_, 'scale', None) - - if precision is None: - return name - elif scale is None: - return "%(name)s(%(precision)s)" % {'name':name,'precision': precision} - else: - return "%(name)s(%(precision)s, %(scale)s)" % 
{'name':name,'precision': precision, 'scale' : scale} - - def visit_string(self, type_): - return self.visit_VARCHAR2(type_) - - def visit_VARCHAR2(self, type_): - return self._visit_varchar(type_, '', '2') - - def visit_NVARCHAR2(self, type_): - return self._visit_varchar(type_, 'N', '2') - visit_NVARCHAR = visit_NVARCHAR2 - - def visit_VARCHAR(self, type_): - return self._visit_varchar(type_, '', '') - - def _visit_varchar(self, type_, n, num): - if not n and self.dialect._supports_char_length: - return "VARCHAR%(two)s(%(length)s CHAR)" % { - 'length' : type_.length, - 'two':num} - else: - return "%(n)sVARCHAR%(two)s(%(length)s)" % {'length' : type_.length, - 'two':num, 'n':n} - - def visit_text(self, type_): - return self.visit_CLOB(type_) - - def visit_unicode_text(self, type_): - if self.dialect._supports_nchar: - return self.visit_NCLOB(type_) - else: - return self.visit_CLOB(type_) - - def visit_large_binary(self, type_): - return self.visit_BLOB(type_) - - def visit_big_integer(self, type_): - return self.visit_NUMBER(type_, precision=19) - - def visit_boolean(self, type_): - return self.visit_SMALLINT(type_) - - def visit_RAW(self, type_): - if type_.length: - return "RAW(%(length)s)" % {'length' : type_.length} - else: - return "RAW" - - def visit_ROWID(self, type_): - return "ROWID" - -class OracleCompiler(compiler.SQLCompiler): - """Oracle compiler modifies the lexical structure of Select - statements to work under non-ANSI configured Oracle databases, if - the use_ansi flag is False. - """ - - compound_keywords = util.update_copy( - compiler.SQLCompiler.compound_keywords, - { - expression.CompoundSelect.EXCEPT : 'MINUS' - } - ) - - def __init__(self, *args, **kwargs): - self.__wheres = {} - self._quoted_bind_names = {} - super(OracleCompiler, self).__init__(*args, **kwargs) - - def visit_mod(self, binary, **kw): - return "mod(%s, %s)" % (self.process(binary.left), self.process(binary.right)) - - def visit_now_func(self, fn, **kw): - return "CURRENT_TIMESTAMP" - - def visit_char_length_func(self, fn, **kw): - return "LENGTH" + self.function_argspec(fn, **kw) - - def visit_match_op(self, binary, **kw): - return "CONTAINS (%s, %s)" % (self.process(binary.left), self.process(binary.right)) - - def get_select_hint_text(self, byfroms): - return " ".join( - "/*+ %s */" % text for table, text in byfroms.items() - ) - - def function_argspec(self, fn, **kw): - if len(fn.clauses) > 0 or fn.name.upper() not in NO_ARG_FNS: - return compiler.SQLCompiler.function_argspec(self, fn, **kw) - else: - return "" - - def default_from(self): - """Called when a ``SELECT`` statement has no froms, - and no ``FROM`` clause is to be appended. - - The Oracle compiler tacks a "FROM DUAL" to the statement. 
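        A quick way to observe this against a standalone SQLAlchemy of the
        same vintage (a sketch assuming the 0.x list-style ``select()``)::

            from sqlalchemy import select, literal_column
            from sqlalchemy.dialects import oracle

            stmt = select([literal_column("1")])
            print(stmt.compile(dialect=oracle.dialect()))
            # SELECT 1 FROM DUAL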
- """ - - return " FROM DUAL" - - def visit_join(self, join, **kwargs): - if self.dialect.use_ansi: - return compiler.SQLCompiler.visit_join(self, join, **kwargs) - else: - kwargs['asfrom'] = True - return self.process(join.left, **kwargs) + \ - ", " + self.process(join.right, **kwargs) - - def _get_nonansi_join_whereclause(self, froms): - clauses = [] - - def visit_join(join): - if join.isouter: - def visit_binary(binary): - if binary.operator == sql_operators.eq: - if binary.left.table is join.right: - binary.left = _OuterJoinColumn(binary.left) - elif binary.right.table is join.right: - binary.right = _OuterJoinColumn(binary.right) - clauses.append(visitors.cloned_traverse(join.onclause, {}, - {'binary':visit_binary})) - else: - clauses.append(join.onclause) - - for j in join.left, join.right: - if isinstance(j, expression.Join): - visit_join(j) - - for f in froms: - if isinstance(f, expression.Join): - visit_join(f) - - if not clauses: - return None - else: - return sql.and_(*clauses) - - def visit_outer_join_column(self, vc): - return self.process(vc.column) + "(+)" - - def visit_sequence(self, seq): - return self.dialect.identifier_preparer.format_sequence(seq) + ".nextval" - - def visit_alias(self, alias, asfrom=False, ashint=False, **kwargs): - """Oracle doesn't like ``FROM table AS alias``. Is the AS standard SQL??""" - - if asfrom or ashint: - alias_name = isinstance(alias.name, expression._truncated_label) and \ - self._truncated_identifier("alias", alias.name) or alias.name - - if ashint: - return alias_name - elif asfrom: - return self.process(alias.original, asfrom=asfrom, **kwargs) + \ - " " + self.preparer.format_alias(alias, alias_name) - else: - return self.process(alias.original, **kwargs) - - def returning_clause(self, stmt, returning_cols): - - def create_out_param(col, i): - bindparam = sql.outparam("ret_%d" % i, type_=col.type) - self.binds[bindparam.key] = bindparam - return self.bindparam_string(self._truncate_bindparam(bindparam)) - - columnlist = list(expression._select_iterables(returning_cols)) - - # within_columns_clause =False so that labels (foo AS bar) don't render - columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) for c in columnlist] - - binds = [create_out_param(c, i) for i, c in enumerate(columnlist)] - - return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) - - def _TODO_visit_compound_select(self, select): - """Need to determine how to get ``LIMIT``/``OFFSET`` into a ``UNION`` for Oracle.""" - pass - - def visit_select(self, select, **kwargs): - """Look for ``LIMIT`` and OFFSET in a select statement, and if - so tries to wrap it in a subquery with ``rownum`` criterion. - """ - - if not getattr(select, '_oracle_visit', None): - if not self.dialect.use_ansi: - if self.stack and 'from' in self.stack[-1]: - existingfroms = self.stack[-1]['from'] - else: - existingfroms = None - - froms = select._get_display_froms(existingfroms) - whereclause = self._get_nonansi_join_whereclause(froms) - if whereclause is not None: - select = select.where(whereclause) - select._oracle_visit = True - - if select._limit is not None or select._offset is not None: - # See http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html - # - # Generalized form of an Oracle pagination query: - # select ... from ( - # select /*+ FIRST_ROWS(N) */ ...., rownum as ora_rn from ( - # select distinct ... where ... order by ... 
- # ) where ROWNUM <= :limit+:offset - # ) where ora_rn > :offset - # Outer select and "ROWNUM as ora_rn" can be dropped if limit=0 - - # TODO: use annotations instead of clone + attr set ? - select = select._generate() - select._oracle_visit = True - - # Wrap the middle select and add the hint - limitselect = sql.select([c for c in select.c]) - if select._limit and self.dialect.optimize_limits: - limitselect = limitselect.prefix_with("/*+ FIRST_ROWS(%d) */" % select._limit) - - limitselect._oracle_visit = True - limitselect._is_wrapper = True - - # If needed, add the limiting clause - if select._limit is not None: - max_row = select._limit - if select._offset is not None: - max_row += select._offset - if not self.dialect.use_binds_for_limits: - max_row = sql.literal_column("%d" % max_row) - limitselect.append_whereclause( - sql.literal_column("ROWNUM") <= max_row) - - # If needed, add the ora_rn, and wrap again with offset. - if select._offset is None: - limitselect.for_update = select.for_update - select = limitselect - else: - limitselect = limitselect.column( - sql.literal_column("ROWNUM").label("ora_rn")) - limitselect._oracle_visit = True - limitselect._is_wrapper = True - - offsetselect = sql.select( - [c for c in limitselect.c if c.key!='ora_rn']) - offsetselect._oracle_visit = True - offsetselect._is_wrapper = True - - offset_value = select._offset - if not self.dialect.use_binds_for_limits: - offset_value = sql.literal_column("%d" % offset_value) - offsetselect.append_whereclause( - sql.literal_column("ora_rn")>offset_value) - - offsetselect.for_update = select.for_update - select = offsetselect - - kwargs['iswrapper'] = getattr(select, '_is_wrapper', False) - return compiler.SQLCompiler.visit_select(self, select, **kwargs) - - def limit_clause(self, select): - return "" - - def for_update_clause(self, select): - if self.is_subquery(): - return "" - elif select.for_update == "nowait": - return " FOR UPDATE NOWAIT" - else: - return super(OracleCompiler, self).for_update_clause(select) - -class OracleDDLCompiler(compiler.DDLCompiler): - - def define_constraint_cascades(self, constraint): - text = "" - if constraint.ondelete is not None: - text += " ON DELETE %s" % constraint.ondelete - - # oracle has no ON UPDATE CASCADE - - # its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html - if constraint.onupdate is not None: - util.warn( - "Oracle does not contain native UPDATE CASCADE " - "functionality - onupdates will not be rendered for foreign keys. 
" - "Consider using deferrable=True, initially='deferred' or triggers.") - - return text - -class OracleIdentifierPreparer(compiler.IdentifierPreparer): - - reserved_words = set([x.lower() for x in RESERVED_WORDS]) - illegal_initial_characters = set(xrange(0, 10)).union(["_", "$"]) - - def _bindparam_requires_quotes(self, value): - """Return True if the given identifier requires quoting.""" - lc_value = value.lower() - return (lc_value in self.reserved_words - or value[0] in self.illegal_initial_characters - or not self.legal_characters.match(unicode(value)) - ) - - def format_savepoint(self, savepoint): - name = re.sub(r'^_+', '', savepoint.ident) - return super(OracleIdentifierPreparer, self).format_savepoint(savepoint, name) - - -class OracleExecutionContext(default.DefaultExecutionContext): - def fire_sequence(self, seq, type_): - return self._execute_scalar("SELECT " + - self.dialect.identifier_preparer.format_sequence(seq) + - ".nextval FROM DUAL", type_) - -class OracleDialect(default.DefaultDialect): - name = 'oracle' - supports_alter = True - supports_unicode_statements = False - supports_unicode_binds = False - max_identifier_length = 30 - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - supports_sequences = True - sequences_optional = False - postfetch_lastrowid = False - - default_paramstyle = 'named' - colspecs = colspecs - ischema_names = ischema_names - requires_name_normalize = True - - supports_default_values = False - supports_empty_insert = False - - statement_compiler = OracleCompiler - ddl_compiler = OracleDDLCompiler - type_compiler = OracleTypeCompiler - preparer = OracleIdentifierPreparer - execution_ctx_cls = OracleExecutionContext - - reflection_options = ('oracle_resolve_synonyms', ) - - def __init__(self, - use_ansi=True, - optimize_limits=False, - use_binds_for_limits=True, - **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.use_ansi = use_ansi - self.optimize_limits = optimize_limits - self.use_binds_for_limits = use_binds_for_limits - - def initialize(self, connection): - super(OracleDialect, self).initialize(connection) - self.implicit_returning = self.__dict__.get( - 'implicit_returning', - self.server_version_info > (10, ) - ) - - if self._is_oracle_8: - self.colspecs = self.colspecs.copy() - self.colspecs.pop(sqltypes.Interval) - self.use_ansi = False - - @property - def _is_oracle_8(self): - return self.server_version_info and \ - self.server_version_info < (9, ) - - @property - def _supports_char_length(self): - return not self._is_oracle_8 - - @property - def _supports_nchar(self): - return not self._is_oracle_8 - - def do_release_savepoint(self, connection, name): - # Oracle does not support RELEASE SAVEPOINT - pass - - def has_table(self, connection, table_name, schema=None): - if not schema: - schema = self.default_schema_name - cursor = connection.execute( - sql.text("SELECT table_name FROM all_tables " - "WHERE table_name = :name AND owner = :schema_name"), - name=self.denormalize_name(table_name), schema_name=self.denormalize_name(schema)) - return cursor.first() is not None - - def has_sequence(self, connection, sequence_name, schema=None): - if not schema: - schema = self.default_schema_name - cursor = connection.execute( - sql.text("SELECT sequence_name FROM all_sequences " - "WHERE sequence_name = :name AND sequence_owner = :schema_name"), - name=self.denormalize_name(sequence_name), schema_name=self.denormalize_name(schema)) - return cursor.first() is not None - - def normalize_name(self, name): - if 
name is None: - return None - # Py2K - if isinstance(name, str): - name = name.decode(self.encoding) - # end Py2K - if name.upper() == name and \ - not self.identifier_preparer._requires_quotes(name.lower()): - return name.lower() - else: - return name - - def denormalize_name(self, name): - if name is None: - return None - elif name.lower() == name and not self.identifier_preparer._requires_quotes(name.lower()): - name = name.upper() - # Py2K - if not self.supports_unicode_binds: - name = name.encode(self.encoding) - else: - name = unicode(name) - # end Py2K - return name - - def _get_default_schema_name(self, connection): - return self.normalize_name(connection.execute(u'SELECT USER FROM DUAL').scalar()) - - def _resolve_synonym(self, connection, desired_owner=None, desired_synonym=None, desired_table=None): - """search for a local synonym matching the given desired owner/name. - - if desired_owner is None, attempts to locate a distinct owner. - - returns the actual name, owner, dblink name, and synonym name if found. - """ - - q = "SELECT owner, table_owner, table_name, db_link, synonym_name FROM all_synonyms WHERE " - clauses = [] - params = {} - if desired_synonym: - clauses.append("synonym_name = :synonym_name") - params['synonym_name'] = desired_synonym - if desired_owner: - clauses.append("table_owner = :desired_owner") - params['desired_owner'] = desired_owner - if desired_table: - clauses.append("table_name = :tname") - params['tname'] = desired_table - - q += " AND ".join(clauses) - - result = connection.execute(sql.text(q), **params) - if desired_owner: - row = result.first() - if row: - return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] - else: - return None, None, None, None - else: - rows = result.fetchall() - if len(rows) > 1: - raise AssertionError("There are multiple tables visible to the schema, you must specify owner") - elif len(rows) == 1: - row = rows[0] - return row['table_name'], row['table_owner'], row['db_link'], row['synonym_name'] - else: - return None, None, None, None - - @reflection.cache - def _prepare_reflection_args(self, connection, table_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - - if resolve_synonyms: - actual_name, owner, dblink, synonym = self._resolve_synonym( - connection, - desired_owner=self.denormalize_name(schema), - desired_synonym=self.denormalize_name(table_name) - ) - else: - actual_name, owner, dblink, synonym = None, None, None, None - if not actual_name: - actual_name = self.denormalize_name(table_name) - if not dblink: - dblink = '' - if not owner: - owner = self.denormalize_name(schema or self.default_schema_name) - return (actual_name, owner, dblink, synonym) - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = "SELECT username FROM all_users ORDER BY username" - cursor = connection.execute(s,) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - schema = self.denormalize_name(schema or self.default_schema_name) - - # note that table_names() isnt loading DBLINKed or synonym'ed tables - if schema is None: - schema = self.default_schema_name - s = sql.text( - "SELECT table_name FROM all_tables " - "WHERE nvl(tablespace_name, 'no tablespace') NOT IN ('SYSTEM', 'SYSAUX') " - "AND OWNER = :owner " - "AND IOT_NAME IS NULL") - cursor = connection.execute(s, owner=schema) - return [self.normalize_name(row[0]) for row in cursor] - - - @reflection.cache - def get_view_names(self, 
connection, schema=None, **kw): - schema = self.denormalize_name(schema or self.default_schema_name) - s = sql.text("SELECT view_name FROM all_views WHERE owner = :owner") - cursor = connection.execute(s, owner=self.denormalize_name(schema)) - return [self.normalize_name(row[0]) for row in cursor] - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - """ - - kw arguments can be: - - oracle_resolve_synonyms - - dblink - - """ - - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - columns = [] - if self._supports_char_length: - char_length_col = 'char_length' - else: - char_length_col = 'data_length' - - c = connection.execute(sql.text( - "SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, " - "nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s " - "WHERE table_name = :table_name AND owner = :owner " - "ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}), - table_name=table_name, owner=schema) - - for row in c: - (colname, orig_colname, coltype, length, precision, scale, nullable, default) = \ - (self.normalize_name(row[0]), row[0], row[1], row[2], row[3], row[4], row[5]=='Y', row[6]) - - if coltype == 'NUMBER' : - coltype = NUMBER(precision, scale) - elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'): - coltype = self.ischema_names.get(coltype)(length) - elif 'WITH TIME ZONE' in coltype: - coltype = TIMESTAMP(timezone=True) - else: - coltype = re.sub(r'\(\d+\)', '', coltype) - try: - coltype = self.ischema_names[coltype] - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (coltype, colname)) - coltype = sqltypes.NULLTYPE - - cdict = { - 'name': colname, - 'type': coltype, - 'nullable': nullable, - 'default': default, - 'autoincrement':default is None - } - if orig_colname.lower() == orig_colname: - cdict['quote'] = True - - columns.append(cdict) - return columns - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - - - info_cache = kw.get('info_cache') - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - indexes = [] - q = sql.text(""" - SELECT a.index_name, a.column_name, b.uniqueness - FROM ALL_IND_COLUMNS%(dblink)s a, - ALL_INDEXES%(dblink)s b - WHERE - a.index_name = b.index_name - AND a.table_owner = b.table_owner - AND a.table_name = b.table_name - - AND a.table_name = :table_name - AND a.table_owner = :schema - ORDER BY a.index_name, a.column_position""" % {'dblink': dblink}) - rp = connection.execute(q, table_name=self.denormalize_name(table_name), - schema=self.denormalize_name(schema)) - indexes = [] - last_index_name = None - pkeys = self.get_primary_keys(connection, table_name, schema, - resolve_synonyms=resolve_synonyms, - dblink=dblink, - info_cache=kw.get('info_cache')) - uniqueness = dict(NONUNIQUE=False, UNIQUE=True) - - oracle_sys_col = re.compile(r'SYS_NC\d+\$', re.IGNORECASE) - - def upper_name_set(names): - return set([i.upper() for i in names]) - - pk_names = upper_name_set(pkeys) - - def remove_if_primary_key(index): - # don't include the primary key index - if index is not None and \ - upper_name_set(index['column_names']) == 
pk_names: - indexes.pop() - - index = None - for rset in rp: - if rset.index_name != last_index_name: - remove_if_primary_key(index) - index = dict(name=self.normalize_name(rset.index_name), column_names=[]) - indexes.append(index) - index['unique'] = uniqueness.get(rset.uniqueness, False) - - # filter out Oracle SYS_NC names. could also do an outer join - # to the all_tab_columns table and check for real col names there. - if not oracle_sys_col.match(rset.column_name): - index['column_names'].append(self.normalize_name(rset.column_name)) - last_index_name = rset.index_name - remove_if_primary_key(index) - return indexes - - @reflection.cache - def _get_constraint_data(self, connection, table_name, schema=None, - dblink='', **kw): - - rp = connection.execute( - sql.text("""SELECT - ac.constraint_name, - ac.constraint_type, - loc.column_name AS local_column, - rem.table_name AS remote_table, - rem.column_name AS remote_column, - rem.owner AS remote_owner, - loc.position as loc_pos, - rem.position as rem_pos - FROM all_constraints%(dblink)s ac, - all_cons_columns%(dblink)s loc, - all_cons_columns%(dblink)s rem - WHERE ac.table_name = :table_name - AND ac.constraint_type IN ('R','P') - AND ac.owner = :owner - AND ac.owner = loc.owner - AND ac.constraint_name = loc.constraint_name - AND ac.r_owner = rem.owner(+) - AND ac.r_constraint_name = rem.constraint_name(+) - AND (rem.position IS NULL or loc.position=rem.position) - ORDER BY ac.constraint_name, loc.position""" % {'dblink': dblink}), - table_name=table_name, owner=schema) - constraint_data = rp.fetchall() - return constraint_data - - def get_primary_keys(self, connection, table_name, schema=None, **kw): - """ - - kw arguments can be: - - oracle_resolve_synonyms - - dblink - - """ - return self._get_primary_keys(connection, table_name, schema, **kw)[0] - - @reflection.cache - def _get_primary_keys(self, connection, table_name, schema=None, **kw): - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - pkeys = [] - constraint_name = None - constraint_data = self._get_constraint_data(connection, table_name, - schema, dblink, - info_cache=kw.get('info_cache')) - - for row in constraint_data: - #print "ROW:" , row - (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ - row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) - if cons_type == 'P': - if constraint_name is None: - constraint_name = self.normalize_name(cons_name) - pkeys.append(local_column) - return pkeys, constraint_name - - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - cols, name = self._get_primary_keys(connection, table_name, schema=schema, **kw) - - return { - 'constrained_columns':cols, - 'name':name - } - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - """ - - kw arguments can be: - - oracle_resolve_synonyms - - dblink - - """ - - requested_schema = schema # to check later on - resolve_synonyms = kw.get('oracle_resolve_synonyms', False) - dblink = kw.get('dblink', '') - info_cache = kw.get('info_cache') - - (table_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, table_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - - constraint_data = self._get_constraint_data(connection, 
table_name, - schema, dblink, - info_cache=kw.get('info_cache')) - - def fkey_rec(): - return { - 'name' : None, - 'constrained_columns' : [], - 'referred_schema' : None, - 'referred_table' : None, - 'referred_columns' : [] - } - - fkeys = util.defaultdict(fkey_rec) - - for row in constraint_data: - (cons_name, cons_type, local_column, remote_table, remote_column, remote_owner) = \ - row[0:2] + tuple([self.normalize_name(x) for x in row[2:6]]) - - if cons_type == 'R': - if remote_table is None: - # ticket 363 - util.warn( - ("Got 'None' querying 'table_name' from " - "all_cons_columns%(dblink)s - does the user have " - "proper rights to the table?") % {'dblink':dblink}) - continue - - rec = fkeys[cons_name] - rec['name'] = cons_name - local_cols, remote_cols = rec['constrained_columns'], rec['referred_columns'] - - if not rec['referred_table']: - if resolve_synonyms: - ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \ - self._resolve_synonym( - connection, - desired_owner=self.denormalize_name(remote_owner), - desired_table=self.denormalize_name(remote_table) - ) - if ref_synonym: - remote_table = self.normalize_name(ref_synonym) - remote_owner = self.normalize_name(ref_remote_owner) - - rec['referred_table'] = remote_table - - if requested_schema is not None or self.denormalize_name(remote_owner) != schema: - rec['referred_schema'] = remote_owner - - local_cols.append(local_column) - remote_cols.append(remote_column) - - return fkeys.values() - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, - resolve_synonyms=False, dblink='', **kw): - info_cache = kw.get('info_cache') - (view_name, schema, dblink, synonym) = \ - self._prepare_reflection_args(connection, view_name, schema, - resolve_synonyms, dblink, - info_cache=info_cache) - s = sql.text(""" - SELECT text FROM all_views - WHERE owner = :schema - AND view_name = :view_name - """) - rp = connection.execute(s, - view_name=view_name, schema=schema).scalar() - if rp: - return rp.decode(self.encoding) - else: - return None - - - -class _OuterJoinColumn(sql.ClauseElement): - __visit_name__ = 'outer_join_column' - - def __init__(self, column): - self.column = column - - - diff --git a/libs/sqlalchemy/dialects/oracle/cx_oracle.py b/libs/sqlalchemy/dialects/oracle/cx_oracle.py deleted file mode 100644 index 0154180d..00000000 --- a/libs/sqlalchemy/dialects/oracle/cx_oracle.py +++ /dev/null @@ -1,809 +0,0 @@ -# oracle/cx_oracle.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Oracle database via the cx_oracle driver. - -Driver ------- - -The Oracle dialect uses the cx_oracle driver, available at -http://cx-oracle.sourceforge.net/ . The dialect has several behaviors -which are specifically tailored towards compatibility with this module. -Version 5.0 or greater is **strongly** recommended, as SQLAlchemy makes -extensive use of the cx_oracle output converters for numeric and -string conversions. - -Connecting ----------- - -Connecting with create_engine() uses the standard URL approach of -``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the -host, port, and dbname tokens are converted to a TNS name using the cx_oracle -:func:`makedsn()` function. Otherwise, the host token is taken directly as a TNS name. 
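For example (a sketch; credentials, host and service names are
placeholders)::

    from sqlalchemy import create_engine

    # host, port and dbname present: converted to a TNS name using
    # cx_Oracle's makedsn()
    engine = create_engine("oracle://scott:tiger@dbhost:1521/orcl")

    # no dbname: the host token is taken directly as a TNS name
    engine = create_engine("oracle://scott:tiger@my_tns_name")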
- -Additional arguments which may be specified either as query string arguments on the -URL, or as keyword arguments to :func:`~sqlalchemy.create_engine()` are: - -* *allow_twophase* - enable two-phase transactions. Defaults to ``True``. - -* *arraysize* - set the cx_oracle.arraysize value on cursors, in SQLAlchemy - it defaults to 50. See the section on "LOB Objects" below. - -* *auto_convert_lobs* - defaults to True, see the section on LOB objects. - -* *auto_setinputsizes* - the cx_oracle.setinputsizes() call is issued for all bind parameters. - This is required for LOB datatypes but can be disabled to reduce overhead. Defaults - to ``True``. - -* *mode* - This is given the string value of SYSDBA or SYSOPER, or alternatively an - integer value. This value is only available as a URL query string argument. - -* *threaded* - enable multithreaded access to cx_oracle connections. Defaults - to ``True``. Note that this is the opposite default of cx_oracle itself. - -Unicode -------- - -cx_oracle 5 fully supports Python unicode objects. SQLAlchemy will pass -all unicode strings directly to cx_oracle, and additionally uses an output -handler so that all string based result values are returned as unicode as well. -Generally, the ``NLS_LANG`` environment variable determines the nature -of the encoding to be used. - -Note that this behavior is disabled when Oracle 8 is detected, as it has been -observed that issues remain when passing Python unicodes to cx_oracle with Oracle 8. - -LOB Objects ------------ - -cx_oracle returns oracle LOBs using the cx_oracle.LOB object. SQLAlchemy converts -these to strings so that the interface of the Binary type is consistent with that of -other backends, and so that the linkage to a live cursor is not needed in scenarios -like result.fetchmany() and result.fetchall(). This means that by default, LOB -objects are fully fetched unconditionally by SQLAlchemy, and the linkage to a live -cursor is broken. - -To disable this processing, pass ``auto_convert_lobs=False`` to :func:`create_engine()`. - -Two Phase Transaction Support ------------------------------ - -Two Phase transactions are implemented using XA transactions, and are known -to work in a rudimental fashion with recent versions of cx_Oracle -as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet -considered to be robust and should still be regarded as experimental. - -In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding -two phase which prevents -a particular DBAPI connection from being consistently usable in both -prepared transactions as well as traditional DBAPI usage patterns; therefore -once a particular connection is used via :meth:`.Connection.begin_prepared`, -all subsequent usages of the underlying DBAPI connection must be within -the context of prepared transactions. - -The default behavior of :class:`.Engine` is to maintain a pool of DBAPI -connections. Therefore, due to the above glitch, a DBAPI connection that has -been used in a two-phase operation, and is then returned to the pool, will -not be usable in a non-two-phase context. To avoid this situation, -the application can make one of several choices: - -* Disable connection pooling using :class:`.NullPool` - -* Ensure that the particular :class:`.Engine` in use is only used - for two-phase operations. A :class:`.Engine` bound to an ORM - :class:`.Session` which includes ``twophase=True`` will consistently - use the two-phase transaction style. 
- -* For ad-hoc two-phase operations without disabling pooling, the DBAPI - connection in use can be evicted from the connection pool using the - :class:`.Connection.detach` method. - -.. versionchanged:: 0.8.0b2,0.7.10 - Support for cx_oracle prepared transactions has been implemented - and tested. - - -Precision Numerics ------------------- - -The SQLAlchemy dialect goes through a lot of steps to ensure -that decimal numbers are sent and received with full accuracy. -An "outputtypehandler" callable is associated with each -cx_oracle connection object which detects numeric types and -receives them as string values, instead of receiving a Python -``float`` directly, which is then passed to the Python -``Decimal`` constructor. The :class:`.Numeric` and -:class:`.Float` types under the cx_oracle dialect are aware of -this behavior, and will coerce the ``Decimal`` to ``float`` if -the ``asdecimal`` flag is ``False`` (default on :class:`.Float`, -optional on :class:`.Numeric`). - -Because the handler coerces to ``Decimal`` in all cases first, -the feature can detract significantly from performance. -If precision numerics aren't required, the decimal handling -can be disabled by passing the flag ``coerce_to_decimal=False`` -to :func:`.create_engine`:: - - engine = create_engine("oracle+cx_oracle://dsn", - coerce_to_decimal=False) - -.. versionadded:: 0.7.6 - Add the ``coerce_to_decimal`` flag. - -Another alternative to performance is to use the -`cdecimal `_ library; -see :class:`.Numeric` for additional notes. - -The handler attempts to use the "precision" and "scale" -attributes of the result set column to best determine if -subsequent incoming values should be received as ``Decimal`` as -opposed to int (in which case no processing is added). There are -several scenarios where OCI_ does not provide unambiguous data -as to the numeric type, including some situations where -individual rows may return a combination of floating point and -integer values. Certain values for "precision" and "scale" have -been observed to determine this scenario. When it occurs, the -outputtypehandler receives as string and then passes off to a -processing function which detects, for each returned value, if a -decimal point is present, and if so converts to ``Decimal``, -otherwise to int. The intention is that simple int-based -statements like "SELECT my_seq.nextval() FROM DUAL" continue to -return ints and not ``Decimal`` objects, and that any kind of -floating point value is received as a string so that there is no -floating point loss of precision. - -The "decimal point is present" logic itself is also sensitive to -locale. Under OCI_, this is controlled by the NLS_LANG -environment variable. Upon first connection, the dialect runs a -test to determine the current "decimal" character, which can be -a comma "," for european locales. From that point forward the -outputtypehandler uses that character to represent a decimal -point. Note that cx_oracle 5.0.3 or greater is required -when dealing with numerics with locale settings that don't use -a period "." as the decimal character. - -.. versionchanged:: 0.6.6 - The outputtypehandler uses a comma "," character to represent - a decimal point. - -.. 
_OCI: http://www.oracle.com/technetwork/database/features/oci/index.html - -""" - -from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, \ - OracleExecutionContext -from sqlalchemy.dialects.oracle import base as oracle -from sqlalchemy.engine import base -from sqlalchemy import types as sqltypes, util, exc, processors -import random -import collections -from sqlalchemy.util.compat import decimal -import re - -class _OracleNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - # cx_oracle accepts Decimal objects and floats - return None - - def result_processor(self, dialect, coltype): - # we apply a cx_oracle type handler to all connections - # that converts floating point strings to Decimal(). - # However, in some subquery situations, Oracle doesn't - # give us enough information to determine int or Decimal. - # It could even be int/Decimal differently on each row, - # regardless of the scale given for the originating type. - # So we still need an old school isinstance() handler - # here for decimals. - - if dialect.supports_native_decimal: - if self.asdecimal: - if self.scale is None: - fstring = "%.10f" - else: - fstring = "%%.%df" % self.scale - def to_decimal(value): - if value is None: - return None - elif isinstance(value, decimal.Decimal): - return value - else: - return decimal.Decimal(fstring % value) - return to_decimal - else: - if self.precision is None and self.scale is None: - return processors.to_float - elif not getattr(self, '_is_oracle_number', False) \ - and self.scale is not None: - return processors.to_float - else: - return None - else: - # cx_oracle 4 behavior, will assume - # floats - return super(_OracleNumeric, self).\ - result_processor(dialect, coltype) - -class _OracleDate(sqltypes.Date): - def bind_processor(self, dialect): - return None - - def result_processor(self, dialect, coltype): - def process(value): - if value is not None: - return value.date() - else: - return value - return process - -class _LOBMixin(object): - def result_processor(self, dialect, coltype): - if not dialect.auto_convert_lobs: - # return the cx_oracle.LOB directly. - return None - - def process(value): - if value is not None: - return value.read() - else: - return value - return process - -class _NativeUnicodeMixin(object): - # Py3K - #pass - # Py2K - def bind_processor(self, dialect): - if dialect._cx_oracle_with_unicode: - def process(value): - if value is None: - return value - else: - return unicode(value) - return process - else: - return super(_NativeUnicodeMixin, self).bind_processor(dialect) - # end Py2K - - # we apply a connection output handler that returns - # unicode in all cases, so the "native_unicode" flag - # will be set for the default String.result_processor. - -class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR): - def get_dbapi_type(self, dbapi): - return dbapi.FIXED_CHAR - -class _OracleNVarChar(_NativeUnicodeMixin, sqltypes.NVARCHAR): - def get_dbapi_type(self, dbapi): - return getattr(dbapi, 'UNICODE', dbapi.STRING) - -class _OracleText(_LOBMixin, sqltypes.Text): - def get_dbapi_type(self, dbapi): - return dbapi.CLOB - -class _OracleLong(oracle.LONG): - # a raw LONG is a text type, but does *not* - # get the LobMixin with cx_oracle. 
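    # cx_Oracle's LONG_STRING type, returned below, already fetches the
    # column as an ordinary string, so no LOB read() conversion applies.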
- - def get_dbapi_type(self, dbapi): - return dbapi.LONG_STRING - -class _OracleString(_NativeUnicodeMixin, sqltypes.String): - pass - -class _OracleUnicodeText(_LOBMixin, _NativeUnicodeMixin, sqltypes.UnicodeText): - def get_dbapi_type(self, dbapi): - return dbapi.NCLOB - - def result_processor(self, dialect, coltype): - lob_processor = _LOBMixin.result_processor(self, dialect, coltype) - if lob_processor is None: - return None - - string_processor = sqltypes.UnicodeText.result_processor(self, dialect, coltype) - - if string_processor is None: - return lob_processor - else: - def process(value): - return string_processor(lob_processor(value)) - return process - -class _OracleInteger(sqltypes.Integer): - def result_processor(self, dialect, coltype): - def to_int(val): - if val is not None: - val = int(val) - return val - return to_int - -class _OracleBinary(_LOBMixin, sqltypes.LargeBinary): - def get_dbapi_type(self, dbapi): - return dbapi.BLOB - - def bind_processor(self, dialect): - return None - -class _OracleInterval(oracle.INTERVAL): - def get_dbapi_type(self, dbapi): - return dbapi.INTERVAL - -class _OracleRaw(oracle.RAW): - pass - -class _OracleRowid(oracle.ROWID): - def get_dbapi_type(self, dbapi): - return dbapi.ROWID - -class OracleCompiler_cx_oracle(OracleCompiler): - def bindparam_string(self, name, **kw): - if self.preparer._bindparam_requires_quotes(name): - quoted_name = '"%s"' % name - self._quoted_bind_names[name] = quoted_name - return OracleCompiler.bindparam_string(self, quoted_name, **kw) - else: - return OracleCompiler.bindparam_string(self, name, **kw) - - -class OracleExecutionContext_cx_oracle(OracleExecutionContext): - - def pre_exec(self): - quoted_bind_names = \ - getattr(self.compiled, '_quoted_bind_names', None) - if quoted_bind_names: - if not self.dialect.supports_unicode_statements: - # if DBAPI doesn't accept unicode statements, - # keys in self.parameters would have been encoded - # here. so convert names in quoted_bind_names - # to encoded as well. 
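                # e.g. a quoted reserved-word bind such as
                # {u'index': u'"index"'} becomes {'index': '"index"'}
                # (hypothetical bind name, shown for illustration only)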
- quoted_bind_names = \ - dict( - (fromname.encode(self.dialect.encoding), - toname.encode(self.dialect.encoding)) - for fromname, toname in - quoted_bind_names.items() - ) - for param in self.parameters: - for fromname, toname in quoted_bind_names.items(): - param[toname] = param[fromname] - del param[fromname] - - if self.dialect.auto_setinputsizes: - # cx_oracle really has issues when you setinputsizes - # on String, including that outparams/RETURNING - # breaks for varchars - self.set_input_sizes(quoted_bind_names, - exclude_types=self.dialect._cx_oracle_exclude_setinputsizes - ) - - # if a single execute, check for outparams - if len(self.compiled_parameters) == 1: - for bindparam in self.compiled.binds.values(): - if bindparam.isoutparam: - dbtype = bindparam.type.dialect_impl(self.dialect).\ - get_dbapi_type(self.dialect.dbapi) - if not hasattr(self, 'out_parameters'): - self.out_parameters = {} - if dbtype is None: - raise exc.InvalidRequestError("Cannot create out parameter for parameter " - "%r - it's type %r is not supported by" - " cx_oracle" % - (name, bindparam.type) - ) - name = self.compiled.bind_names[bindparam] - self.out_parameters[name] = self.cursor.var(dbtype) - self.parameters[0][quoted_bind_names.get(name, name)] = \ - self.out_parameters[name] - - def create_cursor(self): - c = self._dbapi_connection.cursor() - if self.dialect.arraysize: - c.arraysize = self.dialect.arraysize - - return c - - def get_result_proxy(self): - if hasattr(self, 'out_parameters') and self.compiled.returning: - returning_params = dict( - (k, v.getvalue()) - for k, v in self.out_parameters.items() - ) - return ReturningResultProxy(self, returning_params) - - result = None - if self.cursor.description is not None: - for column in self.cursor.description: - type_code = column[1] - if type_code in self.dialect._cx_oracle_binary_types: - result = base.BufferedColumnResultProxy(self) - - if result is None: - result = base.ResultProxy(self) - - if hasattr(self, 'out_parameters'): - if self.compiled_parameters is not None and \ - len(self.compiled_parameters) == 1: - result.out_parameters = out_parameters = {} - - for bind, name in self.compiled.bind_names.items(): - if name in self.out_parameters: - type = bind.type - impl_type = type.dialect_impl(self.dialect) - dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi) - result_processor = impl_type.\ - result_processor(self.dialect, - dbapi_type) - if result_processor is not None: - out_parameters[name] = \ - result_processor(self.out_parameters[name].getvalue()) - else: - out_parameters[name] = self.out_parameters[name].getvalue() - else: - result.out_parameters = dict( - (k, v.getvalue()) - for k, v in self.out_parameters.items() - ) - - return result - -class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle): - """Support WITH_UNICODE in Python 2.xx. - - WITH_UNICODE allows cx_Oracle's Python 3 unicode handling - behavior under Python 2.x. This mode in some cases disallows - and in other cases silently passes corrupted data when - non-Python-unicode strings (a.k.a. plain old Python strings) - are passed as arguments to connect(), the statement sent to execute(), - or any of the bind parameter keys or values sent to execute(). - This optional context therefore ensures that all statements are - passed as Python unicode objects. 
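    Concretely, every string reaching the driver in this mode must already
    be unicode (a sketch, Python 2 semantics; ``conn`` is any Connection)::

        conn.execute(u"SELECT :val FROM DUAL", val=u"some text")

    Byte strings in the statement, keys or values are what the caveats
    above describe as disallowed or silently corrupted.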
- - """ - def __init__(self, *arg, **kw): - OracleExecutionContext_cx_oracle.__init__(self, *arg, **kw) - self.statement = unicode(self.statement) - - def _execute_scalar(self, stmt): - return super(OracleExecutionContext_cx_oracle_with_unicode, self).\ - _execute_scalar(unicode(stmt)) - -class ReturningResultProxy(base.FullyBufferedResultProxy): - """Result proxy which stuffs the _returning clause + outparams into the fetch.""" - - def __init__(self, context, returning_params): - self._returning_params = returning_params - super(ReturningResultProxy, self).__init__(context) - - def _cursor_description(self): - returning = self.context.compiled.returning - - ret = [] - for c in returning: - if hasattr(c, 'name'): - ret.append((c.name, c.type)) - else: - ret.append((c.anon_label, c.type)) - return ret - - def _buffer_rows(self): - return collections.deque([tuple(self._returning_params["ret_%d" % i] - for i, c in enumerate(self._returning_params))]) - -class OracleDialect_cx_oracle(OracleDialect): - execution_ctx_cls = OracleExecutionContext_cx_oracle - statement_compiler = OracleCompiler_cx_oracle - - driver = "cx_oracle" - - colspecs = colspecs = { - sqltypes.Numeric: _OracleNumeric, - sqltypes.Date : _OracleDate, # generic type, assume datetime.date is desired - oracle.DATE: oracle.DATE, # non generic type - passthru - sqltypes.LargeBinary : _OracleBinary, - sqltypes.Boolean : oracle._OracleBoolean, - sqltypes.Interval : _OracleInterval, - oracle.INTERVAL : _OracleInterval, - sqltypes.Text : _OracleText, - sqltypes.String : _OracleString, - sqltypes.UnicodeText : _OracleUnicodeText, - sqltypes.CHAR : _OracleChar, - - # a raw LONG is a text type, but does *not* - # get the LobMixin with cx_oracle. - oracle.LONG: _OracleLong, - - sqltypes.Integer : _OracleInteger, # this is only needed for OUT parameters. - # it would be nice if we could not use it otherwise. 
- oracle.RAW: _OracleRaw, - sqltypes.Unicode: _OracleNVarChar, - sqltypes.NVARCHAR : _OracleNVarChar, - oracle.ROWID: _OracleRowid, - } - - - execute_sequence_format = list - - def __init__(self, - auto_setinputsizes=True, - auto_convert_lobs=True, - threaded=True, - allow_twophase=True, - coerce_to_decimal=True, - arraysize=50, **kwargs): - OracleDialect.__init__(self, **kwargs) - self.threaded = threaded - self.arraysize = arraysize - self.allow_twophase = allow_twophase - self.supports_timestamp = self.dbapi is None or hasattr(self.dbapi, 'TIMESTAMP' ) - self.auto_setinputsizes = auto_setinputsizes - self.auto_convert_lobs = auto_convert_lobs - - if hasattr(self.dbapi, 'version'): - self.cx_oracle_ver = tuple([int(x) for x in self.dbapi.version.split('.')]) - else: - self.cx_oracle_ver = (0, 0, 0) - - def types(*names): - return set([ - getattr(self.dbapi, name, None) for name in names - ]).difference([None]) - - self._cx_oracle_exclude_setinputsizes = types("STRING", "UNICODE") - self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB") - self._cx_oracle_unicode_types = types("UNICODE", "NCLOB") - self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB") - self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0) - - self.supports_native_decimal = ( - self.cx_oracle_ver >= (5, 0) and - coerce_to_decimal - ) - - self._cx_oracle_native_nvarchar = self.cx_oracle_ver >= (5, 0) - - if self.cx_oracle_ver is None: - # this occurs in tests with mock DBAPIs - self._cx_oracle_string_types = set() - self._cx_oracle_with_unicode = False - elif self.cx_oracle_ver >= (5,) and not hasattr(self.dbapi, 'UNICODE'): - # cx_Oracle WITH_UNICODE mode. *only* python - # unicode objects accepted for anything - self.supports_unicode_statements = True - self.supports_unicode_binds = True - self._cx_oracle_with_unicode = True - # Py2K - # There's really no reason to run with WITH_UNICODE under Python 2.x. - # Give the user a hint. - util.warn("cx_Oracle is compiled under Python 2.xx using the " - "WITH_UNICODE flag. Consider recompiling cx_Oracle without " - "this flag, which is in no way necessary for full support of Unicode. " - "Otherwise, all string-holding bind parameters must " - "be explicitly typed using SQLAlchemy's String type or one of its subtypes," - "or otherwise be passed as Python unicode. Plain Python strings " - "passed as bind parameters will be silently corrupted by cx_Oracle." - ) - self.execution_ctx_cls = OracleExecutionContext_cx_oracle_with_unicode - # end Py2K - else: - self._cx_oracle_with_unicode = False - - if self.cx_oracle_ver is None or \ - not self.auto_convert_lobs or \ - not hasattr(self.dbapi, 'CLOB'): - self.dbapi_type_map = {} - else: - # only use this for LOB objects. using it for strings, dates - # etc. leads to a little too much magic, reflection doesn't know if it should - # expect encoded strings or unicodes, etc. - self.dbapi_type_map = { - self.dbapi.CLOB: oracle.CLOB(), - self.dbapi.NCLOB: oracle.NCLOB(), - self.dbapi.BLOB: oracle.BLOB(), - self.dbapi.BINARY: oracle.RAW(), - } - @classmethod - def dbapi(cls): - cx_Oracle = __import__('cx_Oracle') - return cx_Oracle - - def initialize(self, connection): - super(OracleDialect_cx_oracle, self).initialize(connection) - if self._is_oracle_8: - self.supports_unicode_binds = False - self._detect_decimal_char(connection) - - def _detect_decimal_char(self, connection): - """detect if the decimal separator character is not '.', as - is the case with european locale settings for NLS_LANG. 
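As a standalone illustration of the separator handling described here (the comma locale and the sample value are assumptions, not part of this change), the substitution the dialect installed behaved like::

    import decimal

    char = ','   # what "SELECT 0.1 FROM DUAL" yields under a comma-based NLS_LANG
    _to_decimal = lambda value: decimal.Decimal(value.replace(char, '.'))
    print _to_decimal('3,14')   # Decimal('3.14')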
- - cx_oracle itself uses similar logic when it formats Python - Decimal objects to strings on the bind side (as of 5.0.3), - as Oracle sends/receives string numerics only in the - current locale. - - """ - if self.cx_oracle_ver < (5,): - # no output type handlers before version 5 - return - - cx_Oracle = self.dbapi - conn = connection.connection - - # override the output_type_handler that's - # on the cx_oracle connection with a plain - # one on the cursor - - def output_type_handler(cursor, name, defaultType, - size, precision, scale): - return cursor.var( - cx_Oracle.STRING, - 255, arraysize=cursor.arraysize) - - cursor = conn.cursor() - cursor.outputtypehandler = output_type_handler - cursor.execute("SELECT 0.1 FROM DUAL") - val = cursor.fetchone()[0] - cursor.close() - char = re.match(r"([\.,])", val).group(1) - if char != '.': - _detect_decimal = self._detect_decimal - self._detect_decimal = \ - lambda value: _detect_decimal(value.replace(char, '.')) - self._to_decimal = \ - lambda value: decimal.Decimal(value.replace(char, '.')) - - def _detect_decimal(self, value): - if "." in value: - return decimal.Decimal(value) - else: - return int(value) - - _to_decimal = decimal.Decimal - - def on_connect(self): - if self.cx_oracle_ver < (5,): - # no output type handlers before version 5 - return - - cx_Oracle = self.dbapi - def output_type_handler(cursor, name, defaultType, - size, precision, scale): - # convert all NUMBER with precision + positive scale to Decimal - # this almost allows "native decimal" mode. - if self.supports_native_decimal and \ - defaultType == cx_Oracle.NUMBER and \ - precision and scale > 0: - return cursor.var( - cx_Oracle.STRING, - 255, - outconverter=self._to_decimal, - arraysize=cursor.arraysize) - # if NUMBER with zero precision and 0 or neg scale, this appears - # to indicate "ambiguous". Use a slower converter that will - # make a decision based on each value received - the type - # may change from row to row (!). This kills - # off "native decimal" mode, handlers still needed. 
- elif self.supports_native_decimal and \ - defaultType == cx_Oracle.NUMBER \ - and not precision and scale <= 0: - return cursor.var( - cx_Oracle.STRING, - 255, - outconverter=self._detect_decimal, - arraysize=cursor.arraysize) - # allow all strings to come back natively as Unicode - elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR): - return cursor.var(unicode, size, cursor.arraysize) - - def on_connect(conn): - conn.outputtypehandler = output_type_handler - - return on_connect - - def create_connect_args(self, url): - dialect_opts = dict(url.query) - for opt in ('use_ansi', 'auto_setinputsizes', 'auto_convert_lobs', - 'threaded', 'allow_twophase'): - if opt in dialect_opts: - util.coerce_kw_type(dialect_opts, opt, bool) - setattr(self, opt, dialect_opts[opt]) - - if url.database: - # if we have a database, then we have a remote host - port = url.port - if port: - port = int(port) - else: - port = 1521 - dsn = self.dbapi.makedsn(url.host, port, url.database) - else: - # we have a local tnsname - dsn = url.host - - opts = dict( - user=url.username, - password=url.password, - dsn=dsn, - threaded=self.threaded, - twophase=self.allow_twophase, - ) - - # Py2K - if self._cx_oracle_with_unicode: - for k, v in opts.items(): - if isinstance(v, str): - opts[k] = unicode(v) - else: - for k, v in opts.items(): - if isinstance(v, unicode): - opts[k] = str(v) - # end Py2K - - if 'mode' in url.query: - opts['mode'] = url.query['mode'] - if isinstance(opts['mode'], basestring): - mode = opts['mode'].upper() - if mode == 'SYSDBA': - opts['mode'] = self.dbapi.SYSDBA - elif mode == 'SYSOPER': - opts['mode'] = self.dbapi.SYSOPER - else: - util.coerce_kw_type(opts, 'mode', int) - return ([], opts) - - def _get_server_version_info(self, connection): - return tuple( - int(x) - for x in connection.connection.version.split('.') - ) - - def is_disconnect(self, e, connection, cursor): - error, = e.args - if isinstance(e, self.dbapi.InterfaceError): - return "not connected" in str(e) - elif hasattr(error, 'code'): - # ORA-00028: your session has been killed - # ORA-03114: not connected to ORACLE - # ORA-03113: end-of-file on communication channel - # ORA-03135: connection lost contact - # ORA-01033: ORACLE initialization or shutdown in progress - # TODO: Others ? - return error.code in (28, 3114, 3113, 3135, 1033) - else: - return False - - def create_xid(self): - """create a two-phase transaction ID. - - this id will be passed to do_begin_twophase(), do_rollback_twophase(), - do_commit_twophase(). 
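A usage sketch for the two-phase hooks that follow; ``engine``, ``some_table``, and the inserted values are assumed for illustration::

    conn = engine.connect()
    xact = conn.begin_twophase()       # create_xid() + do_begin_twophase()
    conn.execute(some_table.insert(), data='value')
    xact.prepare()                     # do_prepare_twophase()
    xact.commit()                      # do_commit_twophase()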
its format is unspecified.""" - - id = random.randint(0, 2 ** 128) - return (0x1234, "%032x" % id, "%032x" % 9) - - def do_begin_twophase(self, connection, xid): - connection.connection.begin(*xid) - - def do_prepare_twophase(self, connection, xid): - result = connection.connection.prepare() - connection.info['cx_oracle_prepared'] = result - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - self.do_rollback(connection.connection) - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - if not is_prepared: - self.do_commit(connection.connection) - else: - oci_prepared = connection.info['cx_oracle_prepared'] - if oci_prepared: - self.do_commit(connection.connection) - - def do_recover_twophase(self, connection): - connection.info.pop('cx_oracle_prepared', None) - -dialect = OracleDialect_cx_oracle diff --git a/libs/sqlalchemy/dialects/oracle/zxjdbc.py b/libs/sqlalchemy/dialects/oracle/zxjdbc.py deleted file mode 100644 index e4a12ce0..00000000 --- a/libs/sqlalchemy/dialects/oracle/zxjdbc.py +++ /dev/null @@ -1,216 +0,0 @@ -# oracle/zxjdbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the Oracle database via the zxjdbc JDBC connector. - -JDBC Driver ------------ - -The official Oracle JDBC driver is at -http://www.oracle.com/technology/software/tech/java/sqlj_jdbc/index.html. - -""" -import decimal -import re - -from sqlalchemy import sql, types as sqltypes, util -from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, OracleExecutionContext -from sqlalchemy.engine import base, default -from sqlalchemy.sql import expression -import collections - -SQLException = zxJDBC = None - -class _ZxJDBCDate(sqltypes.Date): - - def result_processor(self, dialect, coltype): - def process(value): - if value is None: - return None - else: - return value.date() - return process - - -class _ZxJDBCNumeric(sqltypes.Numeric): - - def result_processor(self, dialect, coltype): - #XXX: does the dialect return Decimal or not??? 
- # if it does (in all cases), we could use a None processor as well as - # the to_float generic processor - if self.asdecimal: - def process(value): - if isinstance(value, decimal.Decimal): - return value - else: - return decimal.Decimal(str(value)) - else: - def process(value): - if isinstance(value, decimal.Decimal): - return float(value) - else: - return value - return process - - -class OracleCompiler_zxjdbc(OracleCompiler): - - def returning_clause(self, stmt, returning_cols): - self.returning_cols = list(expression._select_iterables(returning_cols)) - - # within_columns_clause=False so that labels (foo AS bar) don't render - columns = [self.process(c, within_columns_clause=False, result_map=self.result_map) - for c in self.returning_cols] - - if not hasattr(self, 'returning_parameters'): - self.returning_parameters = [] - - binds = [] - for i, col in enumerate(self.returning_cols): - dbtype = col.type.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi) - self.returning_parameters.append((i + 1, dbtype)) - - bindparam = sql.bindparam("ret_%d" % i, value=ReturningParam(dbtype)) - self.binds[bindparam.key] = bindparam - binds.append(self.bindparam_string(self._truncate_bindparam(bindparam))) - - return 'RETURNING ' + ', '.join(columns) + " INTO " + ", ".join(binds) - - -class OracleExecutionContext_zxjdbc(OracleExecutionContext): - - def pre_exec(self): - if hasattr(self.compiled, 'returning_parameters'): - # prepare a zxJDBC statement so we can grab its underlying - # OraclePreparedStatement's getReturnResultSet later - self.statement = self.cursor.prepare(self.statement) - - def get_result_proxy(self): - if hasattr(self.compiled, 'returning_parameters'): - rrs = None - try: - try: - rrs = self.statement.__statement__.getReturnResultSet() - rrs.next() - except SQLException, sqle: - msg = '%s [SQLCode: %d]' % (sqle.getMessage(), sqle.getErrorCode()) - if sqle.getSQLState() is not None: - msg += ' [SQLState: %s]' % sqle.getSQLState() - raise zxJDBC.Error(msg) - else: - row = tuple(self.cursor.datahandler.getPyObject(rrs, index, dbtype) - for index, dbtype in self.compiled.returning_parameters) - return ReturningResultProxy(self, row) - finally: - if rrs is not None: - try: - rrs.close() - except SQLException: - pass - self.statement.close() - - return base.ResultProxy(self) - - def create_cursor(self): - cursor = self._dbapi_connection.cursor() - cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) - return cursor - - -class ReturningResultProxy(base.FullyBufferedResultProxy): - - """ResultProxy backed by the RETURNING ResultSet results.""" - - def __init__(self, context, returning_row): - self._returning_row = returning_row - super(ReturningResultProxy, self).__init__(context) - - def _cursor_description(self): - ret = [] - for c in self.context.compiled.returning_cols: - if hasattr(c, 'name'): - ret.append((c.name, c.type)) - else: - ret.append((c.anon_label, c.type)) - return ret - - def _buffer_rows(self): - return collections.deque([self._returning_row]) - - -class ReturningParam(object): - - """A bindparam value representing a RETURNING parameter. - - Specially handled by OracleReturningDataHandler. 
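To see the ``RETURNING ... INTO`` clause built above from user code, here is a sketch with an assumed table and an assumed live connection ``conn``::

    from sqlalchemy import Table, Column, Integer, String, MetaData

    users = Table('users', MetaData(),
                  Column('id', Integer, primary_key=True),
                  Column('name', String(50)))

    stmt = users.insert().returning(users.c.id).values(name='jack')
    # compiles to roughly:
    #   INSERT INTO users (name) VALUES (:name) RETURNING users.id INTO :ret_0
    print conn.execute(stmt).first()   # served by ReturningResultProxy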
- """ - - def __init__(self, type): - self.type = type - - def __eq__(self, other): - if isinstance(other, ReturningParam): - return self.type == other.type - return NotImplemented - - def __ne__(self, other): - if isinstance(other, ReturningParam): - return self.type != other.type - return NotImplemented - - def __repr__(self): - kls = self.__class__ - return '<%s.%s object at 0x%x type=%s>' % (kls.__module__, kls.__name__, id(self), - self.type) - - -class OracleDialect_zxjdbc(ZxJDBCConnector, OracleDialect): - jdbc_db_name = 'oracle' - jdbc_driver_name = 'oracle.jdbc.OracleDriver' - - statement_compiler = OracleCompiler_zxjdbc - execution_ctx_cls = OracleExecutionContext_zxjdbc - - colspecs = util.update_copy( - OracleDialect.colspecs, - { - sqltypes.Date : _ZxJDBCDate, - sqltypes.Numeric: _ZxJDBCNumeric - } - ) - - def __init__(self, *args, **kwargs): - super(OracleDialect_zxjdbc, self).__init__(*args, **kwargs) - global SQLException, zxJDBC - from java.sql import SQLException - from com.ziclix.python.sql import zxJDBC - from com.ziclix.python.sql.handler import OracleDataHandler - class OracleReturningDataHandler(OracleDataHandler): - - """zxJDBC DataHandler that specially handles ReturningParam.""" - - def setJDBCObject(self, statement, index, object, dbtype=None): - if type(object) is ReturningParam: - statement.registerReturnParameter(index, object.type) - elif dbtype is None: - OracleDataHandler.setJDBCObject(self, statement, index, object) - else: - OracleDataHandler.setJDBCObject(self, statement, index, object, dbtype) - self.DataHandler = OracleReturningDataHandler - - def initialize(self, connection): - super(OracleDialect_zxjdbc, self).initialize(connection) - self.implicit_returning = connection.connection.driverversion >= '10.2' - - def _create_jdbc_url(self, url): - return 'jdbc:oracle:thin:@%s:%s:%s' % (url.host, url.port or 1521, url.database) - - def _get_server_version_info(self, connection): - version = re.search(r'Release ([\d\.]+)', connection.connection.dbversion).group(1) - return tuple(int(x) for x in version.split('.')) - -dialect = OracleDialect_zxjdbc diff --git a/libs/sqlalchemy/dialects/postgres.py b/libs/sqlalchemy/dialects/postgres.py deleted file mode 100644 index 82d1a39c..00000000 --- a/libs/sqlalchemy/dialects/postgres.py +++ /dev/null @@ -1,16 +0,0 @@ -# dialects/postgres.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -# backwards compat with the old name -from sqlalchemy.util import warn_deprecated - -warn_deprecated( - "The SQLAlchemy PostgreSQL dialect has been renamed from 'postgres' to 'postgresql'. 
" - "The new URL format is postgresql[+driver]://:@/" - ) - -from sqlalchemy.dialects.postgresql import * -from sqlalchemy.dialects.postgresql import base diff --git a/libs/sqlalchemy/dialects/postgresql/__init__.py b/libs/sqlalchemy/dialects/postgresql/__init__.py deleted file mode 100644 index 04ae413c..00000000 --- a/libs/sqlalchemy/dialects/postgresql/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# postgresql/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.postgresql import base, psycopg2, pg8000, pypostgresql, zxjdbc - -base.dialect = psycopg2.dialect - -from sqlalchemy.dialects.postgresql.base import \ - INTEGER, BIGINT, SMALLINT, VARCHAR, CHAR, TEXT, NUMERIC, FLOAT, REAL, INET, \ - CIDR, UUID, BIT, MACADDR, DOUBLE_PRECISION, TIMESTAMP, TIME,\ - DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect - -__all__ = ( -'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET', -'CIDR', 'UUID', 'BIT', 'MACADDR', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME', -'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect' -) diff --git a/libs/sqlalchemy/dialects/postgresql/base.py b/libs/sqlalchemy/dialects/postgresql/base.py deleted file mode 100644 index 384b7616..00000000 --- a/libs/sqlalchemy/dialects/postgresql/base.py +++ /dev/null @@ -1,1705 +0,0 @@ -# postgresql/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the PostgreSQL database. - -For information on connecting using specific drivers, see the documentation -section regarding that driver. - -Sequences/SERIAL ----------------- - -PostgreSQL supports sequences, and SQLAlchemy uses these as the default means -of creating new primary key values for integer-based primary key columns. When -creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for -integer-based primary key columns, which generates a sequence and server side -default corresponding to the column. - -To specify a specific named sequence to be used for primary key generation, -use the :func:`~sqlalchemy.schema.Sequence` construct:: - - Table('sometable', metadata, - Column('id', Integer, Sequence('some_id_seq'), primary_key=True) - ) - -When SQLAlchemy issues a single INSERT statement, to fulfill the contract of -having the "last insert identifier" available, a RETURNING clause is added to -the INSERT statement which specifies the primary key columns should be -returned after the statement completes. The RETURNING functionality only takes -place if Postgresql 8.2 or later is in use. As a fallback approach, the -sequence, whether specified explicitly or implicitly via ``SERIAL``, is -executed independently beforehand, the returned value to be used in the -subsequent insert. Note that when an -:func:`~sqlalchemy.sql.expression.insert()` construct is executed using -"executemany" semantics, the "last inserted identifier" functionality does not -apply; no RETURNING clause is emitted nor is the sequence pre-executed in this -case. - -To force the usage of RETURNING by default off, specify the flag -``implicit_returning=False`` to :func:`.create_engine`. 
- -Transaction Isolation Level ---------------------------- - -:func:`.create_engine` accepts an ``isolation_level`` parameter which results -in the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL -`` being invoked for every new connection. Valid values for this -parameter are ``READ COMMITTED``, ``READ UNCOMMITTED``, ``REPEATABLE READ``, -and ``SERIALIZABLE``:: - - engine = create_engine( - "postgresql+pg8000://scott:tiger@localhost/test", - isolation_level="READ UNCOMMITTED" - ) - -When using the psycopg2 dialect, a psycopg2-specific method of setting -transaction isolation level is used, but the API of ``isolation_level`` -remains the same - see :ref:`psycopg2_isolation`. - - -Remote / Cross-Schema Table Introspection ------------------------------------------ - -Tables can be introspected from any accessible schema, including -inter-schema foreign key relationships. However, care must be taken -when specifying the "schema" argument for a given :class:`.Table`, when -the given schema is also present in PostgreSQL's ``search_path`` variable -for the current connection. - -If a FOREIGN KEY constraint reports that the remote table's schema is within -the current ``search_path``, the "schema" attribute of the resulting -:class:`.Table` will be set to ``None``, unless the actual schema of the -remote table matches that of the referencing table, and the "schema" argument -was explicitly stated on the referencing table. - -The best practice here is to not use the ``schema`` argument -on :class:`.Table` for any schemas that are present in ``search_path``. -``search_path`` defaults to "public", but care should be taken -to inspect the actual value using:: - - SHOW search_path; - -.. versionchanged:: 0.7.3 - Prior to this version, cross-schema foreign keys when the schemas - were also in the ``search_path`` could make an incorrect assumption - if the schemas were explicitly stated on each :class:`.Table`. - -Background on PG's ``search_path`` is at: -http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH - -INSERT/UPDATE...RETURNING -------------------------- - -The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and -``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default -for single-row INSERT statements in order to fetch newly generated -primary key identifiers. To specify an explicit ``RETURNING`` clause, -use the :meth:`._UpdateBase.returning` method on a per-statement basis:: - - # INSERT..RETURNING - result = table.insert().returning(table.c.col1, table.c.col2).\\ - values(name='foo') - print result.fetchall() - - # UPDATE..RETURNING - result = table.update().returning(table.c.col1, table.c.col2).\\ - where(table.c.name=='foo').values(name='bar') - print result.fetchall() - - # DELETE..RETURNING - result = table.delete().returning(table.c.col1, table.c.col2).\\ - where(table.c.name=='foo') - print result.fetchall() - - -.. _postgresql_indexes: - -Postgresql-Specific Index Options ---------------------------------- - -Several extensions to the :class:`.Index` construct are available, specific -to the PostgreSQL dialect. - -Partial Indexes -^^^^^^^^^^^^^^^^ - -Partial indexes add criterion to the index definition so that the index is -applied to a subset of rows. 
These can be specified on :class:`.Index` -using the ``postgresql_where`` keyword argument:: - - Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10) - -Operator Classes -^^^^^^^^^^^^^^^^^ - -PostgreSQL allows the specification of an *operator class* for each column of -an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html). -The :class:`.Index` construct allows these to be specified via the ``postgresql_ops`` -keyword argument:: - - Index('my_index', my_table.c.id, my_table.c.data, - postgresql_ops={ - 'data': 'text_pattern_ops', - 'id': 'int4_ops' - }) - -.. versionadded:: 0.7.2 - ``postgresql_ops`` keyword argument to :class:`.Index` construct. - -Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of -the :class:`.Column`, i.e. the name used to access it from the ``.c`` collection -of :class:`.Table`, which can be configured to be different than the actual -name of the column as expressed in the database. - -Index Types -^^^^^^^^^^^^ - -PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as -the ability for users to create their own (see -http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be -specified on :class:`.Index` using the ``postgresql_using`` keyword argument:: - - Index('my_index', my_table.c.data, postgresql_using='gin') - -The value passed to the keyword argument will be simply passed through to the -underlying CREATE INDEX command, so it *must* be a valid index type for your -version of PostgreSQL. - -""" - -import re - -from sqlalchemy import sql, schema, exc, util -from sqlalchemy.engine import default, reflection -from sqlalchemy.sql import compiler, expression, util as sql_util -from sqlalchemy import types as sqltypes - -try: - from uuid import UUID as _python_UUID -except ImportError: - _python_UUID = None - -from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \ - CHAR, TEXT, FLOAT, NUMERIC, \ - DATE, BOOLEAN, REAL - -RESERVED_WORDS = set( - ["all", "analyse", "analyze", "and", "any", "array", "as", "asc", - "asymmetric", "both", "case", "cast", "check", "collate", "column", - "constraint", "create", "current_catalog", "current_date", - "current_role", "current_time", "current_timestamp", "current_user", - "default", "deferrable", "desc", "distinct", "do", "else", "end", - "except", "false", "fetch", "for", "foreign", "from", "grant", "group", - "having", "in", "initially", "intersect", "into", "leading", "limit", - "localtime", "localtimestamp", "new", "not", "null", "off", "offset", - "old", "on", "only", "or", "order", "placing", "primary", "references", - "returning", "select", "session_user", "some", "symmetric", "table", - "then", "to", "trailing", "true", "union", "unique", "user", "using", - "variadic", "when", "where", "window", "with", "authorization", - "between", "binary", "cross", "current_schema", "freeze", "full", - "ilike", "inner", "is", "isnull", "join", "left", "like", "natural", - "notnull", "outer", "over", "overlaps", "right", "similar", "verbose" - ]) - -_DECIMAL_TYPES = (1231, 1700) -_FLOAT_TYPES = (700, 701, 1021, 1022) -_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016) - -class BYTEA(sqltypes.LargeBinary): - __visit_name__ = 'BYTEA' - -class DOUBLE_PRECISION(sqltypes.Float): - __visit_name__ = 'DOUBLE_PRECISION' - -class INET(sqltypes.TypeEngine): - __visit_name__ = "INET" -PGInet = INET - -class CIDR(sqltypes.TypeEngine): - __visit_name__ = "CIDR" -PGCidr = CIDR - -class MACADDR(sqltypes.TypeEngine): - __visit_name__ 
= "MACADDR" -PGMacAddr = MACADDR - -class TIMESTAMP(sqltypes.TIMESTAMP): - def __init__(self, timezone=False, precision=None): - super(TIMESTAMP, self).__init__(timezone=timezone) - self.precision = precision - - -class TIME(sqltypes.TIME): - def __init__(self, timezone=False, precision=None): - super(TIME, self).__init__(timezone=timezone) - self.precision = precision - -class INTERVAL(sqltypes.TypeEngine): - """Postgresql INTERVAL type. - - The INTERVAL type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000 or zxjdbc. - - """ - __visit_name__ = 'INTERVAL' - def __init__(self, precision=None): - self.precision = precision - - @classmethod - def _adapt_from_generic_interval(cls, interval): - return INTERVAL(precision=interval.second_precision) - - @property - def _type_affinity(self): - return sqltypes.Interval - -PGInterval = INTERVAL - -class BIT(sqltypes.TypeEngine): - __visit_name__ = 'BIT' - def __init__(self, length=None, varying=False): - if not varying: - # BIT without VARYING defaults to length 1 - self.length = length or 1 - else: - # but BIT VARYING can be unlimited-length, so no default - self.length = length - self.varying = varying - -PGBit = BIT - -class UUID(sqltypes.TypeEngine): - """Postgresql UUID type. - - Represents the UUID column type, interpreting - data either as natively returned by the DBAPI - or as Python uuid objects. - - The UUID type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000. - - """ - __visit_name__ = 'UUID' - - def __init__(self, as_uuid=False): - """Construct a UUID type. - - - :param as_uuid=False: if True, values will be interpreted - as Python uuid objects, converting to/from string via the - DBAPI. - - """ - if as_uuid and _python_UUID is None: - raise NotImplementedError( - "This version of Python does not support the native UUID type." - ) - self.as_uuid = as_uuid - - def bind_processor(self, dialect): - if self.as_uuid: - def process(value): - if value is not None: - value = str(value) - return value - return process - else: - return None - - def result_processor(self, dialect, coltype): - if self.as_uuid: - def process(value): - if value is not None: - value = _python_UUID(value) - return value - return process - else: - return None - -PGUuid = UUID - -class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine): - """Postgresql ARRAY type. - - Represents values as Python lists. - - The ARRAY type may not be supported on all DBAPIs. - It is known to work on psycopg2 and not pg8000. - - - """ - __visit_name__ = 'ARRAY' - - def __init__(self, item_type, mutable=False, as_tuple=False): - """Construct an ARRAY. - - E.g.:: - - Column('myarray', ARRAY(Integer)) - - Arguments are: - - :param item_type: The data type of items of this array. Note that - dimensionality is irrelevant here, so multi-dimensional arrays like - ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as - ``ARRAY(ARRAY(Integer))`` or such. The type mapping figures out on - the fly - - :param mutable=False: Specify whether lists passed to this - class should be considered mutable - this enables - "mutable types" mode in the ORM. Be sure to read the - notes for :class:`.MutableType` regarding ORM - performance implications. - - .. versionchanged:: 0.7.0 - Default changed from ``True``\ . - - .. versionchanged:: 0.7 - This functionality is now superseded by the - ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel`. 
- - :param as_tuple=False: Specify whether return results - should be converted to tuples from lists. DBAPIs such - as psycopg2 return lists by default. When tuples are - returned, the results are hashable. This flag can only - be set to ``True`` when ``mutable`` is set to - ``False``. - - .. versionadded:: 0.6.5 - - """ - if isinstance(item_type, ARRAY): - raise ValueError("Do not nest ARRAY types; ARRAY(basetype) " - "handles multi-dimensional arrays of basetype") - if isinstance(item_type, type): - item_type = item_type() - self.item_type = item_type - self.mutable = mutable - if mutable and as_tuple: - raise exc.ArgumentError( - "mutable must be set to False if as_tuple is True." - ) - self.as_tuple = as_tuple - - def copy_value(self, value): - if value is None: - return None - elif self.mutable: - return list(value) - else: - return value - - def compare_values(self, x, y): - return x == y - - def is_mutable(self): - return self.mutable - - def bind_processor(self, dialect): - item_proc = self.item_type.dialect_impl(dialect).bind_processor(dialect) - if item_proc: - def convert_item(item): - if isinstance(item, (list, tuple)): - return [convert_item(child) for child in item] - else: - return item_proc(item) - else: - def convert_item(item): - if isinstance(item, (list, tuple)): - return [convert_item(child) for child in item] - else: - return item - def process(value): - if value is None: - return value - return [convert_item(item) for item in value] - return process - - def result_processor(self, dialect, coltype): - item_proc = self.item_type.dialect_impl(dialect).result_processor(dialect, coltype) - if item_proc: - def convert_item(item): - if isinstance(item, list): - r = [convert_item(child) for child in item] - if self.as_tuple: - r = tuple(r) - return r - else: - return item_proc(item) - else: - def convert_item(item): - if isinstance(item, list): - r = [convert_item(child) for child in item] - if self.as_tuple: - r = tuple(r) - return r - else: - return item - def process(value): - if value is None: - return value - r = [convert_item(item) for item in value] - if self.as_tuple: - r = tuple(r) - return r - return process -PGArray = ARRAY - -class ENUM(sqltypes.Enum): - """Postgresql ENUM type. - - This is a subclass of :class:`.types.Enum` which includes - support for PG's ``CREATE TYPE``. - - :class:`~.postgresql.ENUM` is used automatically when - using the :class:`.types.Enum` type on PG assuming - the ``native_enum`` is left as ``True``. However, the - :class:`~.postgresql.ENUM` class can also be instantiated - directly in order to access some additional Postgresql-specific - options, namely finer control over whether or not - ``CREATE TYPE`` should be emitted. - - Note that both :class:`.types.Enum` as well as - :class:`~.postgresql.ENUM` feature create/drop - methods; the base :class:`.types.Enum` type ultimately - delegates to the :meth:`~.postgresql.ENUM.create` and - :meth:`~.postgresql.ENUM.drop` methods present here. - - """ - - def __init__(self, *enums, **kw): - """Construct an :class:`~.postgresql.ENUM`. - - Arguments are the same as that of - :class:`.types.Enum`, but also including - the following parameters. - - :param create_type: Defaults to True. - Indicates that ``CREATE TYPE`` should be - emitted, after optionally checking for the - presence of the type, when the parent - table is being created; and additionally - that ``DROP TYPE`` is called when the table - is dropped. 
When ``False``, no check - will be performed and no ``CREATE TYPE`` - or ``DROP TYPE`` is emitted, unless - :meth:`~.postgresql.ENUM.create` - or :meth:`~.postgresql.ENUM.drop` - are called directly. - Setting to ``False`` is helpful - when invoking a creation scheme to a SQL file - without access to the actual database - - the :meth:`~.postgresql.ENUM.create` and - :meth:`~.postgresql.ENUM.drop` methods can - be used to emit SQL to a target bind. - - .. versionadded:: 0.7.4 - - """ - self.create_type = kw.pop("create_type", True) - super(ENUM, self).__init__(*enums, **kw) - - def create(self, bind=None, checkfirst=True): - """Emit ``CREATE TYPE`` for this - :class:`~.postgresql.ENUM`. - - If the underlying dialect does not support - Postgresql CREATE TYPE, no action is taken. - - :param bind: a connectable :class:`.Engine`, - :class:`.Connection`, or similar object to emit - SQL. - :param checkfirst: if ``True``, a query against - the PG catalog will be first performed to see - if the type does not exist already before - creating. - - """ - if not bind.dialect.supports_native_enum: - return - - if not checkfirst or \ - not bind.dialect.has_type(bind, self.name, schema=self.schema): - bind.execute(CreateEnumType(self)) - - def drop(self, bind=None, checkfirst=True): - """Emit ``DROP TYPE`` for this - :class:`~.postgresql.ENUM`. - - If the underlying dialect does not support - Postgresql DROP TYPE, no action is taken. - - :param bind: a connectable :class:`.Engine`, - :class:`.Connection`, or similar object to emit - SQL. - :param checkfirst: if ``True``, a query against - the PG catalog will be first performed to see - if the type actually exists before dropping. - - """ - if not bind.dialect.supports_native_enum: - return - - if not checkfirst or \ - bind.dialect.has_type(bind, self.name, schema=self.schema): - bind.execute(DropEnumType(self)) - - def _check_for_name_in_memos(self, checkfirst, kw): - """Look in the 'ddl runner' for 'memos', then - note our name in that collection. - - This to ensure a particular named enum is operated - upon only once within any kind of create/drop - sequence without relying upon "checkfirst". 
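Driving the ``create()``/``drop()`` pair above directly; the type name, the labels, and ``engine`` are assumed for illustration::

    from sqlalchemy.dialects.postgresql import ENUM

    mood = ENUM('sad', 'ok', 'happy', name='mood_enum')
    mood.create(engine, checkfirst=True)   # CREATE TYPE mood_enum AS ENUM (...)
    # ... tables using the type can now be created ...
    mood.drop(engine, checkfirst=True)     # DROP TYPE mood_enum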
- - """ - if not self.create_type: - return True - if '_ddl_runner' in kw: - ddl_runner = kw['_ddl_runner'] - if '_pg_enums' in ddl_runner.memo: - pg_enums = ddl_runner.memo['_pg_enums'] - else: - pg_enums = ddl_runner.memo['_pg_enums'] = set() - present = self.name in pg_enums - pg_enums.add(self.name) - return present - else: - return False - - def _on_table_create(self, target, bind, checkfirst, **kw): - if not self._check_for_name_in_memos(checkfirst, kw): - self.create(bind=bind, checkfirst=checkfirst) - - def _on_metadata_create(self, target, bind, checkfirst, **kw): - if self.metadata is not None and \ - not self._check_for_name_in_memos(checkfirst, kw): - self.create(bind=bind, checkfirst=checkfirst) - - def _on_metadata_drop(self, target, bind, checkfirst, **kw): - if not self._check_for_name_in_memos(checkfirst, kw): - self.drop(bind=bind, checkfirst=checkfirst) - -colspecs = { - sqltypes.Interval:INTERVAL, - sqltypes.Enum:ENUM, -} - -ischema_names = { - 'integer' : INTEGER, - 'bigint' : BIGINT, - 'smallint' : SMALLINT, - 'character varying' : VARCHAR, - 'character' : CHAR, - '"char"' : sqltypes.String, - 'name' : sqltypes.String, - 'text' : TEXT, - 'numeric' : NUMERIC, - 'float' : FLOAT, - 'real' : REAL, - 'inet': INET, - 'cidr': CIDR, - 'uuid': UUID, - 'bit': BIT, - 'bit varying': BIT, - 'macaddr': MACADDR, - 'double precision' : DOUBLE_PRECISION, - 'timestamp' : TIMESTAMP, - 'timestamp with time zone' : TIMESTAMP, - 'timestamp without time zone' : TIMESTAMP, - 'time with time zone' : TIME, - 'time without time zone' : TIME, - 'date' : DATE, - 'time': TIME, - 'bytea' : BYTEA, - 'boolean' : BOOLEAN, - 'interval':INTERVAL, - 'interval year to month':INTERVAL, - 'interval day to second':INTERVAL, -} - - - -class PGCompiler(compiler.SQLCompiler): - - def visit_match_op(self, binary, **kw): - return "%s @@ to_tsquery(%s)" % ( - self.process(binary.left), - self.process(binary.right)) - - def visit_ilike_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return '%s ILIKE %s' % \ - (self.process(binary.left), self.process(binary.right)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def visit_notilike_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return '%s NOT ILIKE %s' % \ - (self.process(binary.left), self.process(binary.right)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def render_literal_value(self, value, type_): - value = super(PGCompiler, self).render_literal_value(value, type_) - # TODO: need to inspect "standard_conforming_strings" - if self.dialect._backslash_escapes: - value = value.replace('\\', '\\\\') - return value - - def visit_sequence(self, seq): - return "nextval('%s')" % self.preparer.format_sequence(seq) - - def limit_clause(self, select): - text = "" - if select._limit is not None: - text += " \n LIMIT " + self.process(sql.literal(select._limit)) - if select._offset is not None: - if select._limit is None: - text += " \n LIMIT ALL" - text += " OFFSET " + self.process(sql.literal(select._offset)) - return text - - def get_select_precolumns(self, select): - if select._distinct is not False: - if select._distinct is True: - return "DISTINCT " - elif isinstance(select._distinct, (list, tuple)): - return "DISTINCT ON (" + ', '.join( - [self.process(col) for col in select._distinct] - )+ ") " - else: - return "DISTINCT ON (" + self.process(select._distinct) + ") " - else: - return "" - - def for_update_clause(self, select): - if 
select.for_update == 'nowait': - return " FOR UPDATE NOWAIT" - elif select.for_update == 'read': - return " FOR SHARE" - elif select.for_update == 'read_nowait': - return " FOR SHARE NOWAIT" - else: - return super(PGCompiler, self).for_update_clause(select) - - def returning_clause(self, stmt, returning_cols): - - columns = [ - self.process( - self.label_select_column(None, c, asfrom=False), - within_columns_clause=True, - result_map=self.result_map) - for c in expression._select_iterables(returning_cols) - ] - - return 'RETURNING ' + ', '.join(columns) - - def visit_extract(self, extract, **kwargs): - field = self.extract_map.get(extract.field, extract.field) - if extract.expr.type: - affinity = extract.expr.type._type_affinity - else: - affinity = None - - casts = { - sqltypes.Date:'date', - sqltypes.DateTime:'timestamp', - sqltypes.Interval:'interval', sqltypes.Time:'time' - } - cast = casts.get(affinity, None) - if isinstance(extract.expr, sql.ColumnElement) and cast is not None: - expr = extract.expr.op('::')(sql.literal_column(cast)) - else: - expr = extract.expr - return "EXTRACT(%s FROM %s)" % ( - field, self.process(expr)) - -class PGDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) - impl_type = column.type.dialect_impl(self.dialect) - if column.primary_key and \ - column is column.table._autoincrement_column and \ - not isinstance(impl_type, sqltypes.SmallInteger) and \ - ( - column.default is None or - ( - isinstance(column.default, schema.Sequence) and - column.default.optional - ) - ): - if isinstance(impl_type, sqltypes.BigInteger): - colspec += " BIGSERIAL" - else: - colspec += " SERIAL" - else: - colspec += " " + self.dialect.type_compiler.process(column.type) - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - return colspec - - def visit_create_enum_type(self, create): - type_ = create.element - - return "CREATE TYPE %s AS ENUM (%s)" % ( - self.preparer.format_type(type_), - ",".join("'%s'" % e for e in type_.enums) - ) - - def visit_drop_enum_type(self, drop): - type_ = drop.element - - return "DROP TYPE %s" % ( - self.preparer.format_type(type_) - ) - - def visit_create_index(self, create): - preparer = self.preparer - index = create.element - text = "CREATE " - if index.unique: - text += "UNIQUE " - ops = index.kwargs.get('postgresql_ops', {}) - text += "INDEX %s ON %s " % ( - preparer.quote( - self._index_identifier(index.name), index.quote), - preparer.format_table(index.table) - ) - - if 'postgresql_using' in index.kwargs: - using = index.kwargs['postgresql_using'] - text += "USING %s " % preparer.quote(using, index.quote) - - text += "(%s)" \ - % ( - ', '.join([ - preparer.format_column(c) + - (c.key in ops and (' ' + ops[c.key]) or '') - for c in index.columns]) - ) - - if "postgres_where" in index.kwargs: - whereclause = index.kwargs['postgres_where'] - util.warn_deprecated( - "The 'postgres_where' argument has been renamed " - "to 'postgresql_where'.") - elif 'postgresql_where' in index.kwargs: - whereclause = index.kwargs['postgresql_where'] - else: - whereclause = None - - if whereclause is not None: - whereclause = sql_util.expression_as_ddl(whereclause) - where_compiled = self.sql_compiler.process(whereclause) - text += " WHERE " + where_compiled - return text - - -class PGTypeCompiler(compiler.GenericTypeCompiler): - def visit_INET(self, type_): - return 
"INET" - - def visit_CIDR(self, type_): - return "CIDR" - - def visit_MACADDR(self, type_): - return "MACADDR" - - def visit_FLOAT(self, type_): - if not type_.precision: - return "FLOAT" - else: - return "FLOAT(%(precision)s)" % {'precision': type_.precision} - - def visit_DOUBLE_PRECISION(self, type_): - return "DOUBLE PRECISION" - - def visit_BIGINT(self, type_): - return "BIGINT" - - def visit_datetime(self, type_): - return self.visit_TIMESTAMP(type_) - - def visit_enum(self, type_): - if not type_.native_enum or not self.dialect.supports_native_enum: - return super(PGTypeCompiler, self).visit_enum(type_) - else: - return self.visit_ENUM(type_) - - def visit_ENUM(self, type_): - return self.dialect.identifier_preparer.format_type(type_) - - def visit_TIMESTAMP(self, type_): - return "TIMESTAMP%s %s" % ( - getattr(type_, 'precision', None) and "(%d)" % - type_.precision or "", - (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" - ) - - def visit_TIME(self, type_): - return "TIME%s %s" % ( - getattr(type_, 'precision', None) and "(%d)" % - type_.precision or "", - (type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE" - ) - - def visit_INTERVAL(self, type_): - if type_.precision is not None: - return "INTERVAL(%d)" % type_.precision - else: - return "INTERVAL" - - def visit_BIT(self, type_): - if type_.varying: - compiled = "BIT VARYING" - if type_.length is not None: - compiled += "(%d)" % type_.length - else: - compiled = "BIT(%d)" % type_.length - return compiled - - def visit_UUID(self, type_): - return "UUID" - - def visit_large_binary(self, type_): - return self.visit_BYTEA(type_) - - def visit_BYTEA(self, type_): - return "BYTEA" - - def visit_ARRAY(self, type_): - return self.process(type_.item_type) + '[]' - - -class PGIdentifierPreparer(compiler.IdentifierPreparer): - - reserved_words = RESERVED_WORDS - - def _unquote_identifier(self, value): - if value[0] == self.initial_quote: - value = value[1:-1].\ - replace(self.escape_to_quote, self.escape_quote) - return value - - def format_type(self, type_, use_schema=True): - if not type_.name: - raise exc.CompileError("Postgresql ENUM type requires a name.") - - name = self.quote(type_.name, type_.quote) - if not self.omit_schema and use_schema and type_.schema is not None: - name = self.quote_schema(type_.schema, type_.quote) + "." 
+ name - return name - -class PGInspector(reflection.Inspector): - - def __init__(self, conn): - reflection.Inspector.__init__(self, conn) - - def get_table_oid(self, table_name, schema=None): - """Return the oid from `table_name` and `schema`.""" - - return self.dialect.get_table_oid(self.bind, table_name, schema, - info_cache=self.info_cache) - -class CreateEnumType(schema._CreateDropBase): - __visit_name__ = "create_enum_type" - -class DropEnumType(schema._CreateDropBase): - __visit_name__ = "drop_enum_type" - -class PGExecutionContext(default.DefaultExecutionContext): - def fire_sequence(self, seq, type_): - return self._execute_scalar(("select nextval('%s')" % \ - self.dialect.identifier_preparer.format_sequence(seq)), type_) - - def get_insert_default(self, column): - if column.primary_key and column is column.table._autoincrement_column: - if column.server_default and column.server_default.has_argument: - - # pre-execute passive defaults on primary key columns - return self._execute_scalar("select %s" % - column.server_default.arg, column.type) - - elif (column.default is None or - (column.default.is_sequence and - column.default.optional)): - - # execute the sequence associated with a SERIAL primary - # key column. for non-primary-key SERIAL, the ID just - # generates server side. - - try: - seq_name = column._postgresql_seq_name - except AttributeError: - tab = column.table.name - col = column.name - tab = tab[0:29 + max(0, (29 - len(col)))] - col = col[0:29 + max(0, (29 - len(tab)))] - column._postgresql_seq_name = seq_name = "%s_%s_seq" % (tab, col) - - sch = column.table.schema - if sch is not None: - exc = "select nextval('\"%s\".\"%s\"')" % \ - (sch, seq_name) - else: - exc = "select nextval('\"%s\"')" % \ - (seq_name, ) - - return self._execute_scalar(exc, column.type) - - return super(PGExecutionContext, self).get_insert_default(column) - -class PGDialect(default.DefaultDialect): - name = 'postgresql' - supports_alter = True - max_identifier_length = 63 - supports_sane_rowcount = True - - supports_native_enum = True - supports_native_boolean = True - - supports_sequences = True - sequences_optional = True - preexecute_autoincrement_sequences = True - postfetch_lastrowid = False - - supports_default_values = True - supports_empty_insert = False - default_paramstyle = 'pyformat' - ischema_names = ischema_names - colspecs = colspecs - - statement_compiler = PGCompiler - ddl_compiler = PGDDLCompiler - type_compiler = PGTypeCompiler - preparer = PGIdentifierPreparer - execution_ctx_cls = PGExecutionContext - inspector = PGInspector - isolation_level = None - - # TODO: need to inspect "standard_conforming_strings" - _backslash_escapes = True - - def __init__(self, isolation_level=None, **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.isolation_level = isolation_level - - def initialize(self, connection): - super(PGDialect, self).initialize(connection) - self.implicit_returning = self.server_version_info > (8, 2) and \ - self.__dict__.get('implicit_returning', True) - self.supports_native_enum = self.server_version_info >= (8, 3) - if not self.supports_native_enum: - self.colspecs = self.colspecs.copy() - # pop base Enum type - self.colspecs.pop(sqltypes.Enum, None) - # psycopg2, others may have placed ENUM here as well - self.colspecs.pop(ENUM, None) - - def on_connect(self): - if self.isolation_level is not None: - def connect(conn): - self.set_isolation_level(conn, self.isolation_level) - return connect - else: - return None - - _isolation_lookup = 
set(['SERIALIZABLE', - 'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ']) - - def set_isolation_level(self, connection, level): - level = level.replace('_', ' ') - if level not in self._isolation_lookup: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. " - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - cursor = connection.cursor() - cursor.execute( - "SET SESSION CHARACTERISTICS AS TRANSACTION " - "ISOLATION LEVEL %s" % level) - cursor.execute("COMMIT") - cursor.close() - - def get_isolation_level(self, connection): - cursor = connection.cursor() - cursor.execute('show transaction isolation level') - val = cursor.fetchone()[0] - cursor.close() - return val.upper() - - def do_begin_twophase(self, connection, xid): - self.do_begin(connection.connection) - - def do_prepare_twophase(self, connection, xid): - connection.execute("PREPARE TRANSACTION '%s'" % xid) - - def do_rollback_twophase(self, connection, xid, - is_prepared=True, recover=False): - if is_prepared: - if recover: - #FIXME: ugly hack to get out of transaction - # context when committing recoverable transactions - # Must find out a way how to make the dbapi not - # open a transaction. - connection.execute("ROLLBACK") - connection.execute("ROLLBACK PREPARED '%s'" % xid) - connection.execute("BEGIN") - self.do_rollback(connection.connection) - else: - self.do_rollback(connection.connection) - - def do_commit_twophase(self, connection, xid, - is_prepared=True, recover=False): - if is_prepared: - if recover: - connection.execute("ROLLBACK") - connection.execute("COMMIT PREPARED '%s'" % xid) - connection.execute("BEGIN") - self.do_rollback(connection.connection) - else: - self.do_commit(connection.connection) - - def do_recover_twophase(self, connection): - resultset = connection.execute( - sql.text("SELECT gid FROM pg_prepared_xacts")) - return [row[0] for row in resultset] - - def _get_default_schema_name(self, connection): - return connection.scalar("select current_schema()") - - def has_schema(self, connection, schema): - cursor = connection.execute( - sql.text( - "select nspname from pg_namespace where lower(nspname)=:schema", - bindparams=[ - sql.bindparam( - 'schema', unicode(schema.lower()), - type_=sqltypes.Unicode)] - ) - ) - - return bool(cursor.first()) - - def has_table(self, connection, table_name, schema=None): - # seems like case gets folded in pg_class... 
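End to end, the isolation plumbing above was exercised like this; the URL is illustrative and the level must be one of ``_isolation_lookup``::

    from sqlalchemy import create_engine

    engine = create_engine("postgresql://scott:tiger@localhost/test",
                           isolation_level="REPEATABLE READ")
    conn = engine.connect()
    # round-trips through set_isolation_level()/get_isolation_level()
    print engine.dialect.get_isolation_level(conn.connection)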
- if schema is None: - cursor = connection.execute( - sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=current_schema() and " - "relname=:name", - bindparams=[ - sql.bindparam('name', unicode(table_name), - type_=sqltypes.Unicode)] - ) - ) - else: - cursor = connection.execute( - sql.text( - "select relname from pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where n.nspname=:schema and " - "relname=:name", - bindparams=[ - sql.bindparam('name', - unicode(table_name), type_=sqltypes.Unicode), - sql.bindparam('schema', - unicode(schema), type_=sqltypes.Unicode)] - ) - ) - return bool(cursor.first()) - - def has_sequence(self, connection, sequence_name, schema=None): - if schema is None: - cursor = connection.execute( - sql.text( - "SELECT relname FROM pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where relkind='S' and " - "n.nspname=current_schema() " - "and relname=:name", - bindparams=[ - sql.bindparam('name', unicode(sequence_name), - type_=sqltypes.Unicode) - ] - ) - ) - else: - cursor = connection.execute( - sql.text( - "SELECT relname FROM pg_class c join pg_namespace n on " - "n.oid=c.relnamespace where relkind='S' and " - "n.nspname=:schema and relname=:name", - bindparams=[ - sql.bindparam('name', unicode(sequence_name), - type_=sqltypes.Unicode), - sql.bindparam('schema', - unicode(schema), type_=sqltypes.Unicode) - ] - ) - ) - - return bool(cursor.first()) - - def has_type(self, connection, type_name, schema=None): - bindparams = [ - sql.bindparam('typname', - unicode(type_name), type_=sqltypes.Unicode), - sql.bindparam('nspname', - unicode(schema), type_=sqltypes.Unicode), - ] - if schema is not None: - query = """ - SELECT EXISTS ( - SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n - WHERE t.typnamespace = n.oid - AND t.typname = :typname - AND n.nspname = :nspname - ) - """ - else: - query = """ - SELECT EXISTS ( - SELECT * FROM pg_catalog.pg_type t - WHERE t.typname = :typname - AND pg_type_is_visible(t.oid) - ) - """ - cursor = connection.execute(sql.text(query, bindparams=bindparams)) - return bool(cursor.scalar()) - - def _get_server_version_info(self, connection): - v = connection.execute("select version()").scalar() - m = re.match('PostgreSQL (\d+)\.(\d+)(?:\.(\d+))?(?:devel)?', v) - if not m: - raise AssertionError( - "Could not determine version from string '%s'" % v) - return tuple([int(x) for x in m.group(1, 2, 3) if x is not None]) - - @reflection.cache - def get_table_oid(self, connection, table_name, schema=None, **kw): - """Fetch the oid for schema.table_name. - - Several reflection methods require the table oid. The idea for using - this method is that it can be fetched one time and cached for - subsequent calls. - - """ - table_oid = None - if schema is not None: - schema_where_clause = "n.nspname = :schema" - else: - schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)" - query = """ - SELECT c.oid - FROM pg_catalog.pg_class c - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace - WHERE (%s) - AND c.relname = :table_name AND c.relkind in ('r','v') - """ % schema_where_clause - # Since we're binding to unicode, table_name and schema_name must be - # unicode. 
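The existence probes above all share one shape; from user code, with ``engine`` and the object names assumed::

    conn = engine.connect()
    print engine.dialect.has_table(conn, 'users')
    print engine.dialect.has_sequence(conn, 'users_id_seq')
    print engine.dialect.has_type(conn, 'mood_enum')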
- table_name = unicode(table_name) - if schema is not None: - schema = unicode(schema) - s = sql.text(query, bindparams=[ - sql.bindparam('table_name', type_=sqltypes.Unicode), - sql.bindparam('schema', type_=sqltypes.Unicode) - ], - typemap={'oid':sqltypes.Integer} - ) - c = connection.execute(s, table_name=table_name, schema=schema) - table_oid = c.scalar() - if table_oid is None: - raise exc.NoSuchTableError(table_name) - return table_oid - - @reflection.cache - def get_schema_names(self, connection, **kw): - s = """ - SELECT nspname - FROM pg_namespace - ORDER BY nspname - """ - rp = connection.execute(s) - # what about system tables? - # Py3K - #schema_names = [row[0] for row in rp \ - # if not row[0].startswith('pg_')] - # Py2K - schema_names = [row[0].decode(self.encoding) for row in rp \ - if not row[0].startswith('pg_')] - # end Py2K - return schema_names - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - - result = connection.execute( - sql.text(u"SELECT relname FROM pg_class c " - "WHERE relkind = 'r' " - "AND '%s' = (select nspname from pg_namespace n " - "where n.oid = c.relnamespace) " % - current_schema, - typemap = {'relname':sqltypes.Unicode} - ) - ) - return [row[0] for row in result] - - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - s = """ - SELECT relname - FROM pg_class c - WHERE relkind = 'v' - AND '%(schema)s' = (select nspname from pg_namespace n - where n.oid = c.relnamespace) - """ % dict(schema=current_schema) - # Py3K - #view_names = [row[0] for row in connection.execute(s)] - # Py2K - view_names = [row[0].decode(self.encoding) - for row in connection.execute(s)] - # end Py2K - return view_names - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - if schema is not None: - current_schema = schema - else: - current_schema = self.default_schema_name - s = """ - SELECT definition FROM pg_views - WHERE schemaname = :schema - AND viewname = :view_name - """ - rp = connection.execute(sql.text(s), - view_name=view_name, schema=current_schema) - if rp: - # Py3K - #view_def = rp.scalar() - # Py2K - view_def = rp.scalar().decode(self.encoding) - # end Py2K - return view_def - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - SQL_COLS = """ - SELECT a.attname, - pg_catalog.format_type(a.atttypid, a.atttypmod), - (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid) - for 128) - FROM pg_catalog.pg_attrdef d - WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum - AND a.atthasdef) - AS DEFAULT, - a.attnotnull, a.attnum, a.attrelid as table_oid - FROM pg_catalog.pg_attribute a - WHERE a.attrelid = :table_oid - AND a.attnum > 0 AND NOT a.attisdropped - ORDER BY a.attnum - """ - s = sql.text(SQL_COLS, - bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)], - typemap={'attname':sqltypes.Unicode, 'default':sqltypes.Unicode} - ) - c = connection.execute(s, table_oid=table_oid) - rows = c.fetchall() - domains = self._load_domains(connection) - enums = self._load_enums(connection) - - # format columns - columns = [] - for name, format_type, default, notnull, attnum, table_oid in rows: - ## strip (5) from 
character varying(5), timestamp(5) - # with time zone, etc - attype = re.sub(r'\([\d,]+\)', '', format_type) - - # strip '[]' from integer[], etc. - attype = re.sub(r'\[\]', '', attype) - - nullable = not notnull - is_array = format_type.endswith('[]') - charlen = re.search('\(([\d,]+)\)', format_type) - if charlen: - charlen = charlen.group(1) - kwargs = {} - args = None - - if attype == 'numeric': - if charlen: - prec, scale = charlen.split(',') - args = (int(prec), int(scale)) - else: - args = () - elif attype == 'double precision': - args = (53, ) - elif attype == 'integer': - args = () - elif attype in ('timestamp with time zone', - 'time with time zone'): - kwargs['timezone'] = True - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif attype in ('timestamp without time zone', - 'time without time zone', 'time'): - kwargs['timezone'] = False - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif attype == 'bit varying': - kwargs['varying'] = True - if charlen: - args = (int(charlen),) - else: - args = () - elif attype in ('interval','interval year to month', - 'interval day to second'): - if charlen: - kwargs['precision'] = int(charlen) - args = () - elif charlen: - args = (int(charlen),) - else: - args = () - - while True: - if attype in self.ischema_names: - coltype = self.ischema_names[attype] - break - elif attype in enums: - enum = enums[attype] - coltype = ENUM - if "." in attype: - kwargs['schema'], kwargs['name'] = attype.split('.') - else: - kwargs['name'] = attype - args = tuple(enum['labels']) - break - elif attype in domains: - domain = domains[attype] - attype = domain['attype'] - # A table can't override whether the domain is nullable. - nullable = domain['nullable'] - if domain['default'] and not default: - # It can, however, override the default - # value, but can't set it to null. - default = domain['default'] - continue - else: - coltype = None - break - - if coltype: - coltype = coltype(*args, **kwargs) - if is_array: - coltype = ARRAY(coltype) - else: - util.warn("Did not recognize type '%s' of column '%s'" % - (attype, name)) - coltype = sqltypes.NULLTYPE - # adjust the default value - autoincrement = False - if default is not None: - match = re.search(r"""(nextval\(')([^']+)('.*$)""", default) - if match is not None: - autoincrement = True - # the default is related to a Sequence - sch = schema - if '.' not in match.group(2) and sch is not None: - # unconditionally quote the schema name. this could - # later be enhanced to obey quoting rules / - # "quote schema" - default = match.group(1) + \ - ('"%s"' % sch) + '.' 
+ \ - match.group(2) + match.group(3) - - column_info = dict(name=name, type=coltype, nullable=nullable, - default=default, autoincrement=autoincrement) - columns.append(column_info) - return columns - - @reflection.cache - def get_primary_keys(self, connection, table_name, schema=None, **kw): - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - if self.server_version_info < (8, 4): - # unnest() and generate_subscripts() both introduced in - # version 8.4 - PK_SQL = """ - SELECT a.attname - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_attribute a - on t.oid=a.attrelid and a.attnum=ANY(ix.indkey) - WHERE - t.oid = :table_oid and ix.indisprimary = 't' - ORDER BY a.attnum - """ - else: - PK_SQL = """ - SELECT a.attname - FROM pg_attribute a JOIN ( - SELECT unnest(ix.indkey) attnum, - generate_subscripts(ix.indkey, 1) ord - FROM pg_index ix - WHERE ix.indrelid = :table_oid AND ix.indisprimary - ) k ON a.attnum=k.attnum - WHERE a.attrelid = :table_oid - ORDER BY k.ord - """ - t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - primary_keys = [r[0] for r in c.fetchall()] - return primary_keys - - @reflection.cache - def get_pk_constraint(self, connection, table_name, schema=None, **kw): - cols = self.get_primary_keys(connection, table_name, - schema=schema, **kw) - - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - PK_CONS_SQL = """ - SELECT conname - FROM pg_catalog.pg_constraint r - WHERE r.conrelid = :table_oid AND r.contype = 'p' - ORDER BY 1 - """ - t = sql.text(PK_CONS_SQL, typemap={'conname':sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - name = c.scalar() - return { - 'constrained_columns':cols, - 'name':name - } - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - preparer = self.identifier_preparer - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - FK_SQL = """ - SELECT r.conname, - pg_catalog.pg_get_constraintdef(r.oid, true) as condef, - n.nspname as conschema - FROM pg_catalog.pg_constraint r, - pg_namespace n, - pg_class c - - WHERE r.conrelid = :table AND - r.contype = 'f' AND - c.oid = confrelid AND - n.oid = c.relnamespace - ORDER BY 1 - """ - - t = sql.text(FK_SQL, typemap={ - 'conname':sqltypes.Unicode, - 'condef':sqltypes.Unicode}) - c = connection.execute(t, table=table_oid) - fkeys = [] - for conname, condef, conschema in c.fetchall(): - m = re.search('FOREIGN KEY \((.*?)\) REFERENCES ' - '(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups() - constrained_columns, referred_schema, \ - referred_table, referred_columns = m - constrained_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s*', constrained_columns)] - - if referred_schema: - referred_schema =\ - preparer._unquote_identifier(referred_schema) - elif schema is not None and schema == conschema: - # no schema was returned by pg_get_constraintdef(). This - # means the schema is in the search path. We will leave - # it as None, unless the actual schema, which we pull out - # from pg_namespace even though pg_get_constraintdef() doesn't - # want to give it to us, matches that of the referencing table, - # and an explicit schema was given for the referencing table. 
- referred_schema = schema - referred_table = preparer._unquote_identifier(referred_table) - referred_columns = [preparer._unquote_identifier(x) - for x in re.split(r'\s*,\s', referred_columns)] - fkey_d = { - 'name' : conname, - 'constrained_columns' : constrained_columns, - 'referred_schema' : referred_schema, - 'referred_table' : referred_table, - 'referred_columns' : referred_columns - } - fkeys.append(fkey_d) - return fkeys - - @reflection.cache - def get_indexes(self, connection, table_name, schema, **kw): - table_oid = self.get_table_oid(connection, table_name, schema, - info_cache=kw.get('info_cache')) - - IDX_SQL = """ - SELECT - i.relname as relname, - ix.indisunique, ix.indexprs, ix.indpred, - a.attname - FROM - pg_class t - join pg_index ix on t.oid = ix.indrelid - join pg_class i on i.oid=ix.indexrelid - left outer join - pg_attribute a - on t.oid=a.attrelid and a.attnum=ANY(ix.indkey) - WHERE - t.relkind = 'r' - and t.oid = :table_oid - and ix.indisprimary = 'f' - ORDER BY - t.relname, - i.relname - """ - - t = sql.text(IDX_SQL, typemap={'attname':sqltypes.Unicode}) - c = connection.execute(t, table_oid=table_oid) - - index_names = {} - indexes = [] - sv_idx_name = None - for row in c.fetchall(): - idx_name, unique, expr, prd, col = row - if expr: - if idx_name != sv_idx_name: - util.warn( - "Skipped unsupported reflection of " - "expression-based index %s" - % idx_name) - sv_idx_name = idx_name - continue - if prd and not idx_name == sv_idx_name: - util.warn( - "Predicate of partial index %s ignored during reflection" - % idx_name) - sv_idx_name = idx_name - if idx_name in index_names: - index_d = index_names[idx_name] - else: - index_d = {'column_names':[]} - indexes.append(index_d) - index_names[idx_name] = index_d - index_d['name'] = idx_name - if col is not None: - index_d['column_names'].append(col) - index_d['unique'] = unique - return indexes - - def _load_enums(self, connection): - if not self.supports_native_enum: - return {} - - ## Load data types for enums: - SQL_ENUMS = """ - SELECT t.typname as "name", - -- no enum defaults in 8.4 at least - -- t.typdefault as "default", - pg_catalog.pg_type_is_visible(t.oid) as "visible", - n.nspname as "schema", - e.enumlabel as "label" - FROM pg_catalog.pg_type t - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace - LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid - WHERE t.typtype = 'e' - ORDER BY "name", e.oid -- e.oid gives us label order - """ - - s = sql.text(SQL_ENUMS, typemap={ - 'attname':sqltypes.Unicode, - 'label':sqltypes.Unicode}) - c = connection.execute(s) - - enums = {} - for enum in c.fetchall(): - if enum['visible']: - # 'visible' just means whether or not the enum is in a - # schema that's on the search path -- or not overridden by - # a schema with higher precedence. If it's not visible, - # it will be prefixed with the schema-name when it's used. 
- name = enum['name'] - else: - name = "%s.%s" % (enum['schema'], enum['name']) - - if name in enums: - enums[name]['labels'].append(enum['label']) - else: - enums[name] = { - 'labels': [enum['label']], - } - - return enums - - def _load_domains(self, connection): - ## Load data types for domains: - SQL_DOMAINS = """ - SELECT t.typname as "name", - pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype", - not t.typnotnull as "nullable", - t.typdefault as "default", - pg_catalog.pg_type_is_visible(t.oid) as "visible", - n.nspname as "schema" - FROM pg_catalog.pg_type t - LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace - WHERE t.typtype = 'd' - """ - - s = sql.text(SQL_DOMAINS, typemap={'attname':sqltypes.Unicode}) - c = connection.execute(s) - - domains = {} - for domain in c.fetchall(): - ## strip (30) from character varying(30) - attype = re.search('([^\(]+)', domain['attype']).group(1) - if domain['visible']: - # 'visible' just means whether or not the domain is in a - # schema that's on the search path -- or not overridden by - # a schema with higher precedence. If it's not visible, - # it will be prefixed with the schema-name when it's used. - name = domain['name'] - else: - name = "%s.%s" % (domain['schema'], domain['name']) - - domains[name] = { - 'attype':attype, - 'nullable': domain['nullable'], - 'default': domain['default'] - } - - return domains - diff --git a/libs/sqlalchemy/dialects/postgresql/pg8000.py b/libs/sqlalchemy/dialects/postgresql/pg8000.py deleted file mode 100644 index dc72555e..00000000 --- a/libs/sqlalchemy/dialects/postgresql/pg8000.py +++ /dev/null @@ -1,121 +0,0 @@ -# postgresql/pg8000.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the PostgreSQL database via the pg8000 driver. - -Connecting ----------- - -URLs are of the form -``postgresql+pg8000://user:password@host:port/dbname[?key=value&key=value...]``. - -Unicode -------- - -pg8000 requires that the postgresql client encoding be -configured in the postgresql.conf file in order to use encodings -other than ascii. Set this value to the same value as the -"encoding" parameter on create_engine(), usually "utf-8". - -Interval --------- - -Passing data from/to the Interval type is not supported as of -yet. 
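As a hedged illustration of the URL form and Unicode note above (host, credentials, and database name are placeholders, not values from this codebase):

```python
# Sketch: connect through the pg8000 driver. "encoding" is set to match the
# client encoding configured in postgresql.conf, per the Unicode note above;
# all connection details are placeholders.
from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+pg8000://scott:tiger@localhost:5432/test",
    encoding="utf-8",
)
```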
- -""" -from sqlalchemy import util, exc -from sqlalchemy.util.compat import decimal -from sqlalchemy import processors -from sqlalchemy import types as sqltypes -from sqlalchemy.dialects.postgresql.base import PGDialect, \ - PGCompiler, PGIdentifierPreparer, PGExecutionContext,\ - _DECIMAL_TYPES, _FLOAT_TYPES, _INT_TYPES - -class _PGNumeric(sqltypes.Numeric): - def result_processor(self, dialect, coltype): - if self.asdecimal: - if coltype in _FLOAT_TYPES: - return processors.to_decimal_processor_factory(decimal.Decimal) - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - # pg8000 returns Decimal natively for 1700 - return None - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - else: - if coltype in _FLOAT_TYPES: - # pg8000 returns float natively for 701 - return None - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - return processors.to_float - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - - -class _PGNumericNoBind(_PGNumeric): - def bind_processor(self, dialect): - return None - -class PGExecutionContext_pg8000(PGExecutionContext): - pass - - -class PGCompiler_pg8000(PGCompiler): - def visit_mod(self, binary, **kw): - return self.process(binary.left) + " %% " + self.process(binary.right) - - def post_process_text(self, text): - if '%%' in text: - util.warn("The SQLAlchemy postgresql dialect now automatically escapes '%' in text() " - "expressions to '%%'.") - return text.replace('%', '%%') - - -class PGIdentifierPreparer_pg8000(PGIdentifierPreparer): - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace('%', '%%') - - -class PGDialect_pg8000(PGDialect): - driver = 'pg8000' - - supports_unicode_statements = True - - supports_unicode_binds = True - - default_paramstyle = 'format' - supports_sane_multi_rowcount = False - execution_ctx_cls = PGExecutionContext_pg8000 - statement_compiler = PGCompiler_pg8000 - preparer = PGIdentifierPreparer_pg8000 - description_encoding = 'use_encoding' - - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric : _PGNumericNoBind, - sqltypes.Float : _PGNumeric - } - ) - - @classmethod - def dbapi(cls): - return __import__('pg8000').dbapi - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - return "connection is closed" in str(e) - -dialect = PGDialect_pg8000 diff --git a/libs/sqlalchemy/dialects/postgresql/psycopg2.py b/libs/sqlalchemy/dialects/postgresql/psycopg2.py deleted file mode 100644 index ecc8d331..00000000 --- a/libs/sqlalchemy/dialects/postgresql/psycopg2.py +++ /dev/null @@ -1,391 +0,0 @@ -# postgresql/psycopg2.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the PostgreSQL database via the psycopg2 driver. - -Driver ------- - -The psycopg2 driver is available at http://pypi.python.org/pypi/psycopg2/ . -The dialect has several behaviors which are specifically tailored towards compatibility -with this module. - -Note that psycopg1 is **not** supported. - -Connecting ----------- - -URLs are of the form -``postgresql+psycopg2://user:password@host:port/dbname[?key=value&key=value...]``. 
-
-psycopg2-specific keyword arguments which are accepted by
-:func:`.create_engine()` are:
-
-* *server_side_cursors* - Enable the usage of "server side cursors" for SQL
-  statements which support this feature. What this essentially means from a
-  psycopg2 point of view is that the cursor is created using a name, e.g.
-  ``connection.cursor('some name')``, which has the effect that result rows are
-  not immediately pre-fetched and buffered after statement execution, but are
-  instead left on the server and only retrieved as needed. SQLAlchemy's
-  :class:`~sqlalchemy.engine.base.ResultProxy` uses special row-buffering
-  behavior when this feature is enabled, such that groups of 100 rows at a
-  time are fetched over the wire to reduce conversational overhead.
-  Note that the ``stream_results=True`` execution option is a more targeted
-  way of enabling this mode on a per-execution basis.
-* *use_native_unicode* - Enable the usage of Psycopg2 "native unicode" mode
-  per connection. True by default.
-
-Unix Domain Connections
-------------------------
-
-psycopg2 supports connecting via Unix domain connections. When the ``host``
-portion of the URL is omitted, SQLAlchemy passes ``None`` to psycopg2,
-which specifies Unix-domain communication rather than TCP/IP communication::
-
-    create_engine("postgresql+psycopg2://user:password@/dbname")
-
-By default, psycopg2 connects to a Unix-domain socket in ``/tmp``, or to
-whatever socket directory was specified when PostgreSQL was built. This
-value can be overridden by passing a pathname to psycopg2, using ``host``
-as an additional keyword argument::
-
-    create_engine("postgresql+psycopg2://user:password@/dbname?host=/var/lib/postgresql")
-
-See also:
-
-`PQconnectdbParams `_
-
-Per-Statement/Connection Execution Options
--------------------------------------------
-
-The following DBAPI-specific options are respected when used with
-:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
-:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
-
-* isolation_level - Set the transaction isolation level for the lifespan of a
-  :class:`.Connection` (can only be set on a connection, not a statement or query).
-  This includes the options ``SERIALIZABLE``, ``READ COMMITTED``,
-  ``READ UNCOMMITTED`` and ``REPEATABLE READ``.
-* stream_results - Enable or disable usage of server side cursors.
-  If ``None`` or not set, the ``server_side_cursors`` option of the :class:`.Engine` is used.
-
-Unicode
--------
-
-By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
-extension, such that the DBAPI receives and returns all strings as Python
-Unicode objects directly - SQLAlchemy passes these values through without
-change. Psycopg2 here will encode/decode string values based on the
-current "client encoding" setting; by default this is the value in
-the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
-Typically, this can be changed to ``utf-8``, as a more useful default::
-
-    #client_encoding = sql_ascii # actually, defaults to database
-                                 # encoding
-    client_encoding = utf8
-
-A second way to affect the client encoding is to set it within Psycopg2
-locally.
-SQLAlchemy will call psycopg2's ``set_client_encoding()``
-method
-(see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
-on all new connections based on the value passed to
-:func:`.create_engine` using the ``client_encoding`` parameter::
-
-    engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
-
-This overrides the encoding specified in the Postgresql client configuration.
-
-.. versionadded:: 0.7.3
-    The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine`.
-
-SQLAlchemy can also be instructed to skip the usage of the psycopg2
-``UNICODE`` extension and to instead utilize its own unicode encode/decode
-services, which are normally reserved only for those DBAPIs that don't
-fully support unicode directly. Passing ``use_native_unicode=False``
-to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
-SQLAlchemy will instead encode data itself into Python bytestrings on the way
-in and coerce from bytes on the way back,
-using the value of the :func:`.create_engine` ``encoding`` parameter, which
-defaults to ``utf-8``.
-SQLAlchemy's own unicode encode/decode functionality is steadily becoming
-obsolete as more DBAPIs support unicode fully along with the approach of
-Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
-
-Transactions
-------------
-
-The psycopg2 dialect fully supports SAVEPOINT and two-phase commit operations.
-
-.. _psycopg2_isolation:
-
-Transaction Isolation Level
---------------------------
-
-The ``isolation_level`` parameter of :func:`.create_engine` here makes use of
-psycopg2's ``set_isolation_level()`` connection method, rather than
-issuing a ``SET SESSION CHARACTERISTICS`` command. This is because psycopg2
-resets the isolation level on each new transaction, and needs to know
-at the API level what level should be used.
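A short sketch of both ways to set the level described above; the URL is a placeholder:

```python
# Sketch: engine-wide isolation level, plus a per-connection override via
# execution_options(), per the options documented above.
from sqlalchemy import create_engine

engine = create_engine(
    "postgresql+psycopg2://scott:tiger@localhost/test",
    isolation_level="READ COMMITTED",
)

# Override for a single connection; valid level names are listed above.
conn = engine.connect().execution_options(isolation_level="SERIALIZABLE")
```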
- -NOTICE logging ---------------- - -The psycopg2 dialect will log Postgresql NOTICE messages via the -``sqlalchemy.dialects.postgresql`` logger:: - - import logging - logging.getLogger('sqlalchemy.dialects.postgresql').setLevel(logging.INFO) - - -""" - -import re -import logging - -from sqlalchemy import util, exc -from sqlalchemy.util.compat import decimal -from sqlalchemy import processors -from sqlalchemy.engine import base -from sqlalchemy.sql import expression -from sqlalchemy import types as sqltypes -from sqlalchemy.dialects.postgresql.base import PGDialect, PGCompiler, \ - PGIdentifierPreparer, PGExecutionContext, \ - ENUM, ARRAY, _DECIMAL_TYPES, _FLOAT_TYPES,\ - _INT_TYPES - - -logger = logging.getLogger('sqlalchemy.dialects.postgresql') - - -class _PGNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - return None - - def result_processor(self, dialect, coltype): - if self.asdecimal: - if coltype in _FLOAT_TYPES: - return processors.to_decimal_processor_factory(decimal.Decimal) - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - # pg8000 returns Decimal natively for 1700 - return None - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - else: - if coltype in _FLOAT_TYPES: - # pg8000 returns float natively for 701 - return None - elif coltype in _DECIMAL_TYPES or coltype in _INT_TYPES: - return processors.to_float - else: - raise exc.InvalidRequestError( - "Unknown PG numeric type: %d" % coltype) - -class _PGEnum(ENUM): - def __init__(self, *arg, **kw): - super(_PGEnum, self).__init__(*arg, **kw) - # Py2K - if self.convert_unicode: - self.convert_unicode = "force" - # end Py2K - -class _PGArray(ARRAY): - def __init__(self, *arg, **kw): - super(_PGArray, self).__init__(*arg, **kw) - # Py2K - # FIXME: this check won't work for setups that - # have convert_unicode only on their create_engine(). - if isinstance(self.item_type, sqltypes.String) and \ - self.item_type.convert_unicode: - self.item_type.convert_unicode = "force" - # end Py2K - -# When we're handed literal SQL, ensure it's a SELECT-query. Since -# 8.3, combining cursors and "FOR UPDATE" has been fine. 
-SERVER_SIDE_CURSOR_RE = re.compile( - r'\s*SELECT', - re.I | re.UNICODE) - -_server_side_id = util.counter() - -class PGExecutionContext_psycopg2(PGExecutionContext): - def create_cursor(self): - # TODO: coverage for server side cursors + select.for_update() - - if self.dialect.server_side_cursors: - is_server_side = \ - self.execution_options.get('stream_results', True) and ( - (self.compiled and isinstance(self.compiled.statement, expression.Selectable) \ - or \ - ( - (not self.compiled or - isinstance(self.compiled.statement, expression._TextClause)) - and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement)) - ) - ) - else: - is_server_side = self.execution_options.get('stream_results', False) - - self.__is_server_side = is_server_side - if is_server_side: - # use server-side cursors: - # http://lists.initd.org/pipermail/psycopg/2007-January/005251.html - ident = "c_%s_%s" % (hex(id(self))[2:], hex(_server_side_id())[2:]) - return self._dbapi_connection.cursor(ident) - else: - return self._dbapi_connection.cursor() - - def get_result_proxy(self): - # TODO: ouch - if logger.isEnabledFor(logging.INFO): - self._log_notices(self.cursor) - - if self.__is_server_side: - return base.BufferedRowResultProxy(self) - else: - return base.ResultProxy(self) - - def _log_notices(self, cursor): - for notice in cursor.connection.notices: - # NOTICE messages have a - # newline character at the end - logger.info(notice.rstrip()) - - cursor.connection.notices[:] = [] - - -class PGCompiler_psycopg2(PGCompiler): - def visit_mod(self, binary, **kw): - return self.process(binary.left) + " %% " + self.process(binary.right) - - def post_process_text(self, text): - return text.replace('%', '%%') - - -class PGIdentifierPreparer_psycopg2(PGIdentifierPreparer): - def _escape_identifier(self, value): - value = value.replace(self.escape_quote, self.escape_to_quote) - return value.replace('%', '%%') - -class PGDialect_psycopg2(PGDialect): - driver = 'psycopg2' - # Py2K - supports_unicode_statements = False - # end Py2K - default_paramstyle = 'pyformat' - supports_sane_multi_rowcount = False - execution_ctx_cls = PGExecutionContext_psycopg2 - statement_compiler = PGCompiler_psycopg2 - preparer = PGIdentifierPreparer_psycopg2 - psycopg2_version = (0, 0) - - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric : _PGNumeric, - ENUM : _PGEnum, # needs force_unicode - sqltypes.Enum : _PGEnum, # needs force_unicode - ARRAY : _PGArray, # needs force_unicode - } - ) - - def __init__(self, server_side_cursors=False, use_native_unicode=True, - client_encoding=None, **kwargs): - PGDialect.__init__(self, **kwargs) - self.server_side_cursors = server_side_cursors - self.use_native_unicode = use_native_unicode - self.supports_unicode_binds = use_native_unicode - self.client_encoding = client_encoding - if self.dbapi and hasattr(self.dbapi, '__version__'): - m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?', - self.dbapi.__version__) - if m: - self.psycopg2_version = tuple( - int(x) - for x in m.group(1, 2, 3) - if x is not None) - - @classmethod - def dbapi(cls): - psycopg = __import__('psycopg2') - return psycopg - - @util.memoized_property - def _isolation_lookup(self): - extensions = __import__('psycopg2.extensions').extensions - return { - 'READ COMMITTED':extensions.ISOLATION_LEVEL_READ_COMMITTED, - 'READ UNCOMMITTED':extensions.ISOLATION_LEVEL_READ_UNCOMMITTED, - 'REPEATABLE READ':extensions.ISOLATION_LEVEL_REPEATABLE_READ, - 'SERIALIZABLE':extensions.ISOLATION_LEVEL_SERIALIZABLE - } - - def 
set_isolation_level(self, connection, level): - try: - level = self._isolation_lookup[level.replace('_', ' ')] - except KeyError: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. " - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - - connection.set_isolation_level(level) - - def on_connect(self): - fns = [] - if self.client_encoding is not None: - def on_connect(conn): - conn.set_client_encoding(self.client_encoding) - fns.append(on_connect) - - if self.isolation_level is not None: - def on_connect(conn): - self.set_isolation_level(conn, self.isolation_level) - fns.append(on_connect) - - if self.dbapi and self.use_native_unicode: - extensions = __import__('psycopg2.extensions').extensions - def on_connect(conn): - extensions.register_type(extensions.UNICODE, conn) - fns.append(on_connect) - - if fns: - def on_connect(conn): - for fn in fns: - fn(conn) - return on_connect - else: - return None - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, self.dbapi.OperationalError): - # these error messages from libpq: interfaces/libpq/fe-misc.c. - # TODO: these are sent through gettext in libpq and we can't - # check within other locales - consider using connection.closed - return 'terminating connection' in str(e) or \ - 'closed the connection' in str(e) or \ - 'connection not open' in str(e) or \ - 'could not receive data from server' in str(e) - elif isinstance(e, self.dbapi.InterfaceError): - # psycopg2 client errors, psycopg2/conenction.h, psycopg2/cursor.h - return 'connection already closed' in str(e) or \ - 'cursor already closed' in str(e) - elif isinstance(e, self.dbapi.ProgrammingError): - # not sure where this path is originally from, it may - # be obsolete. It really says "losed", not "closed". - return "losed the connection unexpectedly" in str(e) - else: - return False - -dialect = PGDialect_psycopg2 - diff --git a/libs/sqlalchemy/dialects/postgresql/pypostgresql.py b/libs/sqlalchemy/dialects/postgresql/pypostgresql.py deleted file mode 100644 index 5303d047..00000000 --- a/libs/sqlalchemy/dialects/postgresql/pypostgresql.py +++ /dev/null @@ -1,73 +0,0 @@ -# postgresql/pypostgresql.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the PostgreSQL database via py-postgresql. - -Connecting ----------- - -URLs are of the form ``postgresql+pypostgresql://user:password@host:port/dbname[?key=value&key=value...]``. 
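A one-line sketch of the URL form above; py-postgresql targets Python 3, and all connection details here are placeholders:

```python
# Sketch: engine using the py-postgresql driver via the URL scheme above.
from sqlalchemy import create_engine

engine = create_engine("postgresql+pypostgresql://scott:tiger@localhost:5432/test")
```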
- - -""" -from sqlalchemy import util -from sqlalchemy import types as sqltypes -from sqlalchemy.dialects.postgresql.base import PGDialect, PGExecutionContext -from sqlalchemy import processors - -class PGNumeric(sqltypes.Numeric): - def bind_processor(self, dialect): - return processors.to_str - - def result_processor(self, dialect, coltype): - if self.asdecimal: - return None - else: - return processors.to_float - -class PGExecutionContext_pypostgresql(PGExecutionContext): - pass - -class PGDialect_pypostgresql(PGDialect): - driver = 'pypostgresql' - - supports_unicode_statements = True - supports_unicode_binds = True - description_encoding = None - default_paramstyle = 'pyformat' - - # requires trunk version to support sane rowcounts - # TODO: use dbapi version information to set this flag appropriately - supports_sane_rowcount = True - supports_sane_multi_rowcount = False - - execution_ctx_cls = PGExecutionContext_pypostgresql - colspecs = util.update_copy( - PGDialect.colspecs, - { - sqltypes.Numeric : PGNumeric, - sqltypes.Float: sqltypes.Float, # prevents PGNumeric from being used - } - ) - - @classmethod - def dbapi(cls): - from postgresql.driver import dbapi20 - return dbapi20 - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user') - if 'port' in opts: - opts['port'] = int(opts['port']) - else: - opts['port'] = 5432 - opts.update(url.query) - return ([], opts) - - def is_disconnect(self, e, connection, cursor): - return "connection is closed" in str(e) - -dialect = PGDialect_pypostgresql diff --git a/libs/sqlalchemy/dialects/postgresql/zxjdbc.py b/libs/sqlalchemy/dialects/postgresql/zxjdbc.py deleted file mode 100644 index 4aea9c9b..00000000 --- a/libs/sqlalchemy/dialects/postgresql/zxjdbc.py +++ /dev/null @@ -1,42 +0,0 @@ -# postgresql/zxjdbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the PostgreSQL database via the zxjdbc JDBC connector. - -JDBC Driver ------------ - -The official Postgresql JDBC driver is at http://jdbc.postgresql.org/. 
- -""" -from sqlalchemy.connectors.zxJDBC import ZxJDBCConnector -from sqlalchemy.dialects.postgresql.base import PGDialect, PGExecutionContext - -class PGExecutionContext_zxjdbc(PGExecutionContext): - - def create_cursor(self): - cursor = self._dbapi_connection.cursor() - cursor.datahandler = self.dialect.DataHandler(cursor.datahandler) - return cursor - - -class PGDialect_zxjdbc(ZxJDBCConnector, PGDialect): - jdbc_db_name = 'postgresql' - jdbc_driver_name = 'org.postgresql.Driver' - - execution_ctx_cls = PGExecutionContext_zxjdbc - - supports_native_decimal = True - - def __init__(self, *args, **kwargs): - super(PGDialect_zxjdbc, self).__init__(*args, **kwargs) - from com.ziclix.python.sql.handler import PostgresqlDataHandler - self.DataHandler = PostgresqlDataHandler - - def _get_server_version_info(self, connection): - return tuple(int(x) for x in connection.connection.dbversion.split('.')) - -dialect = PGDialect_zxjdbc diff --git a/libs/sqlalchemy/dialects/sqlite/__init__.py b/libs/sqlalchemy/dialects/sqlite/__init__.py deleted file mode 100644 index c1157b63..00000000 --- a/libs/sqlalchemy/dialects/sqlite/__init__.py +++ /dev/null @@ -1,20 +0,0 @@ -# sqlite/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.sqlite import base, pysqlite - -# default dialect -base.dialect = pysqlite.dialect - - -from sqlalchemy.dialects.sqlite.base import \ - BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL, FLOAT, INTEGER, REAL,\ - NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR, dialect - -__all__ = ( - 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'FLOAT', 'INTEGER', - 'NUMERIC', 'SMALLINT', 'TEXT', 'TIME', 'TIMESTAMP', 'VARCHAR', 'dialect', 'REAL' -) \ No newline at end of file diff --git a/libs/sqlalchemy/dialects/sqlite/base.py b/libs/sqlalchemy/dialects/sqlite/base.py deleted file mode 100644 index 9118ace2..00000000 --- a/libs/sqlalchemy/dialects/sqlite/base.py +++ /dev/null @@ -1,864 +0,0 @@ -# sqlite/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the SQLite database. - -For information on connecting using a specific driver, see the documentation -section regarding that driver. - -Date and Time Types -------------------- - -SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide -out of the box functionality for translating values between Python `datetime` objects -and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime` -and related types provide date formatting and parsing functionality when SQlite is used. -The implementation classes are :class:`~.sqlite.DATETIME`, :class:`~.sqlite.DATE` and :class:`~.sqlite.TIME`. -These types represent dates and times as ISO formatted strings, which also nicely -support ordering. There's no reliance on typical "libc" internals for these functions -so historical dates are fully supported. - -Auto Incrementing Behavior --------------------------- - -Background on SQLite's autoincrement is at: http://sqlite.org/autoinc.html - -Two things to note: - -* The AUTOINCREMENT keyword is **not** required for SQLite tables to - generate primary key values automatically. 
-  AUTOINCREMENT only means that
-  the algorithm used to generate ROWID values should be slightly different.
-* SQLite does **not** generate primary key (i.e. ROWID) values, even for
-  one column, if the table has a composite (i.e. multi-column) primary key.
-  This is regardless of the AUTOINCREMENT keyword being present or not.
-
-To specifically render the AUTOINCREMENT keyword on the primary key
-column when rendering DDL, add the flag ``sqlite_autoincrement=True``
-to the Table construct::
-
-    Table('sometable', metadata,
-            Column('id', Integer, primary_key=True),
-            sqlite_autoincrement=True)
-
-Transaction Isolation Level
----------------------------
-
-:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
-the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
-connection. Valid values for this parameter are ``SERIALIZABLE`` and
-``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
-See the section :ref:`pysqlite_serializable` for an important workaround
-when using serializable isolation with Pysqlite.
-
-Database Locking Behavior / Concurrency
----------------------------------------
-
-Note that SQLite is not designed for a high level of concurrency. The database
-itself, being a file, is locked completely during write operations and within
-transactions, meaning exactly one connection has exclusive access to the database
-during this period - all other connections will be blocked during this time.
-
-The Python DBAPI specification also calls for a connection model that is always
-in a transaction; there is no BEGIN method, only commit and rollback. This implies
-that a SQLite DBAPI driver would technically allow only serialized access to a
-particular database file at all times. The pysqlite driver attempts to ameliorate this by
-deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
-DELETE) is received within a transaction. While this breaks serializable isolation,
-it at least delays the exclusive locking inherent in SQLite's design.
-
-SQLAlchemy's default mode of usage with the ORM is known
-as "autocommit=False", which means the moment the :class:`.Session` begins to be
-used, a transaction is begun. As the :class:`.Session` is used, the autoflush
-feature, also on by default, will flush out pending changes to the database
-before each query. The effect of this is that a :class:`.Session` used in its
-default mode will often emit DML early on, long before the transaction is actually
-committed. This again will have the effect of serializing access to the SQLite
-database. If highly concurrent reads are desired against the SQLite database,
-it is advised that the autoflush feature be disabled, and potentially even
-that autocommit be re-enabled, which has the effect of each SQL statement and
-flush committing changes immediately.
-
-For more information on SQLite's lack of concurrency by design, please
-see `Situations Where Another RDBMS May Work Better - High Concurrency `_
-near the bottom of the page.
-
-.. _sqlite_foreign_keys:
-
-Foreign Key Support
--------------------
-
-SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
-however by default these constraints have no effect on the operation
-of the table.
-
-Constraint checking on SQLite has three prerequisites:
-
-* At least version 3.6.19 of SQLite must be in use
-* The SQLite library must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
-  or SQLITE_OMIT_TRIGGER symbols enabled.
-* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections - before use. - -SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically -for new connections through the usage of events:: - - from sqlalchemy.engine import Engine - from sqlalchemy import event - - @event.listens_for(Engine, "connect") - def set_sqlite_pragma(dbapi_connection, connection_record): - cursor = dbapi_connection.cursor() - cursor.execute("PRAGMA foreign_keys=ON") - cursor.close() - -.. seealso:: - - `SQLite Foreign Key Support `_ - - on the SQLite web site. - - :ref:`event_toplevel` - SQLAlchemy event API. - -""" - -import datetime, re - -from sqlalchemy import sql, exc -from sqlalchemy.engine import default, base, reflection -from sqlalchemy import types as sqltypes -from sqlalchemy import util -from sqlalchemy.sql import compiler -from sqlalchemy import processors - -from sqlalchemy.types import BLOB, BOOLEAN, CHAR, DATE, DATETIME, DECIMAL,\ - FLOAT, REAL, INTEGER, NUMERIC, SMALLINT, TEXT, TIME, TIMESTAMP, VARCHAR - -class _DateTimeMixin(object): - _reg = None - _storage_format = None - - def __init__(self, storage_format=None, regexp=None, **kw): - super(_DateTimeMixin, self).__init__(**kw) - if regexp is not None: - self._reg = re.compile(regexp) - if storage_format is not None: - self._storage_format = storage_format - -class DATETIME(_DateTimeMixin, sqltypes.DateTime): - """Represent a Python datetime object in SQLite using a string. - - The default string storage format is:: - - "%04d-%02d-%02d %02d:%02d:%02d.%06d" % (value.year, - value.month, value.day, - value.hour, value.minute, - value.second, value.microsecond) - - e.g.:: - - 2011-03-15 12:05:57.10558 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import DATETIME - - dt = DATETIME( - storage_format="%04d/%02d/%02d %02d-%02d-%02d-%06d", - regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)(?:-(\d+))?") - ) - - :param storage_format: format string which will be applied to the - tuple ``(value.year, value.month, value.day, value.hour, - value.minute, value.second, value.microsecond)``, given a - Python datetime.datetime() object. - - :param regexp: regular expression which will be applied to - incoming result rows. The resulting match object is applied to - the Python datetime() constructor via ``*map(int, - match_obj.groups(0))``. - """ - - _storage_format = "%04d-%02d-%02d %02d:%02d:%02d.%06d" - - def bind_processor(self, dialect): - datetime_datetime = datetime.datetime - datetime_date = datetime.date - format = self._storage_format - def process(value): - if value is None: - return None - elif isinstance(value, datetime_datetime): - return format % (value.year, value.month, value.day, - value.hour, value.minute, value.second, - value.microsecond) - elif isinstance(value, datetime_date): - return format % (value.year, value.month, value.day, - 0, 0, 0, 0) - else: - raise TypeError("SQLite DateTime type only accepts Python " - "datetime and date objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.datetime) - else: - return processors.str_to_datetime - -class DATE(_DateTimeMixin, sqltypes.Date): - """Represent a Python date object in SQLite using a string. 
- - The default string storage format is:: - - "%04d-%02d-%02d" % (value.year, value.month, value.day) - - e.g.:: - - 2011-03-15 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import DATE - - d = DATE( - storage_format="%02d/%02d/%02d", - regexp=re.compile("(\d+)/(\d+)/(\d+)") - ) - - :param storage_format: format string which will be applied to the - tuple ``(value.year, value.month, value.day)``, - given a Python datetime.date() object. - - :param regexp: regular expression which will be applied to - incoming result rows. The resulting match object is applied to - the Python date() constructor via ``*map(int, - match_obj.groups(0))``. - - """ - - _storage_format = "%04d-%02d-%02d" - - def bind_processor(self, dialect): - datetime_date = datetime.date - format = self._storage_format - def process(value): - if value is None: - return None - elif isinstance(value, datetime_date): - return format % (value.year, value.month, value.day) - else: - raise TypeError("SQLite Date type only accepts Python " - "date objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.date) - else: - return processors.str_to_date - -class TIME(_DateTimeMixin, sqltypes.Time): - """Represent a Python time object in SQLite using a string. - - The default string storage format is:: - - "%02d:%02d:%02d.%06d" % (value.hour, value.minute, - value.second, - value.microsecond) - - e.g.:: - - 12:05:57.10558 - - The storage format can be customized to some degree using the - ``storage_format`` and ``regexp`` parameters, such as:: - - import re - from sqlalchemy.dialects.sqlite import TIME - - t = TIME( - storage_format="%02d-%02d-%02d-%06d", - regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?") - ) - - :param storage_format: format string which will be applied - to the tuple ``(value.hour, value.minute, value.second, - value.microsecond)``, given a Python datetime.time() object. - - :param regexp: regular expression which will be applied to - incoming result rows. The resulting match object is applied to - the Python time() constructor via ``*map(int, - match_obj.groups(0))``. 
- - """ - - _storage_format = "%02d:%02d:%02d.%06d" - - def bind_processor(self, dialect): - datetime_time = datetime.time - format = self._storage_format - def process(value): - if value is None: - return None - elif isinstance(value, datetime_time): - return format % (value.hour, value.minute, value.second, - value.microsecond) - else: - raise TypeError("SQLite Time type only accepts Python " - "time objects as input.") - return process - - def result_processor(self, dialect, coltype): - if self._reg: - return processors.str_to_datetime_processor_factory( - self._reg, datetime.time) - else: - return processors.str_to_time - -colspecs = { - sqltypes.Date: DATE, - sqltypes.DateTime: DATETIME, - sqltypes.Time: TIME, -} - -ischema_names = { - 'BLOB': sqltypes.BLOB, - 'BOOL': sqltypes.BOOLEAN, - 'BOOLEAN': sqltypes.BOOLEAN, - 'CHAR': sqltypes.CHAR, - 'DATE': sqltypes.DATE, - 'DATETIME': sqltypes.DATETIME, - 'DECIMAL': sqltypes.DECIMAL, - 'FLOAT': sqltypes.FLOAT, - 'INT': sqltypes.INTEGER, - 'INTEGER': sqltypes.INTEGER, - 'NUMERIC': sqltypes.NUMERIC, - 'REAL': sqltypes.REAL, - 'SMALLINT': sqltypes.SMALLINT, - 'TEXT': sqltypes.TEXT, - 'TIME': sqltypes.TIME, - 'TIMESTAMP': sqltypes.TIMESTAMP, - 'VARCHAR': sqltypes.VARCHAR, -} - - - -class SQLiteCompiler(compiler.SQLCompiler): - extract_map = util.update_copy( - compiler.SQLCompiler.extract_map, - { - 'month': '%m', - 'day': '%d', - 'year': '%Y', - 'second': '%S', - 'hour': '%H', - 'doy': '%j', - 'minute': '%M', - 'epoch': '%s', - 'dow': '%w', - 'week': '%W' - }) - - def visit_now_func(self, fn, **kw): - return "CURRENT_TIMESTAMP" - - def visit_localtimestamp_func(self, func, **kw): - return 'DATETIME(CURRENT_TIMESTAMP, "localtime")' - - def visit_true(self, expr, **kw): - return '1' - - def visit_false(self, expr, **kw): - return '0' - - def visit_char_length_func(self, fn, **kw): - return "length%s" % self.function_argspec(fn) - - def visit_cast(self, cast, **kwargs): - if self.dialect.supports_cast: - return super(SQLiteCompiler, self).visit_cast(cast) - else: - return self.process(cast.clause) - - def visit_extract(self, extract, **kw): - try: - return "CAST(STRFTIME('%s', %s) AS INTEGER)" % ( - self.extract_map[extract.field], self.process(extract.expr, **kw)) - except KeyError: - raise exc.CompileError( - "%s is not a valid extract argument." 
% extract.field) - - def limit_clause(self, select): - text = "" - if select._limit is not None: - text += "\n LIMIT " + self.process(sql.literal(select._limit)) - if select._offset is not None: - if select._limit is None: - text += "\n LIMIT " + self.process(sql.literal(-1)) - text += " OFFSET " + self.process(sql.literal(select._offset)) - else: - text += " OFFSET " + self.process(sql.literal(0)) - return text - - def for_update_clause(self, select): - # sqlite has no "FOR UPDATE" AFAICT - return '' - - -class SQLiteDDLCompiler(compiler.DDLCompiler): - - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + self.dialect.type_compiler.process(column.type) - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - - if column.primary_key and \ - column.table.kwargs.get('sqlite_autoincrement', False) and \ - len(column.table.primary_key.columns) == 1 and \ - issubclass(column.type._type_affinity, sqltypes.Integer) and \ - not column.foreign_keys: - colspec += " PRIMARY KEY AUTOINCREMENT" - - return colspec - - def visit_primary_key_constraint(self, constraint): - # for columns with sqlite_autoincrement=True, - # the PRIMARY KEY constraint can only be inline - # with the column itself. - if len(constraint.columns) == 1: - c = list(constraint)[0] - if c.primary_key and \ - c.table.kwargs.get('sqlite_autoincrement', False) and \ - issubclass(c.type._type_affinity, sqltypes.Integer) and \ - not c.foreign_keys: - return None - - return super(SQLiteDDLCompiler, self).\ - visit_primary_key_constraint(constraint) - - def visit_foreign_key_constraint(self, constraint): - - local_table = constraint._elements.values()[0].parent.table - remote_table = list(constraint._elements.values())[0].column.table - - if local_table.schema != remote_table.schema: - return None - else: - return super(SQLiteDDLCompiler, self).visit_foreign_key_constraint(constraint) - - def define_constraint_remote_table(self, constraint, table, preparer): - """Format the remote table clause of a CREATE CONSTRAINT clause.""" - - return preparer.format_table(table, use_schema=False) - - def visit_create_index(self, create): - index = create.element - preparer = self.preparer - text = "CREATE " - if index.unique: - text += "UNIQUE " - text += "INDEX %s ON %s (%s)" \ - % (preparer.format_index(index, - name=self._index_identifier(index.name)), - preparer.format_table(index.table, use_schema=False), - ', '.join(preparer.quote(c.name, c.quote) - for c in index.columns)) - return text - -class SQLiteTypeCompiler(compiler.GenericTypeCompiler): - def visit_large_binary(self, type_): - return self.visit_BLOB(type_) - -class SQLiteIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = set([ - 'add', 'after', 'all', 'alter', 'analyze', 'and', 'as', 'asc', - 'attach', 'autoincrement', 'before', 'begin', 'between', 'by', - 'cascade', 'case', 'cast', 'check', 'collate', 'column', 'commit', - 'conflict', 'constraint', 'create', 'cross', 'current_date', - 'current_time', 'current_timestamp', 'database', 'default', - 'deferrable', 'deferred', 'delete', 'desc', 'detach', 'distinct', - 'drop', 'each', 'else', 'end', 'escape', 'except', 'exclusive', - 'explain', 'false', 'fail', 'for', 'foreign', 'from', 'full', 'glob', - 'group', 'having', 'if', 'ignore', 'immediate', 'in', 'index', - 'indexed', 'initially', 'inner', 'insert', 'instead', 'intersect', 'into', 'is', - 'isnull', 
'join', 'key', 'left', 'like', 'limit', 'match', 'natural', - 'not', 'notnull', 'null', 'of', 'offset', 'on', 'or', 'order', 'outer', - 'plan', 'pragma', 'primary', 'query', 'raise', 'references', - 'reindex', 'rename', 'replace', 'restrict', 'right', 'rollback', - 'row', 'select', 'set', 'table', 'temp', 'temporary', 'then', 'to', - 'transaction', 'trigger', 'true', 'union', 'unique', 'update', 'using', - 'vacuum', 'values', 'view', 'virtual', 'when', 'where', - ]) - - def format_index(self, index, use_schema=True, name=None): - """Prepare a quoted index and schema name.""" - - if name is None: - name = index.name - result = self.quote(name, index.quote) - if not self.omit_schema and use_schema and getattr(index.table, "schema", None): - result = self.quote_schema(index.table.schema, index.table.quote_schema) + "." + result - return result - -class SQLiteExecutionContext(default.DefaultExecutionContext): - @util.memoized_property - def _preserve_raw_colnames(self): - return self.execution_options.get("sqlite_raw_colnames", False) - - def _translate_colname(self, colname): - # adjust for dotted column names. SQLite - # in the case of UNION may store col names as - # "tablename.colname" - # in cursor.description - if not self._preserve_raw_colnames and "." in colname: - return colname.split(".")[1], colname - else: - return colname, None - - -class SQLiteDialect(default.DefaultDialect): - name = 'sqlite' - supports_alter = False - supports_unicode_statements = True - supports_unicode_binds = True - supports_default_values = True - supports_empty_insert = False - supports_cast = True - - default_paramstyle = 'qmark' - execution_ctx_cls = SQLiteExecutionContext - statement_compiler = SQLiteCompiler - ddl_compiler = SQLiteDDLCompiler - type_compiler = SQLiteTypeCompiler - preparer = SQLiteIdentifierPreparer - ischema_names = ischema_names - colspecs = colspecs - isolation_level = None - - supports_cast = True - supports_default_values = True - - _broken_fk_pragma_quotes = False - - def __init__(self, isolation_level=None, native_datetime=False, **kwargs): - default.DefaultDialect.__init__(self, **kwargs) - self.isolation_level = isolation_level - - # this flag used by pysqlite dialect, and perhaps others in the - # future, to indicate the driver is handling date/timestamp - # conversions (and perhaps datetime/time as well on some - # hypothetical driver ?) - self.native_datetime = native_datetime - - if self.dbapi is not None: - self.supports_default_values = \ - self.dbapi.sqlite_version_info >= (3, 3, 8) - self.supports_cast = \ - self.dbapi.sqlite_version_info >= (3, 2, 3) - - # see http://www.sqlalchemy.org/trac/ticket/2568 - # as well as http://www.sqlite.org/src/info/600482d161 - self._broken_fk_pragma_quotes = \ - self.dbapi.sqlite_version_info < (3, 6, 14) - - - _isolation_lookup = { - 'READ UNCOMMITTED':1, - 'SERIALIZABLE':0 - } - def set_isolation_level(self, connection, level): - try: - isolation_level = self._isolation_lookup[level.replace('_', ' ')] - except KeyError: - raise exc.ArgumentError( - "Invalid value '%s' for isolation_level. 
" - "Valid isolation levels for %s are %s" % - (level, self.name, ", ".join(self._isolation_lookup)) - ) - cursor = connection.cursor() - cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level) - cursor.close() - - def get_isolation_level(self, connection): - cursor = connection.cursor() - cursor.execute('PRAGMA read_uncommitted') - res = cursor.fetchone() - if res: - value = res[0] - else: - # http://www.sqlite.org/changes.html#version_3_3_3 - # "Optional READ UNCOMMITTED isolation (instead of the - # default isolation level of SERIALIZABLE) and - # table level locking when database connections - # share a common cache."" - # pre-SQLite 3.3.0 default to 0 - value = 0 - cursor.close() - if value == 0: - return "SERIALIZABLE" - elif value == 1: - return "READ UNCOMMITTED" - else: - assert False, "Unknown isolation level %s" % value - - def on_connect(self): - if self.isolation_level is not None: - def connect(conn): - self.set_isolation_level(conn, self.isolation_level) - return connect - else: - return None - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - s = ("SELECT name FROM %s " - "WHERE type='table' ORDER BY name") % (master,) - rs = connection.execute(s) - else: - try: - s = ("SELECT name FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE type='table' ORDER BY name") - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT name FROM sqlite_master " - "WHERE type='table' ORDER BY name") - rs = connection.execute(s) - - return [row[0] for row in rs] - - def has_table(self, connection, table_name, schema=None): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." 
% quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - cursor = _pragma_cursor(connection.execute("%stable_info(%s)" % (pragma, qtable))) - row = cursor.fetchone() - - # consume remaining rows, to work around - # http://www.sqlite.org/cvstrac/tktview?tn=1884 - while not cursor.closed and cursor.fetchone() is not None: - pass - - return (row is not None) - - @reflection.cache - def get_view_names(self, connection, schema=None, **kw): - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - s = ("SELECT name FROM %s " - "WHERE type='view' ORDER BY name") % (master,) - rs = connection.execute(s) - else: - try: - s = ("SELECT name FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE type='view' ORDER BY name") - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT name FROM sqlite_master " - "WHERE type='view' ORDER BY name") - rs = connection.execute(s) - - return [row[0] for row in rs] - - @reflection.cache - def get_view_definition(self, connection, view_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - qschema = self.identifier_preparer.quote_identifier(schema) - master = '%s.sqlite_master' % qschema - s = ("SELECT sql FROM %s WHERE name = '%s'" - "AND type='view'") % (master, view_name) - rs = connection.execute(s) - else: - try: - s = ("SELECT sql FROM " - " (SELECT * FROM sqlite_master UNION ALL " - " SELECT * FROM sqlite_temp_master) " - "WHERE name = '%s' " - "AND type='view'") % view_name - rs = connection.execute(s) - except exc.DBAPIError: - s = ("SELECT sql FROM sqlite_master WHERE name = '%s' " - "AND type='view'") % view_name - rs = connection.execute(s) - - result = rs.fetchall() - if result: - return result[0].sql - - @reflection.cache - def get_columns(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." 
% quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - c = _pragma_cursor( - connection.execute("%stable_info(%s)" % - (pragma, qtable))) - - rows = c.fetchall() - columns = [] - for row in rows: - (name, type_, nullable, default, primary_key) = \ - (row[1], row[2].upper(), not row[3], - row[4], row[5]) - - columns.append(self._get_column_info(name, type_, nullable, - default, primary_key)) - return columns - - def _get_column_info(self, name, type_, nullable, - default, primary_key): - - match = re.match(r'(\w+)(\(.*?\))?', type_) - if match: - coltype = match.group(1) - args = match.group(2) - else: - coltype = "VARCHAR" - args = '' - try: - coltype = self.ischema_names[coltype] - if args is not None: - args = re.findall(r'(\d+)', args) - coltype = coltype(*[int(a) for a in args]) - except KeyError: - util.warn("Did not recognize type '%s' of column '%s'" % - (coltype, name)) - coltype = sqltypes.NullType() - - if default is not None: - default = unicode(default) - - return { - 'name': name, - 'type': coltype, - 'nullable': nullable, - 'default': default, - 'autoincrement': default is None, - 'primary_key': primary_key - } - - @reflection.cache - def get_primary_keys(self, connection, table_name, schema=None, **kw): - cols = self.get_columns(connection, table_name, schema, **kw) - pkeys = [] - for col in cols: - if col['primary_key']: - pkeys.append(col['name']) - return pkeys - - @reflection.cache - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - qtable = quote(table_name) - statement = "%sforeign_key_list(%s)" % (pragma, qtable) - c = _pragma_cursor(connection.execute(statement)) - fkeys = [] - fks = {} - while True: - row = c.fetchone() - if row is None: - break - (numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4]) - - self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol) - return fkeys - - def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol): - # sqlite won't return rcol if the table - # was created with REFERENCES , no col - if rcol is None: - rcol = lcol - - if self._broken_fk_pragma_quotes: - rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl) - - try: - fk = fks[numerical_id] - except KeyError: - fk = { - 'name': None, - 'constrained_columns': [], - 'referred_schema': None, - 'referred_table': rtbl, - 'referred_columns': [] - } - fkeys.append(fk) - fks[numerical_id] = fk - - if lcol not in fk['constrained_columns']: - fk['constrained_columns'].append(lcol) - if rcol not in fk['referred_columns']: - fk['referred_columns'].append(rcol) - return fk - - @reflection.cache - def get_indexes(self, connection, table_name, schema=None, **kw): - quote = self.identifier_preparer.quote_identifier - if schema is not None: - pragma = "PRAGMA %s." % quote(schema) - else: - pragma = "PRAGMA " - include_auto_indexes = kw.pop('include_auto_indexes', False) - qtable = quote(table_name) - c = _pragma_cursor(connection.execute("%sindex_list(%s)" % (pragma, qtable))) - indexes = [] - while True: - row = c.fetchone() - if row is None: - break - # ignore implicit primary key index. - # http://www.mail-archive.com/sqlite-users@sqlite.org/msg30517.html - elif not include_auto_indexes and row[1].startswith('sqlite_autoindex'): - continue - - indexes.append(dict(name=row[1], column_names=[], unique=row[2])) - # loop thru unique indexes to get the column names. 
- for idx in indexes: - c = connection.execute("%sindex_info(%s)" % (pragma, quote(idx['name']))) - cols = idx['column_names'] - while True: - row = c.fetchone() - if row is None: - break - cols.append(row[2]) - return indexes - - -def _pragma_cursor(cursor): - """work around SQLite issue whereby cursor.description - is blank when PRAGMA returns no rows.""" - - if cursor.closed: - cursor.fetchone = lambda: None - cursor.fetchall = lambda: [] - return cursor diff --git a/libs/sqlalchemy/dialects/sqlite/pysqlite.py b/libs/sqlalchemy/dialects/sqlite/pysqlite.py deleted file mode 100644 index 826eefd8..00000000 --- a/libs/sqlalchemy/dialects/sqlite/pysqlite.py +++ /dev/null @@ -1,324 +0,0 @@ -# sqlite/pysqlite.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for the SQLite database via pysqlite. - -Note that pysqlite is the same driver as the ``sqlite3`` -module included with the Python distribution. - -Driver ------- - -When using Python 2.5 and above, the built in ``sqlite3`` driver is -already installed and no additional installation is needed. Otherwise, -the ``pysqlite2`` driver needs to be present. This is the same driver as -``sqlite3``, just with a different name. - -The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3`` -is loaded. This allows an explicitly installed pysqlite driver to take -precedence over the built in one. As with all dialects, a specific -DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control -this explicitly:: - - from sqlite3 import dbapi2 as sqlite - e = create_engine('sqlite+pysqlite:///file.db', module=sqlite) - -Full documentation on pysqlite is available at: -``_ - -Connect Strings ---------------- - -The file specification for the SQLite database is taken as the "database" portion of -the URL. Note that the format of a url is:: - - driver://user:pass@host/database - -This means that the actual filename to be used starts with the characters to the -**right** of the third slash. So connecting to a relative filepath looks like:: - - # relative path - e = create_engine('sqlite:///path/to/database.db') - -An absolute path, which is denoted by starting with a slash, means you need **four** -slashes:: - - # absolute path - e = create_engine('sqlite:////path/to/database.db') - -To use a Windows path, regular drive specifications and backslashes can be used. -Double backslashes are probably needed:: - - # absolute path on Windows - e = create_engine('sqlite:///C:\\\\path\\\\to\\\\database.db') - -The sqlite ``:memory:`` identifier is the default if no filepath is present. Specify -``sqlite://`` and nothing else:: - - # in-memory database - e = create_engine('sqlite://') - -Compatibility with sqlite3 "native" date and datetime types ------------------------------------------------------------ - -The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and -sqlite3.PARSE_COLNAMES options, which have the effect of any column -or expression explicitly cast as "date" or "timestamp" will be converted -to a Python date or datetime object. The date and datetime types provided -with the pysqlite dialect are not currently compatible with these options, -since they render the ISO date/datetime including microseconds, which -pysqlite's driver does not. 
-Additionally, SQLAlchemy does not at
-this time automatically render the "cast" syntax required for the
-freestanding functions "current_timestamp" and "current_date" to return
-datetime/date types natively. Unfortunately, pysqlite
-does not provide the standard DBAPI types in ``cursor.description``,
-leaving SQLAlchemy with no way to detect these types on the fly
-without expensive per-row type checks.
-
-Keeping in mind that pysqlite's parsing option is not recommended,
-nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
-can be forced if one configures "native_datetime=True" on create_engine()::
-
-    engine = create_engine('sqlite://',
-        connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
-        native_datetime=True
-    )
-
-With this flag enabled, the DATE and TIMESTAMP types (but note - not the DATETIME
-or TIME types...confused yet?) will not perform any bind parameter or result
-processing. Execution of "func.current_date()" will return a string.
-"func.current_timestamp()" is registered as returning a DATETIME type in
-SQLAlchemy, so this function still receives SQLAlchemy-level result processing.
-
-Threading/Pooling Behavior
---------------------------
-
-Pysqlite's default behavior is to prohibit the usage of a single connection
-in more than one thread. This was originally intended to work with older versions
-of SQLite that did not support multithreaded operation under
-various circumstances. In particular, older SQLite versions
-did not allow a ``:memory:`` database to be used in multiple threads
-under any circumstances.
-
-Pysqlite does include a now-undocumented flag known as
-``check_same_thread`` which will disable this check, however note that pysqlite
-connections are still not safe to use concurrently in multiple threads.
-In particular, any statement execution calls would need to be externally
-mutexed, as Pysqlite does not provide for thread-safe propagation of error
-messages among other things. So while even ``:memory:`` databases can be
-shared among threads in modern SQLite, Pysqlite doesn't provide enough
-thread-safety to make this usage worth it.
-
-SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
-
-* When a ``:memory:`` SQLite database is specified, the dialect by default will use
-  :class:`.SingletonThreadPool`. This pool maintains a single connection per
-  thread, so that all access to the engine within the current thread uses the
-  same ``:memory:`` database - other threads would access a different
-  ``:memory:`` database.
-* When a file-based database is specified, the dialect will use :class:`.NullPool`
-  as the source of connections. This pool closes and discards connections
-  which are returned to the pool immediately. SQLite file-based connections
-  have extremely low overhead, so pooling is not necessary. The scheme also
-  prevents a connection from being used again in a different thread and works
-  best with SQLite's coarse-grained file locking.
-
-  .. versionchanged:: 0.7
-      Default selection of :class:`.NullPool` for SQLite file-based databases.
-      Previous versions selected :class:`.SingletonThreadPool` by
-      default for all SQLite databases.
-
-
-Using a Memory Database in Multiple Threads
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-To use a ``:memory:`` database in a multithreaded scenario, the same connection
-object must be shared among threads, since the database exists
-only within the scope of that connection.
-The :class:`.StaticPool` implementation
-will maintain a single connection globally, and the ``check_same_thread`` flag
-can be passed to Pysqlite as ``False``::
-
-    from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite://',
-                        connect_args={'check_same_thread':False},
-                        poolclass=StaticPool)
-
-Note that using a ``:memory:`` database in multiple threads requires a recent
-version of SQLite.
-
-Using Temporary Tables with SQLite
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Due to the way SQLite deals with temporary tables, if you wish to use a temporary table
-in a file-based SQLite database across multiple checkouts from the connection pool, such
-as when using an ORM :class:`.Session` where the temporary table should continue to remain
-after :meth:`.commit` or :meth:`.rollback` is called,
-a pool which maintains a single connection must be used. Use :class:`.SingletonThreadPool`
-if the scope is only needed within the current thread, or :class:`.StaticPool` if scope is
-needed within multiple threads for this case::
-
-    # maintain the same connection per thread
-    from sqlalchemy.pool import SingletonThreadPool
-    engine = create_engine('sqlite:///mydb.db',
-                        poolclass=SingletonThreadPool)
-
-
-    # maintain the same connection across all threads
-    from sqlalchemy.pool import StaticPool
-    engine = create_engine('sqlite:///mydb.db',
-                        poolclass=StaticPool)
-
-Note that :class:`.SingletonThreadPool` should be configured for the number of threads
-that are to be used; beyond that number, connections will be closed out in a
-non-deterministic way.
-
-Unicode
--------
-
-The pysqlite driver only returns Python ``unicode`` objects in result sets, never
-plain strings, and accommodates ``unicode`` objects within bound parameter
-values in all cases. Regardless of the SQLAlchemy string type in use,
-string-based result values will be Python ``unicode`` in Python 2.
-The :class:`.Unicode` type should still be used to indicate those columns that
-require unicode, however, so that non-``unicode`` values passed inadvertently
-will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
-is passed containing non-ASCII characters.
-
-.. _pysqlite_serializable:
-
-Serializable Transaction Isolation
-----------------------------------
-
-The pysqlite DBAPI driver has a long-standing bug in which transactional
-state is not begun until the first DML statement, that is INSERT, UPDATE
-or DELETE, is emitted. A SELECT statement will not cause transactional
-state to begin. While this mode of usage is fine for typical situations
-and has the advantage that the SQLite database file is not prematurely
-locked, it breaks serializable transaction isolation, which requires
-that the database file be locked upon any SQL being emitted.
-
-To work around this issue, the ``BEGIN`` keyword can be emitted
-at the start of each transaction.
The following recipe establishes -a :meth:`.ConnectionEvents.begin` handler to achieve this:: - - from sqlalchemy import create_engine, event - - engine = create_engine("sqlite:///myfile.db", isolation_level='SERIALIZABLE') - - @event.listens_for(engine, "begin") - def do_begin(conn): - conn.execute("BEGIN") - -""" - -from sqlalchemy.dialects.sqlite.base import SQLiteDialect, DATETIME, DATE -from sqlalchemy import exc, pool -from sqlalchemy import types as sqltypes -from sqlalchemy import util - -import os - -class _SQLite_pysqliteTimeStamp(DATETIME): - def bind_processor(self, dialect): - if dialect.native_datetime: - return None - else: - return DATETIME.bind_processor(self, dialect) - - def result_processor(self, dialect, coltype): - if dialect.native_datetime: - return None - else: - return DATETIME.result_processor(self, dialect, coltype) - -class _SQLite_pysqliteDate(DATE): - def bind_processor(self, dialect): - if dialect.native_datetime: - return None - else: - return DATE.bind_processor(self, dialect) - - def result_processor(self, dialect, coltype): - if dialect.native_datetime: - return None - else: - return DATE.result_processor(self, dialect, coltype) - -class SQLiteDialect_pysqlite(SQLiteDialect): - default_paramstyle = 'qmark' - - colspecs = util.update_copy( - SQLiteDialect.colspecs, - { - sqltypes.Date:_SQLite_pysqliteDate, - sqltypes.TIMESTAMP:_SQLite_pysqliteTimeStamp, - } - ) - - # Py3K - #description_encoding = None - - driver = 'pysqlite' - - def __init__(self, **kwargs): - SQLiteDialect.__init__(self, **kwargs) - - if self.dbapi is not None: - sqlite_ver = self.dbapi.version_info - if sqlite_ver < (2, 1, 3): - util.warn( - ("The installed version of pysqlite2 (%s) is out-dated " - "and will cause errors in some cases. Version 2.1.3 " - "or greater is recommended.") % - '.'.join([str(subver) for subver in sqlite_ver])) - - @classmethod - def dbapi(cls): - try: - from pysqlite2 import dbapi2 as sqlite - except ImportError, e: - try: - from sqlite3 import dbapi2 as sqlite #try the 2.5+ stdlib name. - except ImportError: - raise e - return sqlite - - @classmethod - def get_pool_class(cls, url): - if url.database and url.database != ':memory:': - return pool.NullPool - else: - return pool.SingletonThreadPool - - def _get_server_version_info(self, connection): - return self.dbapi.sqlite_version_info - - def create_connect_args(self, url): - if url.username or url.password or url.host or url.port: - raise exc.ArgumentError( - "Invalid SQLite URL: %s\n" - "Valid SQLite URL forms are:\n" - " sqlite:///:memory: (or, sqlite://)\n" - " sqlite:///relative/path/to/file.db\n" - " sqlite:////absolute/path/to/file.db" % (url,)) - filename = url.database or ':memory:' - if filename != ':memory:': - filename = os.path.abspath(filename) - - opts = url.query.copy() - util.coerce_kw_type(opts, 'timeout', float) - util.coerce_kw_type(opts, 'isolation_level', str) - util.coerce_kw_type(opts, 'detect_types', int) - util.coerce_kw_type(opts, 'check_same_thread', bool) - util.coerce_kw_type(opts, 'cached_statements', int) - - return ([filename], opts) - - def is_disconnect(self, e, connection, cursor): - return isinstance(e, self.dbapi.ProgrammingError) and \ - "Cannot operate on a closed database." 
in str(e) - -dialect = SQLiteDialect_pysqlite diff --git a/libs/sqlalchemy/dialects/sybase/__init__.py b/libs/sqlalchemy/dialects/sybase/__init__.py deleted file mode 100644 index 528ebf23..00000000 --- a/libs/sqlalchemy/dialects/sybase/__init__.py +++ /dev/null @@ -1,26 +0,0 @@ -# sybase/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.dialects.sybase import base, pysybase, pyodbc - - -from base import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ - TEXT,DATE,DATETIME, FLOAT, NUMERIC,\ - BIGINT,INT, INTEGER, SMALLINT, BINARY,\ - VARBINARY,UNITEXT,UNICHAR,UNIVARCHAR,\ - IMAGE,BIT,MONEY,SMALLMONEY,TINYINT - -# default dialect -base.dialect = pyodbc.dialect - -__all__ = ( - 'CHAR', 'VARCHAR', 'TIME', 'NCHAR', 'NVARCHAR', - 'TEXT','DATE','DATETIME', 'FLOAT', 'NUMERIC', - 'BIGINT','INT', 'INTEGER', 'SMALLINT', 'BINARY', - 'VARBINARY','UNITEXT','UNICHAR','UNIVARCHAR', - 'IMAGE','BIT','MONEY','SMALLMONEY','TINYINT', - 'dialect' -) diff --git a/libs/sqlalchemy/dialects/sybase/base.py b/libs/sqlalchemy/dialects/sybase/base.py deleted file mode 100644 index f551bff9..00000000 --- a/libs/sqlalchemy/dialects/sybase/base.py +++ /dev/null @@ -1,438 +0,0 @@ -# sybase/base.py -# Copyright (C) 2010-2011 the SQLAlchemy authors and contributors -# get_select_precolumns(), limit_clause() implementation -# copyright (C) 2007 Fisch Asset Management -# AG http://www.fam.ch, with coding by Alexander Houben -# alexander.houben@thor-solutions.ch -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Support for Sybase Adaptive Server Enterprise (ASE). - -.. note:: - - The Sybase dialect functions on current SQLAlchemy versions - but is not regularly tested, and may have many issues and - caveats not currently handled. In particular, the table - and database reflection features are not implemented. 
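For orientation only, a minimal sketch of constructing an engine against this dialect; the DSN name and credentials below are invented placeholders, not values taken from this codebase, and the dialect's caveats above still apply::

    from sqlalchemy import create_engine

    # 'mydsn', 'scott' and 'tiger' are hypothetical, shown only to
    # illustrate the sybase+pyodbc URL format used by this package
    engine = create_engine('sybase+pyodbc://scott:tiger@mydsn/')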
- -""" - -import operator -from sqlalchemy.sql import compiler, expression, text, bindparam -from sqlalchemy.engine import default, base, reflection -from sqlalchemy import types as sqltypes -from sqlalchemy.sql import operators as sql_operators -from sqlalchemy import schema as sa_schema -from sqlalchemy import util, sql, exc - -from sqlalchemy.types import CHAR, VARCHAR, TIME, NCHAR, NVARCHAR,\ - TEXT,DATE,DATETIME, FLOAT, NUMERIC,\ - BIGINT,INT, INTEGER, SMALLINT, BINARY,\ - VARBINARY, DECIMAL, TIMESTAMP, Unicode,\ - UnicodeText - -RESERVED_WORDS = set([ - "add", "all", "alter", "and", - "any", "as", "asc", "backup", - "begin", "between", "bigint", "binary", - "bit", "bottom", "break", "by", - "call", "capability", "cascade", "case", - "cast", "char", "char_convert", "character", - "check", "checkpoint", "close", "comment", - "commit", "connect", "constraint", "contains", - "continue", "convert", "create", "cross", - "cube", "current", "current_timestamp", "current_user", - "cursor", "date", "dbspace", "deallocate", - "dec", "decimal", "declare", "default", - "delete", "deleting", "desc", "distinct", - "do", "double", "drop", "dynamic", - "else", "elseif", "encrypted", "end", - "endif", "escape", "except", "exception", - "exec", "execute", "existing", "exists", - "externlogin", "fetch", "first", "float", - "for", "force", "foreign", "forward", - "from", "full", "goto", "grant", - "group", "having", "holdlock", "identified", - "if", "in", "index", "index_lparen", - "inner", "inout", "insensitive", "insert", - "inserting", "install", "instead", "int", - "integer", "integrated", "intersect", "into", - "iq", "is", "isolation", "join", - "key", "lateral", "left", "like", - "lock", "login", "long", "match", - "membership", "message", "mode", "modify", - "natural", "new", "no", "noholdlock", - "not", "notify", "null", "numeric", - "of", "off", "on", "open", - "option", "options", "or", "order", - "others", "out", "outer", "over", - "passthrough", "precision", "prepare", "primary", - "print", "privileges", "proc", "procedure", - "publication", "raiserror", "readtext", "real", - "reference", "references", "release", "remote", - "remove", "rename", "reorganize", "resource", - "restore", "restrict", "return", "revoke", - "right", "rollback", "rollup", "save", - "savepoint", "scroll", "select", "sensitive", - "session", "set", "setuser", "share", - "smallint", "some", "sqlcode", "sqlstate", - "start", "stop", "subtrans", "subtransaction", - "synchronize", "syntax_error", "table", "temporary", - "then", "time", "timestamp", "tinyint", - "to", "top", "tran", "trigger", - "truncate", "tsequal", "unbounded", "union", - "unique", "unknown", "unsigned", "update", - "updating", "user", "using", "validate", - "values", "varbinary", "varchar", "variable", - "varying", "view", "wait", "waitfor", - "when", "where", "while", "window", - "with", "with_cube", "with_lparen", "with_rollup", - "within", "work", "writetext", - ]) - - -class _SybaseUnitypeMixin(object): - """these types appear to return a buffer object.""" - - def result_processor(self, dialect, coltype): - def process(value): - if value is not None: - return str(value) #.decode("ucs-2") - else: - return None - return process - -class UNICHAR(_SybaseUnitypeMixin, sqltypes.Unicode): - __visit_name__ = 'UNICHAR' - -class UNIVARCHAR(_SybaseUnitypeMixin, sqltypes.Unicode): - __visit_name__ = 'UNIVARCHAR' - -class UNITEXT(_SybaseUnitypeMixin, sqltypes.UnicodeText): - __visit_name__ = 'UNITEXT' - -class TINYINT(sqltypes.Integer): - __visit_name__ = 
'TINYINT' - -class BIT(sqltypes.TypeEngine): - __visit_name__ = 'BIT' - -class MONEY(sqltypes.TypeEngine): - __visit_name__ = "MONEY" - -class SMALLMONEY(sqltypes.TypeEngine): - __visit_name__ = "SMALLMONEY" - -class UNIQUEIDENTIFIER(sqltypes.TypeEngine): - __visit_name__ = "UNIQUEIDENTIFIER" - -class IMAGE(sqltypes.LargeBinary): - __visit_name__ = 'IMAGE' - - -class SybaseTypeCompiler(compiler.GenericTypeCompiler): - def visit_large_binary(self, type_): - return self.visit_IMAGE(type_) - - def visit_boolean(self, type_): - return self.visit_BIT(type_) - - def visit_unicode(self, type_): - return self.visit_NVARCHAR(type_) - - def visit_UNICHAR(self, type_): - return "UNICHAR(%d)" % type_.length - - def visit_UNIVARCHAR(self, type_): - return "UNIVARCHAR(%d)" % type_.length - - def visit_UNITEXT(self, type_): - return "UNITEXT" - - def visit_TINYINT(self, type_): - return "TINYINT" - - def visit_IMAGE(self, type_): - return "IMAGE" - - def visit_BIT(self, type_): - return "BIT" - - def visit_MONEY(self, type_): - return "MONEY" - - def visit_SMALLMONEY(self, type_): - return "SMALLMONEY" - - def visit_UNIQUEIDENTIFIER(self, type_): - return "UNIQUEIDENTIFIER" - -ischema_names = { - 'integer' : INTEGER, - 'unsigned int' : INTEGER, # TODO: unsigned flags - 'unsigned smallint' : SMALLINT, # TODO: unsigned flags - 'unsigned bigint' : BIGINT, # TODO: unsigned flags - 'bigint': BIGINT, - 'smallint' : SMALLINT, - 'tinyint' : TINYINT, - 'varchar' : VARCHAR, - 'long varchar' : TEXT, # TODO - 'char' : CHAR, - 'decimal' : DECIMAL, - 'numeric' : NUMERIC, - 'float' : FLOAT, - 'double' : NUMERIC, # TODO - 'binary' : BINARY, - 'varbinary' : VARBINARY, - 'bit': BIT, - 'image' : IMAGE, - 'timestamp': TIMESTAMP, - 'money': MONEY, - 'smallmoney': MONEY, - 'uniqueidentifier': UNIQUEIDENTIFIER, - -} - - -class SybaseExecutionContext(default.DefaultExecutionContext): - _enable_identity_insert = False - - def set_ddl_autocommit(self, connection, value): - """Must be implemented by subclasses to accommodate DDL executions. - - "connection" is the raw unwrapped DBAPI connection. "value" - is True or False. when True, the connection should be configured - such that a DDL can take place subsequently. when False, - a DDL has taken place and the connection should be resumed - into non-autocommit mode. - - """ - raise NotImplementedError() - - def pre_exec(self): - if self.isinsert: - tbl = self.compiled.statement.table - seq_column = tbl._autoincrement_column - insert_has_sequence = seq_column is not None - - if insert_has_sequence: - self._enable_identity_insert = \ - seq_column.key in self.compiled_parameters[0] - else: - self._enable_identity_insert = False - - if self._enable_identity_insert: - self.cursor.execute("SET IDENTITY_INSERT %s ON" % - self.dialect.identifier_preparer.format_table(tbl)) - - if self.isddl: - # TODO: to enhance this, we can detect "ddl in tran" on the - # database settings. this error message should be improved to - # include a note about that. - if not self.should_autocommit: - raise exc.InvalidRequestError( - "The Sybase dialect only supports " - "DDL in 'autocommit' mode at this time.") - - self.root_connection.engine.logger.info( - "AUTOCOMMIT (Assuming no Sybase 'ddl in tran')") - - self.set_ddl_autocommit( - self.root_connection.connection.connection, - True) - - - def post_exec(self): - if self.isddl: - self.set_ddl_autocommit(self.root_connection, False) - - if self._enable_identity_insert: - self.cursor.execute( - "SET IDENTITY_INSERT %s OFF" % - self.dialect.identifier_preparer. 
- format_table(self.compiled.statement.table) - ) - - def get_lastrowid(self): - cursor = self.create_cursor() - cursor.execute("SELECT @@identity AS lastrowid") - lastrowid = cursor.fetchone()[0] - cursor.close() - return lastrowid - -class SybaseSQLCompiler(compiler.SQLCompiler): - ansi_bind_rules = True - - extract_map = util.update_copy( - compiler.SQLCompiler.extract_map, - { - 'doy': 'dayofyear', - 'dow': 'weekday', - 'milliseconds': 'millisecond' - }) - - def get_select_precolumns(self, select): - s = select._distinct and "DISTINCT " or "" - # TODO: don't think Sybase supports - # bind params for FIRST / TOP - if select._limit: - #if select._limit == 1: - #s += "FIRST " - #else: - #s += "TOP %s " % (select._limit,) - s += "TOP %s " % (select._limit,) - if select._offset: - if not select._limit: - # FIXME: sybase doesn't allow an offset without a limit - # so use a huge value for TOP here - s += "TOP 1000000 " - s += "START AT %s " % (select._offset+1,) - return s - - def get_from_hint_text(self, table, text): - return text - - def limit_clause(self, select): - # Limit in sybase is after the select keyword - return "" - - def visit_extract(self, extract, **kw): - field = self.extract_map.get(extract.field, extract.field) - return 'DATEPART("%s", %s)' % ( - field, self.process(extract.expr, **kw)) - - def for_update_clause(self, select): - # "FOR UPDATE" is only allowed on "DECLARE CURSOR" - # which SQLAlchemy doesn't use - return '' - - def order_by_clause(self, select, **kw): - kw['literal_binds'] = True - order_by = self.process(select._order_by_clause, **kw) - - # SybaseSQL only allows ORDER BY in subqueries if there is a LIMIT - if order_by and (not self.is_subquery() or select._limit): - return " ORDER BY " + order_by - else: - return "" - - -class SybaseDDLCompiler(compiler.DDLCompiler): - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process(column.type) - - if column.table is None: - raise exc.CompileError( - "The Sybase dialect requires Table-bound " - "columns in order to generate DDL") - seq_col = column.table._autoincrement_column - - # install a IDENTITY Sequence if we have an implicit IDENTITY column - if seq_col is column: - sequence = isinstance(column.default, sa_schema.Sequence) \ - and column.default - if sequence: - start, increment = sequence.start or 1, \ - sequence.increment or 1 - else: - start, increment = 1, 1 - if (start, increment) == (1, 1): - colspec += " IDENTITY" - else: - # TODO: need correct syntax for this - colspec += " IDENTITY(%s,%s)" % (start, increment) - else: - if column.nullable is not None: - if not column.nullable or column.primary_key: - colspec += " NOT NULL" - else: - colspec += " NULL" - - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - return colspec - - def visit_drop_index(self, drop): - index = drop.element - return "\nDROP INDEX %s.%s" % ( - self.preparer.quote_identifier(index.table.name), - self.preparer.quote( - self._index_identifier(index.name), index.quote) - ) - -class SybaseIdentifierPreparer(compiler.IdentifierPreparer): - reserved_words = RESERVED_WORDS - -class SybaseDialect(default.DefaultDialect): - name = 'sybase' - supports_unicode_statements = False - supports_sane_rowcount = False - supports_sane_multi_rowcount = False - - supports_native_boolean = False - supports_unicode_binds = False - postfetch_lastrowid = True - - colspecs = {} - ischema_names = 
ischema_names - - type_compiler = SybaseTypeCompiler - statement_compiler = SybaseSQLCompiler - ddl_compiler = SybaseDDLCompiler - preparer = SybaseIdentifierPreparer - - def _get_default_schema_name(self, connection): - return connection.scalar( - text("SELECT user_name() as user_name", - typemap={'user_name':Unicode}) - ) - - def initialize(self, connection): - super(SybaseDialect, self).initialize(connection) - if self.server_version_info is not None and\ - self.server_version_info < (15, ): - self.max_identifier_length = 30 - else: - self.max_identifier_length = 255 - - @reflection.cache - def get_table_names(self, connection, schema=None, **kw): - if schema is None: - schema = self.default_schema_name - - result = connection.execute( - text("select sysobjects.name from sysobjects, sysusers " - "where sysobjects.uid=sysusers.uid and " - "sysusers.name=:schemaname and " - "sysobjects.type='U'", - bindparams=[ - bindparam('schemaname', schema) - ]) - ) - return [r[0] for r in result] - - def has_table(self, connection, tablename, schema=None): - if schema is None: - schema = self.default_schema_name - - result = connection.execute( - text("select sysobjects.name from sysobjects, sysusers " - "where sysobjects.uid=sysusers.uid and " - "sysobjects.name=:tablename and " - "sysusers.name=:schemaname and " - "sysobjects.type='U'", - bindparams=[ - bindparam('tablename', tablename), - bindparam('schemaname', schema) - ]) - ) - return result.scalar() is not None - - def reflecttable(self, connection, table, include_columns): - raise NotImplementedError() - diff --git a/libs/sqlalchemy/dialects/sybase/mxodbc.py b/libs/sqlalchemy/dialects/sybase/mxodbc.py deleted file mode 100644 index db60b9b2..00000000 --- a/libs/sqlalchemy/dialects/sybase/mxodbc.py +++ /dev/null @@ -1,23 +0,0 @@ -# sybase/mxodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for Sybase via mxodbc. - -This dialect is a stub only and is likely non functional at this time. - - -""" -from sqlalchemy.dialects.sybase.base import SybaseDialect, SybaseExecutionContext -from sqlalchemy.connectors.mxodbc import MxODBCConnector - -class SybaseExecutionContext_mxodbc(SybaseExecutionContext): - pass - -class SybaseDialect_mxodbc(MxODBCConnector, SybaseDialect): - execution_ctx_cls = SybaseExecutionContext_mxodbc - -dialect = SybaseDialect_mxodbc diff --git a/libs/sqlalchemy/dialects/sybase/pyodbc.py b/libs/sqlalchemy/dialects/sybase/pyodbc.py deleted file mode 100644 index 8e3729b3..00000000 --- a/libs/sqlalchemy/dialects/sybase/pyodbc.py +++ /dev/null @@ -1,83 +0,0 @@ -# sybase/pyodbc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Support for Sybase via pyodbc. 
-
-http://pypi.python.org/pypi/pyodbc/
-
-Connect strings are of the form::
-
-    sybase+pyodbc://<username>:<password>@<dsn>/
-    sybase+pyodbc://<username>:<password>@<host>:<port>/<database>
-
-Unicode Support
----------------
-
-The pyodbc driver currently supports usage of these Sybase types with
-Unicode or multibyte strings::
-
-    CHAR
-    NCHAR
-    NVARCHAR
-    TEXT
-    VARCHAR
-
-Currently *not* supported are::
-
-    UNICHAR
-    UNITEXT
-    UNIVARCHAR
-
-"""
-
-from sqlalchemy.dialects.sybase.base import SybaseDialect,\
-    SybaseExecutionContext
-from sqlalchemy.connectors.pyodbc import PyODBCConnector
-from sqlalchemy import types as sqltypes, util, processors
-from sqlalchemy.util.compat import decimal
-
-class _SybNumeric_pyodbc(sqltypes.Numeric):
-    """Turns Decimals with adjusted() < -6 into floats.
-
-    It's not yet known how to get decimals with many
-    significant digits or very large adjusted() into Sybase
-    via pyodbc.
-
-    """
-
-    def bind_processor(self, dialect):
-        super_process = super(_SybNumeric_pyodbc,self).\
-                bind_processor(dialect)
-
-        def process(value):
-            if self.asdecimal and \
-                    isinstance(value, decimal.Decimal):
-
-                if value.adjusted() < -6:
-                    return processors.to_float(value)
-
-            if super_process:
-                return super_process(value)
-            else:
-                return value
-        return process
-
-class SybaseExecutionContext_pyodbc(SybaseExecutionContext):
-    def set_ddl_autocommit(self, connection, value):
-        if value:
-            connection.autocommit = True
-        else:
-            connection.autocommit = False
-
-class SybaseDialect_pyodbc(PyODBCConnector, SybaseDialect):
-    execution_ctx_cls = SybaseExecutionContext_pyodbc
-
-    colspecs = {
-        sqltypes.Numeric:_SybNumeric_pyodbc,
-    }
-
-dialect = SybaseDialect_pyodbc
diff --git a/libs/sqlalchemy/dialects/sybase/pysybase.py b/libs/sqlalchemy/dialects/sybase/pysybase.py
deleted file mode 100644
index bf8c2096..00000000
--- a/libs/sqlalchemy/dialects/sybase/pysybase.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# sybase/pysybase.py
-# Copyright (C) 2010-2011 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""
-Support for Sybase via the python-sybase driver.
-
-http://python-sybase.sourceforge.net/
-
-Connect strings are of the form::
-
-    sybase+pysybase://<username>:<password>@<dsn>/[database name]
-
-Unicode Support
----------------
-
-The python-sybase driver does not appear to support non-ASCII strings of any
-kind at this time.
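A hedged usage sketch following the connect-string form above; the server name, credentials and database are invented, and given the ASCII-only limitation just noted, bound values should be kept to plain ASCII strings::

    from sqlalchemy import create_engine

    # 'myserver', 'scott', 'tiger' and 'mydb' are hypothetical placeholders
    engine = create_engine('sybase+pysybase://scott:tiger@myserver/mydb')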
- -""" - -from sqlalchemy import types as sqltypes, processors -from sqlalchemy.dialects.sybase.base import SybaseDialect, \ - SybaseExecutionContext, SybaseSQLCompiler - - -class _SybNumeric(sqltypes.Numeric): - def result_processor(self, dialect, type_): - if not self.asdecimal: - return processors.to_float - else: - return sqltypes.Numeric.result_processor(self, dialect, type_) - -class SybaseExecutionContext_pysybase(SybaseExecutionContext): - - def set_ddl_autocommit(self, dbapi_connection, value): - if value: - # call commit() on the Sybase connection directly, - # to avoid any side effects of calling a Connection - # transactional method inside of pre_exec() - dbapi_connection.commit() - - def pre_exec(self): - SybaseExecutionContext.pre_exec(self) - - for param in self.parameters: - for key in list(param): - param["@" + key] = param[key] - del param[key] - - -class SybaseSQLCompiler_pysybase(SybaseSQLCompiler): - def bindparam_string(self, name, **kw): - return "@" + name - -class SybaseDialect_pysybase(SybaseDialect): - driver = 'pysybase' - execution_ctx_cls = SybaseExecutionContext_pysybase - statement_compiler = SybaseSQLCompiler_pysybase - - colspecs={ - sqltypes.Numeric:_SybNumeric, - sqltypes.Float:sqltypes.Float - } - - @classmethod - def dbapi(cls): - import Sybase - return Sybase - - def create_connect_args(self, url): - opts = url.translate_connect_args(username='user', password='passwd') - - return ([opts.pop('host')], opts) - - def do_executemany(self, cursor, statement, parameters, context=None): - # calling python-sybase executemany yields: - # TypeError: string too long for buffer - for param in parameters: - cursor.execute(statement, param) - - def _get_server_version_info(self, connection): - vers = connection.scalar("select @@version_number") - # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0), - # (12, 5, 0, 0) - return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10) - - def is_disconnect(self, e, connection, cursor): - if isinstance(e, (self.dbapi.OperationalError, - self.dbapi.ProgrammingError)): - msg = str(e) - return ('Unable to complete network request to host' in msg or - 'Invalid connection state' in msg or - 'Invalid cursor state' in msg) - else: - return False - -dialect = SybaseDialect_pysybase diff --git a/libs/sqlalchemy/dialects/type_migration_guidelines.txt b/libs/sqlalchemy/dialects/type_migration_guidelines.txt deleted file mode 100644 index 1ca15f7f..00000000 --- a/libs/sqlalchemy/dialects/type_migration_guidelines.txt +++ /dev/null @@ -1,145 +0,0 @@ -Rules for Migrating TypeEngine classes to 0.6 ---------------------------------------------- - -1. the TypeEngine classes are used for: - - a. Specifying behavior which needs to occur for bind parameters - or result row columns. - - b. Specifying types that are entirely specific to the database - in use and have no analogue in the sqlalchemy.types package. - - c. Specifying types where there is an analogue in sqlalchemy.types, - but the database in use takes vendor-specific flags for those - types. - - d. If a TypeEngine class doesn't provide any of this, it should be - *removed* from the dialect. - -2. the TypeEngine classes are *no longer* used for generating DDL. Dialects -now have a TypeCompiler subclass which uses the same visit_XXX model as -other compilers. - -3. the "ischema_names" and "colspecs" dictionaries are now required members on -the Dialect class. - -4. The names of types within dialects are now important. 
If a dialect-specific type -is a subclass of an existing generic type and is only provided for bind/result behavior, -the current mixed case naming can remain, i.e. _PGNumeric for Numeric - in this case, -end users would never need to use _PGNumeric directly. However, if a dialect-specific -type is specifying a type *or* arguments that are not present generically, it should -match the real name of the type on that backend, in uppercase. E.g. postgresql.INET, -mysql.ENUM, postgresql.ARRAY. - -Or follow this handy flowchart: - - is the type meant to provide bind/result is the type the same name as an - behavior to a generic type (i.e. MixedCase) ---- no ---> UPPERCASE type in types.py ? - type in types.py ? | | - | no yes - yes | | - | | does your type need special - | +<--- yes --- behavior or arguments ? - | | | - | | no - name the type using | | - _MixedCase, i.e. v V - _OracleBoolean. it name the type don't make a - stays private to the dialect identically as that type, make sure the dialect's - and is invoked *only* via within the DB, base.py imports the types.py - the colspecs dict. using UPPERCASE UPPERCASE name into its namespace - | (i.e. BIT, NCHAR, INTERVAL). - | Users can import it. - | | - v v - subclass the closest is the name of this type - MixedCase type types.py, identical to an UPPERCASE - i.e. <--- no ------- name in types.py ? - class _DateTime(types.DateTime), - class DATETIME2(types.DateTime), | - class BIT(types.TypeEngine). yes - | - v - the type should - subclass the - UPPERCASE - type in types.py - (i.e. class BLOB(types.BLOB)) - - -Example 1. pysqlite needs bind/result processing for the DateTime type in types.py, -which applies to all DateTimes and subclasses. It's named _SLDateTime and -subclasses types.DateTime. - -Example 2. MS-SQL has a TIME type which takes a non-standard "precision" argument -that is rendered within DDL. So it's named TIME in the MS-SQL dialect's base.py, -and subclasses types.TIME. Users can then say mssql.TIME(precision=10). - -Example 3. MS-SQL dialects also need special bind/result processing for date -But its DATE type doesn't render DDL differently than that of a plain -DATE, i.e. it takes no special arguments. Therefore we are just adding behavior -to types.Date, so it's named _MSDate in the MS-SQL dialect's base.py, and subclasses -types.Date. - -Example 4. MySQL has a SET type, there's no analogue for this in types.py. So -MySQL names it SET in the dialect's base.py, and it subclasses types.String, since -it ultimately deals with strings. - -Example 5. Postgresql has a DATETIME type. The DBAPIs handle dates correctly, -and no special arguments are used in PG's DDL beyond what types.py provides. -Postgresql dialect therefore imports types.DATETIME into its base.py. - -Ideally one should be able to specify a schema using names imported completely from a -dialect, all matching the real name on that backend: - - from sqlalchemy.dialects.postgresql import base as pg - - t = Table('mytable', metadata, - Column('id', pg.INTEGER, primary_key=True), - Column('name', pg.VARCHAR(300)), - Column('inetaddr', pg.INET) - ) - -where above, the INTEGER and VARCHAR types are ultimately from sqlalchemy.types, -but the PG dialect makes them available in its own namespace. - -5. "colspecs" now is a dictionary of generic or uppercased types from sqlalchemy.types -linked to types specified in the dialect. 
-Again, if a type in the dialect does not
-specify any special behavior for bind_processor() or result_processor() and does not
-indicate a special type only available in this database, it must be *removed* from the
-module and from this dictionary.
-
-6. "ischema_names" indicates string descriptions of types as returned from the database
-linked to TypeEngine classes.
-
-    a. The string name should be matched to the most specific type possible within
-    sqlalchemy.types, unless there is no matching type within sqlalchemy.types in which
-    case it points to a dialect type. *It doesn't matter* if the dialect has its
-    own subclass of that type with special bind/result behavior - reflect to the types.py
-    UPPERCASE type as much as possible. With very few exceptions, all types
-    should reflect to an UPPERCASE type.
-
-    b. If the dialect contains a matching dialect-specific type that takes extra arguments
-    which the generic one does not, then point to the dialect-specific type. E.g.
-    mssql.VARCHAR takes a "collation" parameter which should be preserved.
-
-7. DDL, or what was formerly issued by "get_col_spec()", is now handled exclusively by
-a subclass of compiler.GenericTypeCompiler.
-
-    a. your TypeCompiler class will receive generic and uppercase types from
-    sqlalchemy.types. Do not assume the presence of dialect-specific attributes on
-    these types.
-
-    b. the visit_UPPERCASE methods on GenericTypeCompiler should *not* be overridden with
-    methods that produce a different DDL name. Uppercase types don't do any kind of
-    "guessing" - if visit_TIMESTAMP is called, the DDL should render as TIMESTAMP in
-    all cases, regardless of whether or not that type is legal on the backend database.
-
-    c. the visit_UPPERCASE methods *should* be overridden with methods that add additional
-    arguments and flags to those types.
-
-    d. the visit_lowercase methods are overridden to provide an interpretation of a generic
-    type. E.g. visit_large_binary() might be overridden to say "return self.visit_BIT(type_)".
-
-    e. visit_lowercase methods should *never* render strings directly - rendering should
-    always go through a visit_UPPERCASE() method.
diff --git a/libs/sqlalchemy/engine/__init__.py b/libs/sqlalchemy/engine/__init__.py
deleted file mode 100644
index 6ff8ba15..00000000
--- a/libs/sqlalchemy/engine/__init__.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# engine/__init__.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""SQL connections, SQL execution and high-level DB-API interface.
-
-The engine package defines the basic components used to interface
-DB-API modules with higher-level statement construction,
-connection-management, execution and result contexts. The primary
-"entry point" class into this package is the Engine and its public
-constructor ``create_engine()``.
-
-This package includes:
-
-base.py
-    Defines interface classes and some implementation classes which
-    comprise the basic components used to interface between a DB-API,
-    constructed and plain-text statements, connections, transactions,
-    and results.
-
-default.py
-    Contains default implementations of some of the components defined
-    in base.py. All current database dialects use the classes in
-    default.py as base classes for their own database-specific
-    implementations.
-
-strategies.py
-    The mechanics of constructing ``Engine`` objects are represented
-    here.
Defines the ``EngineStrategy`` class which represents how - to go from arguments specified to the ``create_engine()`` - function, to a fully constructed ``Engine``, including - initialization of connection pooling, dialects, and specific - subclasses of ``Engine``. - -threadlocal.py - The ``TLEngine`` class is defined here, which is a subclass of - the generic ``Engine`` and tracks ``Connection`` and - ``Transaction`` objects against the identity of the current - thread. This allows certain programming patterns based around - the concept of a "thread-local connection" to be possible. - The ``TLEngine`` is created by using the "threadlocal" engine - strategy in conjunction with the ``create_engine()`` function. - -url.py - Defines the ``URL`` class which represents the individual - components of a string URL passed to ``create_engine()``. Also - defines a basic module-loading strategy for the dialect specifier - within a URL. -""" - -# not sure what this was used for -#import sqlalchemy.databases - -from sqlalchemy.engine.base import ( - BufferedColumnResultProxy, - BufferedColumnRow, - BufferedRowResultProxy, - Compiled, - Connectable, - Connection, - Dialect, - Engine, - ExecutionContext, - NestedTransaction, - ResultProxy, - RootTransaction, - RowProxy, - Transaction, - TwoPhaseTransaction, - TypeCompiler - ) -from sqlalchemy.engine import strategies -from sqlalchemy import util - - -__all__ = ( - 'BufferedColumnResultProxy', - 'BufferedColumnRow', - 'BufferedRowResultProxy', - 'Compiled', - 'Connectable', - 'Connection', - 'Dialect', - 'Engine', - 'ExecutionContext', - 'NestedTransaction', - 'ResultProxy', - 'RootTransaction', - 'RowProxy', - 'Transaction', - 'TwoPhaseTransaction', - 'TypeCompiler', - 'create_engine', - 'engine_from_config', - ) - - -default_strategy = 'plain' -def create_engine(*args, **kwargs): - """Create a new :class:`.Engine` instance. - - The standard calling form is to send the URL as the - first positional argument, usually a string - that indicates database dialect and connection arguments. - Additional keyword arguments may then follow it which - establish various options on the resulting :class:`.Engine` - and its underlying :class:`.Dialect` and :class:`.Pool` - constructs. - - The string form of the URL is - ``dialect+driver://user:password@host/dbname[?key=value..]``, where - ``dialect`` is a database name such as ``mysql``, ``oracle``, - ``postgresql``, etc., and ``driver`` the name of a DBAPI, such as - ``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively, - the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`. - - ``**kwargs`` takes a wide variety of options which are routed - towards their appropriate components. Arguments may be - specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the - :class:`.Pool`. Specific dialects also accept keyword arguments that - are unique to that dialect. Here, we describe the parameters - that are common to most :func:`.create_engine()` usage. - - Once established, the newly resulting :class:`.Engine` will - request a connection from the underlying :class:`.Pool` once - :meth:`.Engine.connect` is called, or a method which depends on it - such as :meth:`.Engine.execute` is invoked. The :class:`.Pool` in turn - will establish the first actual DBAPI connection when this request - is received. The :func:`.create_engine` call itself does **not** - establish any actual DBAPI connections directly. 
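To make the lazy-connect behavior described above concrete, a small sketch; the SQLite URL and file name are illustrative only::

    from sqlalchemy import create_engine

    engine = create_engine('sqlite:///some.db')  # no DBAPI connection opened here
    conn = engine.connect()                      # first checkout triggers the real connect
    print(conn.execute('SELECT 1').scalar())
    conn.close()                                 # connection is returned to the pool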
- - See also: - - :ref:`engines_toplevel` - - :ref:`connections_toplevel` - - :param assert_unicode: Deprecated. This flag - sets an engine-wide default value for - the ``assert_unicode`` flag on the - :class:`.String` type - see that - type for further details. - - :param connect_args: a dictionary of options which will be - passed directly to the DBAPI's ``connect()`` method as - additional keyword arguments. See the example - at :ref:`custom_dbapi_args`. - - :param convert_unicode=False: if set to True, sets - the default behavior of ``convert_unicode`` on the - :class:`.String` type to ``True``, regardless - of a setting of ``False`` on an individual - :class:`.String` type, thus causing all :class:`.String` - -based columns - to accommodate Python ``unicode`` objects. This flag - is useful as an engine-wide setting when using a - DBAPI that does not natively support Python - ``unicode`` objects and raises an error when - one is received (such as pyodbc with FreeTDS). - - See :class:`.String` for further details on - what this flag indicates. - - :param creator: a callable which returns a DBAPI connection. - This creation function will be passed to the underlying - connection pool and will be used to create all new database - connections. Usage of this function causes connection - parameters specified in the URL argument to be bypassed. - - :param echo=False: if True, the Engine will log all statements - as well as a repr() of their parameter lists to the engines - logger, which defaults to sys.stdout. The ``echo`` attribute of - ``Engine`` can be modified at any time to turn logging on and - off. If set to the string ``"debug"``, result rows will be - printed to the standard output as well. This flag ultimately - controls a Python logger; see :ref:`dbengine_logging` for - information on how to configure logging directly. - - :param echo_pool=False: if True, the connection pool will log - all checkouts/checkins to the logging stream, which defaults to - sys.stdout. This flag ultimately controls a Python logger; see - :ref:`dbengine_logging` for information on how to configure logging - directly. - - :param encoding: Defaults to ``utf-8``. This is the string - encoding used by SQLAlchemy for string encode/decode - operations which occur within SQLAlchemy, **outside of - the DBAPI.** Most modern DBAPIs feature some degree of - direct support for Python ``unicode`` objects, - what you see in Python 2 as a string of the form - ``u'some string'``. For those scenarios where the - DBAPI is detected as not supporting a Python ``unicode`` - object, this encoding is used to determine the - source/destination encoding. It is **not used** - for those cases where the DBAPI handles unicode - directly. - - To properly configure a system to accommodate Python - ``unicode`` objects, the DBAPI should be - configured to handle unicode to the greatest - degree as is appropriate - see - the notes on unicode pertaining to the specific - target database in use at :ref:`dialect_toplevel`. 
- - Areas where string encoding may need to be accommodated - outside of the DBAPI include zero or more of: - - * the values passed to bound parameters, corresponding to - the :class:`.Unicode` type or the :class:`.String` type - when ``convert_unicode`` is ``True``; - * the values returned in result set columns corresponding - to the :class:`.Unicode` type or the :class:`.String` - type when ``convert_unicode`` is ``True``; - * the string SQL statement passed to the DBAPI's - ``cursor.execute()`` method; - * the string names of the keys in the bound parameter - dictionary passed to the DBAPI's ``cursor.execute()`` - as well as ``cursor.setinputsizes()`` methods; - * the string column names retrieved from the DBAPI's - ``cursor.description`` attribute. - - When using Python 3, the DBAPI is required to support - *all* of the above values as Python ``unicode`` objects, - which in Python 3 are just known as ``str``. In Python 2, - the DBAPI does not specify unicode behavior at all, - so SQLAlchemy must make decisions for each of the above - values on a per-DBAPI basis - implementations are - completely inconsistent in their behavior. - - :param execution_options: Dictionary execution options which will - be applied to all connections. See - :meth:`~sqlalchemy.engine.base.Connection.execution_options` - - :param implicit_returning=True: When ``True``, a RETURNING- - compatible construct, if available, will be used to - fetch newly generated primary key values when a single row - INSERT statement is emitted with no existing returning() - clause. This applies to those backends which support RETURNING - or a compatible construct, including Postgresql, Firebird, Oracle, - Microsoft SQL Server. Set this to ``False`` to disable - the automatic usage of RETURNING. - - :param label_length=None: optional integer value which limits - the size of dynamically generated column labels to that many - characters. If less than 6, labels are generated as - "_(counter)". If ``None``, the value of - ``dialect.max_identifier_length`` is used instead. - - :param listeners: A list of one or more - :class:`~sqlalchemy.interfaces.PoolListener` objects which will - receive connection pool events. - - :param logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.engine" logger. Defaults to a hexstring of the - object's id. - - :param max_overflow=10: the number of connections to allow in - connection pool "overflow", that is connections that can be - opened above and beyond the pool_size setting, which defaults - to five. this is only used with :class:`~sqlalchemy.pool.QueuePool`. - - :param module=None: reference to a Python module object (the module itself, not - its string name). Specifies an alternate DBAPI module to be used - by the engine's dialect. Each sub-dialect references a specific DBAPI which - will be imported before first connect. This parameter causes the - import to be bypassed, and the given module to be used instead. - Can be used for testing of DBAPIs as well as to inject "mock" - DBAPI implementations into the :class:`.Engine`. - - :param pool=None: an already-constructed instance of - :class:`~sqlalchemy.pool.Pool`, such as a - :class:`~sqlalchemy.pool.QueuePool` instance. If non-None, this - pool will be used directly as the underlying connection pool - for the engine, bypassing whatever connection parameters are - present in the URL argument. 
For information on constructing - connection pools manually, see :ref:`pooling_toplevel`. - - :param poolclass=None: a :class:`~sqlalchemy.pool.Pool` - subclass, which will be used to create a connection pool - instance using the connection parameters given in the URL. Note - this differs from ``pool`` in that you don't actually - instantiate the pool in this case, you just indicate what type - of pool to be used. - - :param pool_logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.pool" logger. Defaults to a hexstring of the object's - id. - - :param pool_size=5: the number of connections to keep open - inside the connection pool. This used with :class:`~sqlalchemy.pool.QueuePool` as - well as :class:`~sqlalchemy.pool.SingletonThreadPool`. With - :class:`~sqlalchemy.pool.QueuePool`, a ``pool_size`` setting - of 0 indicates no limit; to disable pooling, set ``poolclass`` to - :class:`~sqlalchemy.pool.NullPool` instead. - - :param pool_recycle=-1: this setting causes the pool to recycle - connections after the given number of seconds has passed. It - defaults to -1, or no timeout. For example, setting to 3600 - means connections will be recycled after one hour. Note that - MySQL in particular will disconnect automatically if no - activity is detected on a connection for eight hours (although - this is configurable with the MySQLDB connection itself and the - server configuration as well). - - :param pool_reset_on_return='rollback': set the "reset on return" - behavior of the pool, which is whether ``rollback()``, - ``commit()``, or nothing is called upon connections - being returned to the pool. See the docstring for - ``reset_on_return`` at :class:`.Pool`. - - .. versionadded:: 0.7.6 - - :param pool_timeout=30: number of seconds to wait before giving - up on getting a connection from the pool. This is only used - with :class:`~sqlalchemy.pool.QueuePool`. - - :param strategy='plain': selects alternate engine implementations. - Currently available are: - - * the ``threadlocal`` strategy, which is described in - :ref:`threadlocal_strategy`; - * the ``mock`` strategy, which dispatches all statement - execution to a function passed as the argument ``executor``. - See `example in the FAQ `_. - - :param executor=None: a function taking arguments - ``(sql, *multiparams, **params)``, to which the ``mock`` strategy will - dispatch all statement execution. Used only by ``strategy='mock'``. - - """ - - strategy = kwargs.pop('strategy', default_strategy) - strategy = strategies.strategies[strategy] - return strategy.create(*args, **kwargs) - -def engine_from_config(configuration, prefix='sqlalchemy.', **kwargs): - """Create a new Engine instance using a configuration dictionary. - - The dictionary is typically produced from a config file where keys - are prefixed, such as sqlalchemy.url, sqlalchemy.echo, etc. The - 'prefix' argument indicates the prefix to be searched for. - - A select set of keyword arguments will be "coerced" to their - expected type based on string values. In a future release, this - functionality will be expanded and include dialect-specific - arguments. 
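A short usage sketch with a made-up configuration dictionary of the kind a config file would produce; note that keys are selected by the prefix and string values such as ``echo`` and ``pool_recycle`` are coerced to their expected types::

    from sqlalchemy import engine_from_config

    # hypothetical values, shown only to illustrate the prefix convention
    configuration = {
        'sqlalchemy.url': 'sqlite:///some.db',
        'sqlalchemy.echo': 'true',
        'sqlalchemy.pool_recycle': '3600',
    }
    engine = engine_from_config(configuration, prefix='sqlalchemy.')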
- """ - - opts = _coerce_config(configuration, prefix) - opts.update(kwargs) - url = opts.pop('url') - return create_engine(url, **opts) - -def _coerce_config(configuration, prefix): - """Convert configuration values to expected types.""" - - options = dict((key[len(prefix):], configuration[key]) - for key in configuration - if key.startswith(prefix)) - for option, type_ in ( - ('convert_unicode', util.bool_or_str('force')), - ('pool_timeout', int), - ('echo', util.bool_or_str('debug')), - ('echo_pool', util.bool_or_str('debug')), - ('pool_recycle', int), - ('pool_size', int), - ('max_overflow', int), - ('pool_threadlocal', bool), - ('use_native_unicode', bool), - ): - util.coerce_kw_type(options, option, type_) - return options diff --git a/libs/sqlalchemy/engine/base.py b/libs/sqlalchemy/engine/base.py deleted file mode 100644 index 302fb779..00000000 --- a/libs/sqlalchemy/engine/base.py +++ /dev/null @@ -1,3473 +0,0 @@ -# engine/base.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -"""Basic components for SQL execution and interfacing with DB-API. - -Defines the basic components used to interface DB-API modules with -higher-level statement-construction, connection-management, execution -and result contexts. -""" - -__all__ = [ - 'BufferedColumnResultProxy', 'BufferedColumnRow', - 'BufferedRowResultProxy','Compiled', 'Connectable', 'Connection', - 'Dialect', 'Engine','ExecutionContext', 'NestedTransaction', - 'ResultProxy', 'RootTransaction','RowProxy', 'SchemaIterator', - 'StringIO', 'Transaction', 'TwoPhaseTransaction', - 'connection_memoize'] - -import inspect, StringIO, sys, operator -from itertools import izip -from sqlalchemy import exc, schema, util, types, log, interfaces, \ - event, events -from sqlalchemy.sql import expression, util as sql_util -from sqlalchemy import processors -import collections - -class Dialect(object): - """Define the behavior of a specific database and DB-API combination. - - Any aspect of metadata definition, SQL query generation, - execution, result-set handling, or anything else which varies - between databases is defined under the general category of the - Dialect. The Dialect acts as a factory for other - database-specific object implementations including - ExecutionContext, Compiled, DefaultGenerator, and TypeEngine. - - All Dialects implement the following attributes: - - name - identifying name for the dialect from a DBAPI-neutral point of view - (i.e. 'sqlite') - - driver - identifying name for the dialect's DBAPI - - positional - True if the paramstyle for this Dialect is positional. - - paramstyle - the paramstyle to be used (some DB-APIs support multiple - paramstyles). - - convert_unicode - True if Unicode conversion should be applied to all ``str`` - types. - - encoding - type of encoding to use for unicode, usually defaults to - 'utf-8'. - - statement_compiler - a :class:`~Compiled` class used to compile SQL statements - - ddl_compiler - a :class:`~Compiled` class used to compile DDL statements - - server_version_info - a tuple containing a version number for the DB backend in use. - This value is only available for supporting dialects, and is - typically populated during the initial connection to the database. - - default_schema_name - the name of the default schema. 
This value is only available for - supporting dialects, and is typically populated during the - initial connection to the database. - - execution_ctx_cls - a :class:`.ExecutionContext` class used to handle statement execution - - execute_sequence_format - either the 'tuple' or 'list' type, depending on what cursor.execute() - accepts for the second argument (they vary). - - preparer - a :class:`~sqlalchemy.sql.compiler.IdentifierPreparer` class used to - quote identifiers. - - supports_alter - ``True`` if the database supports ``ALTER TABLE``. - - max_identifier_length - The maximum length of identifier names. - - supports_unicode_statements - Indicate whether the DB-API can receive SQL statements as Python - unicode strings - - supports_unicode_binds - Indicate whether the DB-API can receive string bind parameters - as Python unicode strings - - supports_sane_rowcount - Indicate whether the dialect properly implements rowcount for - ``UPDATE`` and ``DELETE`` statements. - - supports_sane_multi_rowcount - Indicate whether the dialect properly implements rowcount for - ``UPDATE`` and ``DELETE`` statements when executed via - executemany. - - preexecute_autoincrement_sequences - True if 'implicit' primary key functions must be executed separately - in order to get their value. This is currently oriented towards - Postgresql. - - implicit_returning - use RETURNING or equivalent during INSERT execution in order to load - newly generated primary keys and other column defaults in one execution, - which are then available via inserted_primary_key. - If an insert statement has returning() specified explicitly, - the "implicit" functionality is not used and inserted_primary_key - will not be available. - - dbapi_type_map - A mapping of DB-API type objects present in this Dialect's - DB-API implementation mapped to TypeEngine implementations used - by the dialect. - - This is used to apply types to result sets based on the DB-API - types present in cursor.description; it only takes effect for - result sets against textual statements where no explicit - typemap was present. - - colspecs - A dictionary of TypeEngine classes from sqlalchemy.types mapped - to subclasses that are specific to the dialect class. This - dictionary is class-level only and is not accessed from the - dialect instance itself. - - supports_default_values - Indicates if the construct ``INSERT INTO tablename DEFAULT - VALUES`` is supported - - supports_sequences - Indicates if the dialect supports CREATE SEQUENCE or similar. - - sequences_optional - If True, indicates if the "optional" flag on the Sequence() construct - should signal to not generate a CREATE SEQUENCE. Applies only to - dialects that support sequences. Currently used only to allow Postgresql - SERIAL to be used on a column that specifies Sequence() for usage on - other backends. - - supports_native_enum - Indicates if the dialect supports a native ENUM construct. - This will prevent types.Enum from generating a CHECK - constraint when that type is used. - - supports_native_boolean - Indicates if the dialect supports a native boolean construct. - This will prevent types.Boolean from generating a CHECK - constraint when that type is used. - - """ - - def create_connect_args(self, url): - """Build DB-API compatible connection arguments. - - Given a :class:`~sqlalchemy.engine.url.URL` object, returns a tuple - consisting of a `*args`/`**kwargs` suitable to send directly - to the dbapi's connect function. 
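A hedged illustration of the attribute list above and of `create_connect_args()`, using the bundled SQLite dialect; the values shown in comments are typical rather than guaranteed, and the PostgreSQL DSN is a placeholder (no connection is made):

```python
from sqlalchemy import create_engine
from sqlalchemy.engine import url

eng = create_engine('sqlite://')
d = eng.dialect
# The documented attributes are plain members of the dialect instance:
print(d.name, d.driver, d.paramstyle, d.positional)
# sqlite pysqlite qmark True

eng.connect().close()          # first connect populates version info
print(d.server_version_info)   # e.g. (3, 8, 2)

# Most dialects implement create_connect_args() on top of
# URL.translate_connect_args(), which maps URL fields to keyword names:
u = url.make_url('postgresql://scott:tiger@localhost:5432/test')
print(u.translate_connect_args())
# {'username': 'scott', 'password': 'tiger', 'host': 'localhost',
#  'port': 5432, 'database': 'test'}
```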
- - """ - - raise NotImplementedError() - - @classmethod - def type_descriptor(cls, typeobj): - """Transform a generic type to a dialect-specific type. - - Dialect classes will usually use the - :func:`~sqlalchemy.types.adapt_type` function in the types module to - make this job easy. - - The returned result is cached *per dialect class* so can - contain no dialect-instance state. - - """ - - raise NotImplementedError() - - def initialize(self, connection): - """Called during strategized creation of the dialect with a - connection. - - Allows dialects to configure options based on server version info or - other properties. - - The connection passed here is a SQLAlchemy Connection object, - with full capabilities. - - The initalize() method of the base dialect should be called via - super(). - - """ - - pass - - def reflecttable(self, connection, table, include_columns=None): - """Load table description from the database. - - Given a :class:`.Connection` and a - :class:`~sqlalchemy.schema.Table` object, reflect its columns and - properties from the database. If include_columns (a list or - set) is specified, limit the autoload to the given column - names. - - The default implementation uses the - :class:`~sqlalchemy.engine.reflection.Inspector` interface to - provide the output, building upon the granular table/column/ - constraint etc. methods of :class:`.Dialect`. - - """ - - raise NotImplementedError() - - def get_columns(self, connection, table_name, schema=None, **kw): - """Return information about columns in `table_name`. - - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return column - information as a list of dictionaries with these keys: - - name - the column's name - - type - [sqlalchemy.types#TypeEngine] - - nullable - boolean - - default - the column's default value - - autoincrement - boolean - - sequence - a dictionary of the form - {'name' : str, 'start' :int, 'increment': int} - - Additional column attributes may be present. - """ - - raise NotImplementedError() - - def get_primary_keys(self, connection, table_name, schema=None, **kw): - """Return information about primary keys in `table_name`. - - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return primary - key information as a list of column names. - - """ - raise NotImplementedError() - - def get_pk_constraint(self, table_name, schema=None, **kw): - """Return information about the primary key constraint on - table_name`. - - Given a string `table_name`, and an optional string `schema`, return - primary key information as a dictionary with these keys: - - constrained_columns - a list of column names that make up the primary key - - name - optional name of the primary key constraint. - - """ - raise NotImplementedError() - - def get_foreign_keys(self, connection, table_name, schema=None, **kw): - """Return information about foreign_keys in `table_name`. 
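These reflection hooks are normally reached through the `Inspector` facade rather than called directly; a small sketch against in-memory SQLite:

```python
from sqlalchemy import create_engine
from sqlalchemy.engine import reflection

eng = create_engine('sqlite://')
eng.execute("CREATE TABLE movie (id INTEGER PRIMARY KEY, title TEXT)")

# Inspector builds on the Dialect get_columns()/get_pk_constraint() hooks:
insp = reflection.Inspector.from_engine(eng)
for col in insp.get_columns('movie'):
    print(col['name'], col['type'], col['nullable'])
print(insp.get_pk_constraint('movie'))
# e.g. {'constrained_columns': ['id'], 'name': None}
```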
- - Given a :class:`.Connection`, a string - `table_name`, and an optional string `schema`, return foreign - key information as a list of dicts with these keys: - - name - the constraint's name - - constrained_columns - a list of column names that make up the foreign key - - referred_schema - the name of the referred schema - - referred_table - the name of the referred table - - referred_columns - a list of column names in the referred table that correspond to - constrained_columns - """ - - raise NotImplementedError() - - def get_table_names(self, connection, schema=None, **kw): - """Return a list of table names for `schema`.""" - - raise NotImplementedError - - def get_view_names(self, connection, schema=None, **kw): - """Return a list of all view names available in the database. - - schema: - Optional, retrieve names from a non-default schema. - """ - - raise NotImplementedError() - - def get_view_definition(self, connection, view_name, schema=None, **kw): - """Return view definition. - - Given a :class:`.Connection`, a string - `view_name`, and an optional string `schema`, return the view - definition. - """ - - raise NotImplementedError() - - def get_indexes(self, connection, table_name, schema=None, **kw): - """Return information about indexes in `table_name`. - - Given a :class:`.Connection`, a string - `table_name` and an optional string `schema`, return index - information as a list of dictionaries with these keys: - - name - the index's name - - column_names - list of column names in order - - unique - boolean - """ - - raise NotImplementedError() - - def normalize_name(self, name): - """convert the given name to lowercase if it is detected as - case insensitive. - - this method is only used if the dialect defines - requires_name_normalize=True. - - """ - raise NotImplementedError() - - def denormalize_name(self, name): - """convert the given name to a case insensitive identifier - for the backend if it is an all-lowercase name. - - this method is only used if the dialect defines - requires_name_normalize=True. - - """ - raise NotImplementedError() - - def has_table(self, connection, table_name, schema=None): - """Check the existence of a particular table in the database. - - Given a :class:`.Connection` object and a string - `table_name`, return True if the given table (possibly within - the specified `schema`) exists in the database, False - otherwise. - """ - - raise NotImplementedError() - - def has_sequence(self, connection, sequence_name, schema=None): - """Check the existence of a particular sequence in the database. - - Given a :class:`.Connection` object and a string - `sequence_name`, return True if the given sequence exists in - the database, False otherwise. - """ - - raise NotImplementedError() - - def _get_server_version_info(self, connection): - """Retrieve the server version info from the given connection. - - This is used by the default implementation to populate the - "server_version_info" attribute and is called exactly - once upon first connect. - - """ - - raise NotImplementedError() - - def _get_default_schema_name(self, connection): - """Return the string name of the currently selected schema from - the given connection. - - This is used by the default implementation to populate the - "default_schema_name" attribute and is called exactly - once upon first connect. 
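The existence checks can also be exercised at the dialect level, though in application code `Table(..., autoload=True)` or the `Inspector` is the public route; a minimal sketch:

```python
from sqlalchemy import create_engine

eng = create_engine('sqlite://')
eng.execute("CREATE TABLE kv (k TEXT PRIMARY KEY, v TEXT)")

conn = eng.connect()
print(eng.dialect.has_table(conn, 'kv'))     # True
print(eng.dialect.get_table_names(conn))     # e.g. [u'kv']
conn.close()
```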
- - """ - - raise NotImplementedError() - - def do_begin(self, connection): - """Provide an implementation of *connection.begin()*, given a - DB-API connection.""" - - raise NotImplementedError() - - def do_rollback(self, connection): - """Provide an implementation of *connection.rollback()*, given - a DB-API connection.""" - - raise NotImplementedError() - - def create_xid(self): - """Create a two-phase transaction ID. - - This id will be passed to do_begin_twophase(), - do_rollback_twophase(), do_commit_twophase(). Its format is - unspecified. - """ - - raise NotImplementedError() - - def do_commit(self, connection): - """Provide an implementation of *connection.commit()*, given a - DB-API connection.""" - - raise NotImplementedError() - - def do_savepoint(self, connection, name): - """Create a savepoint with the given name on a SQLAlchemy - connection.""" - - raise NotImplementedError() - - def do_rollback_to_savepoint(self, connection, name): - """Rollback a SQL Alchemy connection to the named savepoint.""" - - raise NotImplementedError() - - def do_release_savepoint(self, connection, name): - """Release the named savepoint on a SQL Alchemy connection.""" - - raise NotImplementedError() - - def do_begin_twophase(self, connection, xid): - """Begin a two phase transaction on the given connection.""" - - raise NotImplementedError() - - def do_prepare_twophase(self, connection, xid): - """Prepare a two phase transaction on the given connection.""" - - raise NotImplementedError() - - def do_rollback_twophase(self, connection, xid, is_prepared=True, - recover=False): - """Rollback a two phase transaction on the given connection.""" - - raise NotImplementedError() - - def do_commit_twophase(self, connection, xid, is_prepared=True, - recover=False): - """Commit a two phase transaction on the given connection.""" - - raise NotImplementedError() - - def do_recover_twophase(self, connection): - """Recover list of uncommited prepared two phase transaction - identifiers on the given connection.""" - - raise NotImplementedError() - - def do_executemany(self, cursor, statement, parameters, context=None): - """Provide an implementation of ``cursor.executemany(statement, - parameters)``.""" - - raise NotImplementedError() - - def do_execute(self, cursor, statement, parameters, context=None): - """Provide an implementation of ``cursor.execute(statement, - parameters)``.""" - - raise NotImplementedError() - - def do_execute_no_params(self, cursor, statement, parameters, context=None): - """Provide an implementation of ``cursor.execute(statement)``. - - The parameter collection should not be sent. - - """ - - raise NotImplementedError() - - def is_disconnect(self, e, connection, cursor): - """Return True if the given DB-API error indicates an invalid - connection""" - - raise NotImplementedError() - - def connect(self): - """return a callable which sets up a newly created DBAPI connection. - - The callable accepts a single argument "conn" which is the - DBAPI connection itself. It has no return value. - - This is used to set dialect-wide per-connection options such as - isolation modes, unicode modes, etc. - - If a callable is returned, it will be assembled into a pool listener - that receives the direct DBAPI connection, with all wrappers removed. - - If None is returned, no listener will be generated. 
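The on-connect callable described under `connect()` has a public counterpart in pool events; a sketch that flips a SQLite pragma on every fresh DBAPI connection:

```python
from sqlalchemy import create_engine, event

eng = create_engine('sqlite://')

def on_connect(dbapi_conn, connection_record):
    # Same role as the Dialect.connect() hook: runs once per new DBAPI
    # connection, with all pool wrappers removed.
    dbapi_conn.execute("PRAGMA foreign_keys=ON")

event.listen(eng.pool, 'connect', on_connect)
eng.connect().close()   # first checkout creates a connection, firing the hook
```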
- - """ - return None - - def reset_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, revert its isolation to the default.""" - - raise NotImplementedError() - - def set_isolation_level(self, dbapi_conn, level): - """Given a DBAPI connection, set its isolation level.""" - - raise NotImplementedError() - - def get_isolation_level(self, dbapi_conn): - """Given a DBAPI connection, return its isolation level.""" - - raise NotImplementedError() - - -class ExecutionContext(object): - """A messenger object for a Dialect that corresponds to a single - execution. - - ExecutionContext should have these data members: - - connection - Connection object which can be freely used by default value - generators to execute SQL. This Connection should reference the - same underlying connection/transactional resources of - root_connection. - - root_connection - Connection object which is the source of this ExecutionContext. This - Connection may have close_with_result=True set, in which case it can - only be used once. - - dialect - dialect which created this ExecutionContext. - - cursor - DB-API cursor procured from the connection, - - compiled - if passed to constructor, sqlalchemy.engine.base.Compiled object - being executed, - - statement - string version of the statement to be executed. Is either - passed to the constructor, or must be created from the - sql.Compiled object by the time pre_exec() has completed. - - parameters - bind parameters passed to the execute() method. For compiled - statements, this is a dictionary or list of dictionaries. For - textual statements, it should be in a format suitable for the - dialect's paramstyle (i.e. dict or list of dicts for non - positional, list or list of lists/tuples for positional). - - isinsert - True if the statement is an INSERT. - - isupdate - True if the statement is an UPDATE. - - should_autocommit - True if the statement is a "committable" statement. - - postfetch_cols - a list of Column objects for which a server-side default or - inline SQL expression value was fired off. Applies to inserts - and updates. - """ - - def create_cursor(self): - """Return a new cursor generated from this ExecutionContext's - connection. - - Some dialects may wish to change the behavior of - connection.cursor(), such as postgresql which may return a PG - "server side" cursor. - """ - - raise NotImplementedError() - - def pre_exec(self): - """Called before an execution of a compiled statement. - - If a compiled statement was passed to this ExecutionContext, - the `statement` and `parameters` datamembers must be - initialized after this statement is complete. - """ - - raise NotImplementedError() - - def post_exec(self): - """Called after the execution of a compiled statement. - - If a compiled statement was passed to this ExecutionContext, - the `last_insert_ids`, `last_inserted_params`, etc. - datamembers should be available after this method completes. - """ - - raise NotImplementedError() - - def result(self): - """Return a result object corresponding to this ExecutionContext. - - Returns a ResultProxy. 
- """ - - raise NotImplementedError() - - def handle_dbapi_exception(self, e): - """Receive a DBAPI exception which occurred upon execute, result - fetch, etc.""" - - raise NotImplementedError() - - def should_autocommit_text(self, statement): - """Parse the given textual statement and return True if it refers to - a "committable" statement""" - - raise NotImplementedError() - - def lastrow_has_defaults(self): - """Return True if the last INSERT or UPDATE row contained - inlined or database-side defaults. - """ - - raise NotImplementedError() - - def get_rowcount(self): - """Return the DBAPI ``cursor.rowcount`` value, or in some - cases an interpreted value. - - See :attr:`.ResultProxy.rowcount` for details on this. - - """ - - raise NotImplementedError() - - -class Compiled(object): - """Represent a compiled SQL or DDL expression. - - The ``__str__`` method of the ``Compiled`` object should produce - the actual text of the statement. ``Compiled`` objects are - specific to their underlying database dialect, and also may - or may not be specific to the columns referenced within a - particular set of bind parameters. In no case should the - ``Compiled`` object be dependent on the actual values of those - bind parameters, even though it may reference those values as - defaults. - """ - - def __init__(self, dialect, statement, bind=None): - """Construct a new ``Compiled`` object. - - :param dialect: ``Dialect`` to compile against. - - :param statement: ``ClauseElement`` to be compiled. - - :param bind: Optional Engine or Connection to compile this - statement against. - """ - - self.dialect = dialect - self.bind = bind - if statement is not None: - self.statement = statement - self.can_execute = statement.supports_execution - self.string = self.process(self.statement) - - @util.deprecated("0.7", ":class:`.Compiled` objects now compile " - "within the constructor.") - def compile(self): - """Produce the internal string representation of this element.""" - pass - - @property - def sql_compiler(self): - """Return a Compiled that is capable of processing SQL expressions. - - If this compiler is one, it would likely just return 'self'. - - """ - - raise NotImplementedError() - - def process(self, obj, **kwargs): - return obj._compiler_dispatch(self, **kwargs) - - def __str__(self): - """Return the string text of the generated SQL or DDL.""" - - return self.string or '' - - def construct_params(self, params=None): - """Return the bind params for this compiled object. - - :param params: a dict of string/object pairs whose values will - override bind values compiled in to the - statement. 
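A `Compiled` object is usually obtained through `ClauseElement.compile()`; note how the bound value appears in `params` but never in the SQL string, as the docstring above requires:

```python
from sqlalchemy import create_engine, MetaData, Table, Column, Integer, select

eng = create_engine('sqlite://')
t = Table('t', MetaData(), Column('x', Integer))

stmt = select([t]).where(t.c.x == 5)
compiled = stmt.compile(bind=eng)   # dialect-specific Compiled
print(str(compiled))                # SELECT t.x FROM t WHERE t.x = ?
print(compiled.params)              # {'x_1': 5}
```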
- """ - - raise NotImplementedError() - - @property - def params(self): - """Return the bind params for this compiled object.""" - return self.construct_params() - - def execute(self, *multiparams, **params): - """Execute this compiled object.""" - - e = self.bind - if e is None: - raise exc.UnboundExecutionError( - "This Compiled object is not bound to any Engine " - "or Connection.") - return e._execute_compiled(self, multiparams, params) - - def scalar(self, *multiparams, **params): - """Execute this compiled object and return the result's - scalar value.""" - - return self.execute(*multiparams, **params).scalar() - - -class TypeCompiler(object): - """Produces DDL specification for TypeEngine objects.""" - - def __init__(self, dialect): - self.dialect = dialect - - def process(self, type_): - return type_._compiler_dispatch(self) - - -class Connectable(object): - """Interface for an object which supports execution of SQL constructs. - - The two implementations of :class:`.Connectable` are :class:`.Connection` and - :class:`.Engine`. - - Connectable must also implement the 'dialect' member which references a - :class:`.Dialect` instance. - - """ - - def connect(self, **kwargs): - """Return a :class:`.Connection` object. - - Depending on context, this may be ``self`` if this object - is already an instance of :class:`.Connection`, or a newly - procured :class:`.Connection` if this object is an instance - of :class:`.Engine`. - - """ - - def contextual_connect(self): - """Return a :class:`.Connection` object which may be part of an ongoing - context. - - Depending on context, this may be ``self`` if this object - is already an instance of :class:`.Connection`, or a newly - procured :class:`.Connection` if this object is an instance - of :class:`.Engine`. - - """ - - raise NotImplementedError() - - @util.deprecated("0.7", "Use the create() method on the given schema " - "object directly, i.e. :meth:`.Table.create`, " - ":meth:`.Index.create`, :meth:`.MetaData.create_all`") - def create(self, entity, **kwargs): - """Emit CREATE statements for the given schema entity.""" - - raise NotImplementedError() - - @util.deprecated("0.7", "Use the drop() method on the given schema " - "object directly, i.e. :meth:`.Table.drop`, " - ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") - def drop(self, entity, **kwargs): - """Emit DROP statements for the given schema entity.""" - - raise NotImplementedError() - - def execute(self, object, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`.""" - raise NotImplementedError() - - def scalar(self, object, *multiparams, **params): - """Executes and returns the first column of the first row. - - The underlying cursor is closed after execution. - """ - raise NotImplementedError() - - def _run_visitor(self, visitorcallable, element, - **kwargs): - raise NotImplementedError() - - def _execute_clauseelement(self, elem, multiparams=None, params=None): - raise NotImplementedError() - - -class Connection(Connectable): - """Provides high-level functionality for a wrapped DB-API connection. - - Provides execution support for string-based SQL statements as well as - :class:`.ClauseElement`, :class:`.Compiled` and :class:`.DefaultGenerator` - objects. Provides a :meth:`begin` method to return :class:`.Transaction` - objects. - - The Connection object is **not** thread-safe. 
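Because `Engine` and `Connection` both implement `Connectable`, helpers can accept either one without caring which; a small sketch:

```python
from sqlalchemy import create_engine

def count_rows(connectable, table):
    # Works with an Engine or a Connection alike.
    return connectable.execute("SELECT COUNT(*) FROM %s" % table).scalar()

eng = create_engine('sqlite://')
eng.execute("CREATE TABLE n (i INTEGER)")
print(count_rows(eng, 'n'))     # via Engine (implicit connection)
conn = eng.connect()
print(count_rows(conn, 'n'))    # via Connection
conn.close()
```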
While a Connection can be - shared among threads using properly synchronized access, it is still - possible that the underlying DBAPI connection may not support shared - access between threads. Check the DBAPI documentation for details. - - The Connection object represents a single dbapi connection checked out - from the connection pool. In this state, the connection pool has no affect - upon the connection, including its expiration or timeout state. For the - connection pool to properly manage connections, connections should be - returned to the connection pool (i.e. ``connection.close()``) whenever the - connection is not in use. - - .. index:: - single: thread safety; Connection - - """ - - def __init__(self, engine, connection=None, close_with_result=False, - _branch=False, _execution_options=None): - """Construct a new Connection. - - The constructor here is not public and is only called only by an - :class:`.Engine`. See :meth:`.Engine.connect` and - :meth:`.Engine.contextual_connect` methods. - - """ - self.engine = engine - self.dialect = engine.dialect - self.__connection = connection or engine.raw_connection() - self.__transaction = None - self.should_close_with_result = close_with_result - self.__savepoint_seq = 0 - self.__branch = _branch - self.__invalid = False - self._has_events = engine._has_events - self._echo = self.engine._should_log_info() - if _execution_options: - self._execution_options =\ - engine._execution_options.union(_execution_options) - else: - self._execution_options = engine._execution_options - - def _branch(self): - """Return a new Connection which references this Connection's - engine and connection; but does not have close_with_result enabled, - and also whose close() method does nothing. - - This is used to execute "sub" statements within a single execution, - usually an INSERT statement. - """ - - return self.engine._connection_cls( - self.engine, - self.__connection, _branch=True) - - def _clone(self): - """Create a shallow copy of this Connection. - - """ - c = self.__class__.__new__(self.__class__) - c.__dict__ = self.__dict__.copy() - return c - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def execution_options(self, **opt): - """ Set non-SQL options for the connection which take effect - during execution. - - The method returns a copy of this :class:`.Connection` which references - the same underlying DBAPI connection, but also defines the given - execution options which will take effect for a call to - :meth:`execute`. As the new :class:`.Connection` references the same - underlying resource, it is probably best to ensure that the copies - would be discarded immediately, which is implicit if used as in:: - - result = connection.execution_options(stream_results=True).\\ - execute(stmt) - - :meth:`.Connection.execution_options` accepts all options as those - accepted by :meth:`.Executable.execution_options`. Additionally, - it includes options that are applicable only to - :class:`.Connection`. - - :param autocommit: Available on: Connection, statement. - When True, a COMMIT will be invoked after execution - when executed in 'autocommit' mode, i.e. when an explicit - transaction is not begun on the connection. Note that DBAPI - connections by default are always in a transaction - SQLAlchemy uses - rules applied to different kinds of statements to determine if - COMMIT will be invoked in order to provide its "autocommit" feature. 
- Typically, all INSERT/UPDATE/DELETE statements as well as - CREATE/DROP statements have autocommit behavior enabled; SELECT - constructs do not. Use this option when invoking a SELECT or other - specific SQL construct where COMMIT is desired (typically when - calling stored procedures and such), and an explicit - transaction is not in progress. - - :param compiled_cache: Available on: Connection. - A dictionary where :class:`.Compiled` objects - will be cached when the :class:`.Connection` compiles a clause - expression into a :class:`.Compiled` object. - It is the user's responsibility to - manage the size of this dictionary, which will have keys - corresponding to the dialect, clause element, the column - names within the VALUES or SET clause of an INSERT or UPDATE, - as well as the "batch" mode for an INSERT or UPDATE statement. - The format of this dictionary is not guaranteed to stay the - same in future releases. - - Note that the ORM makes use of its own "compiled" caches for - some operations, including flush operations. The caching - used by the ORM internally supersedes a cache dictionary - specified here. - - :param isolation_level: Available on: Connection. - Set the transaction isolation level for - the lifespan of this connection. Valid values include - those string values accepted by the ``isolation_level`` - parameter passed to :func:`.create_engine`, and are - database specific, including those for :ref:`sqlite_toplevel`, - :ref:`postgresql_toplevel` - see those dialect's documentation - for further info. - - Note that this option necessarily affects the underlying - DBAPI connection for the lifespan of the originating - :class:`.Connection`, and is not per-execution. This - setting is not removed until the underlying DBAPI connection - is returned to the connection pool, i.e. - the :meth:`.Connection.close` method is called. - - :param no_parameters: When ``True``, if the final parameter - list or dictionary is totally empty, will invoke the - statement on the cursor as ``cursor.execute(statement)``, - not passing the parameter collection at all. - Some DBAPIs such as psycopg2 and mysql-python consider - percent signs as significant only when parameters are - present; this option allows code to generate SQL - containing percent signs (and possibly other characters) - that is neutral regarding whether it's executed by the DBAPI - or piped into a script that's later invoked by - command line tools. - - .. versionadded:: 0.7.6 - - :param stream_results: Available on: Connection, statement. - Indicate to the dialect that results should be - "streamed" and not pre-buffered, if possible. This is a limitation - of many DBAPIs. The flag is currently understood only by the - psycopg2 dialect. - - """ - c = self._clone() - c._execution_options = c._execution_options.union(opt) - if 'isolation_level' in opt: - c._set_isolation_level() - return c - - def _set_isolation_level(self): - self.dialect.set_isolation_level(self.connection, - self._execution_options['isolation_level']) - self.connection._connection_record.finalize_callback = \ - self.dialect.reset_isolation_level - - @property - def closed(self): - """Return True if this connection is closed.""" - - return not self.__invalid and '_Connection__connection' \ - not in self.__dict__ - - @property - def invalidated(self): - """Return True if this connection was invalidated.""" - - return self.__invalid - - @property - def connection(self): - "The underlying DB-API connection managed by this Connection." 
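Putting `execution_options()` to work, following the copy-and-discard pattern the docstring recommends; `stream_results` is only meaningful to psycopg2, so on SQLite it is simply carried along, and the `compiled_cache` part sketches the option described above:

```python
from sqlalchemy import create_engine, MetaData, Table, Column, Integer

eng = create_engine('sqlite://')
conn = eng.connect()

# The returned copy shares the same DBAPI connection; use it immediately.
result = conn.execution_options(stream_results=True).execute("SELECT 1")
print(result.fetchall())    # [(1,)]

# autocommit: force a COMMIT after a statement that is not normally
# treated as committable.
conn.execution_options(autocommit=True).execute("SELECT 1")

# compiled_cache: reusing one construct yields one cached Compiled.
t = Table('t', MetaData(), Column('x', Integer))
t.create(eng)
cache = {}
cached_conn = conn.execution_options(compiled_cache=cache)
ins = t.insert()
cached_conn.execute(ins, x=1)
cached_conn.execute(ins, x=2)   # second call skips compilation
print(len(cache))               # 1
conn.close()
```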
- - try: - return self.__connection - except AttributeError: - return self._revalidate_connection() - - def _revalidate_connection(self): - if self.__invalid: - if self.__transaction is not None: - raise exc.InvalidRequestError( - "Can't reconnect until invalid " - "transaction is rolled back") - self.__connection = self.engine.raw_connection() - self.__invalid = False - return self.__connection - raise exc.ResourceClosedError("This Connection is closed") - - @property - def _connection_is_valid(self): - # use getattr() for is_valid to support exceptions raised in - # dialect initializer, where the connection is not wrapped in - # _ConnectionFairy - - return getattr(self.__connection, 'is_valid', False) - - @property - def _still_open_and_connection_is_valid(self): - return \ - not self.closed and \ - not self.invalidated and \ - getattr(self.__connection, 'is_valid', False) - - @property - def info(self): - """A collection of per-DB-API connection instance properties.""" - - return self.connection.info - - def connect(self): - """Returns self. - - This ``Connectable`` interface method returns self, allowing - Connections to be used interchangeably with Engines in most - situations that require a bind. - """ - - return self - - def contextual_connect(self, **kwargs): - """Returns self. - - This ``Connectable`` interface method returns self, allowing - Connections to be used interchangeably with Engines in most - situations that require a bind. - """ - - return self - - def invalidate(self, exception=None): - """Invalidate the underlying DBAPI connection associated with - this Connection. - - The underlying DB-API connection is literally closed (if - possible), and is discarded. Its source connection pool will - typically lazily create a new connection to replace it. - - Upon the next usage, this Connection will attempt to reconnect - to the pool with a new connection. - - Transactions in progress remain in an "opened" state (even though the - actual transaction is gone); these must be explicitly rolled back - before a reconnect on this Connection can proceed. This is to prevent - applications from accidentally continuing their transactional - operations in a non-transactional state. - - """ - if self.invalidated: - return - - if self.closed: - raise exc.ResourceClosedError("This Connection is closed") - - if self._connection_is_valid: - self.__connection.invalidate(exception) - del self.__connection - self.__invalid = True - - - def detach(self): - """Detach the underlying DB-API connection from its connection pool. - - This Connection instance will remain usable. When closed, - the DB-API connection will be literally closed and not - returned to its pool. The pool will typically lazily create a - new connection to replace the detached connection. - - This method can be used to insulate the rest of an application - from a modified state on a connection (such as a transaction - isolation level or similar). Also see - :class:`~sqlalchemy.interfaces.PoolListener` for a mechanism to modify - connection state when connections leave and return to their - connection pool. - """ - - self.__connection.detach() - - def begin(self): - """Begin a transaction and return a transaction handle. - - The returned object is an instance of :class:`.Transaction`. - This object represents the "scope" of the transaction, - which completes when either the :meth:`.Transaction.rollback` - or :meth:`.Transaction.commit` method is called. 
- - Nested calls to :meth:`.begin` on the same :class:`.Connection` - will return new :class:`.Transaction` objects that represent - an emulated transaction within the scope of the enclosing - transaction, that is:: - - trans = conn.begin() # outermost transaction - trans2 = conn.begin() # "nested" - trans2.commit() # does nothing - trans.commit() # actually commits - - Calls to :meth:`.Transaction.commit` only have an effect - when invoked via the outermost :class:`.Transaction` object, though the - :meth:`.Transaction.rollback` method of any of the - :class:`.Transaction` objects will roll back the - transaction. - - See also: - - :meth:`.Connection.begin_nested` - use a SAVEPOINT - - :meth:`.Connection.begin_twophase` - use a two phase /XID transaction - - :meth:`.Engine.begin` - context manager available from :class:`.Engine`. - - """ - - if self.__transaction is None: - self.__transaction = RootTransaction(self) - return self.__transaction - else: - return Transaction(self, self.__transaction) - - def begin_nested(self): - """Begin a nested transaction and return a transaction handle. - - The returned object is an instance of :class:`.NestedTransaction`. - - Nested transactions require SAVEPOINT support in the - underlying database. Any transaction in the hierarchy may - ``commit`` and ``rollback``, however the outermost transaction - still controls the overall ``commit`` or ``rollback`` of the - transaction of a whole. - - See also :meth:`.Connection.begin`, - :meth:`.Connection.begin_twophase`. - """ - - if self.__transaction is None: - self.__transaction = RootTransaction(self) - else: - self.__transaction = NestedTransaction(self, self.__transaction) - return self.__transaction - - def begin_twophase(self, xid=None): - """Begin a two-phase or XA transaction and return a transaction - handle. - - The returned object is an instance of :class:`.TwoPhaseTransaction`, - which in addition to the methods provided by - :class:`.Transaction`, also provides a :meth:`~.TwoPhaseTransaction.prepare` - method. - - :param xid: the two phase transaction id. If not supplied, a - random id will be generated. - - See also :meth:`.Connection.begin`, - :meth:`.Connection.begin_twophase`. 
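Two sketches of the transaction APIs above. The first shows plain `begin()` nesting being emulated and runs on the in-memory SQLite used elsewhere. The second is an outline only: the DSN and table are placeholders, and it needs a backend with XA support, e.g. PostgreSQL with `max_prepared_transactions` enabled.

```python
from sqlalchemy import create_engine

eng = create_engine('sqlite://')
conn = eng.connect()
conn.execute("CREATE TABLE x (a INTEGER)")

trans = conn.begin()      # outermost transaction
conn.execute("INSERT INTO x (a) VALUES (1)")
trans2 = conn.begin()     # "nested": emulated, dependent on the outer one
conn.execute("INSERT INTO x (a) VALUES (2)")
trans2.commit()           # no-op; only the outermost commit is real
trans.commit()            # both rows are committed here
print(conn.execute("SELECT COUNT(*) FROM x").scalar())   # 2
conn.close()
```

```python
# Outline only: requires a 2PC-capable backend; DSN and table are placeholders.
from sqlalchemy import create_engine

eng = create_engine('postgresql://scott:tiger@localhost/test')
conn = eng.connect()

xa = conn.begin_twophase()   # xid generated via dialect.create_xid()
conn.execute("INSERT INTO x (a) VALUES (1)")
xa.prepare()                 # phase one
xa.commit()                  # phase two

# After a crash between prepare() and commit(), xids can be recovered:
for xid in conn.recover_twophase():
    conn.commit_prepared(xid, recover=True)
conn.close()
```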
- - """ - - if self.__transaction is not None: - raise exc.InvalidRequestError( - "Cannot start a two phase transaction when a transaction " - "is already in progress.") - if xid is None: - xid = self.engine.dialect.create_xid(); - self.__transaction = TwoPhaseTransaction(self, xid) - return self.__transaction - - def recover_twophase(self): - return self.engine.dialect.do_recover_twophase(self) - - def rollback_prepared(self, xid, recover=False): - self.engine.dialect.do_rollback_twophase(self, xid, recover=recover) - - def commit_prepared(self, xid, recover=False): - self.engine.dialect.do_commit_twophase(self, xid, recover=recover) - - def in_transaction(self): - """Return True if a transaction is in progress.""" - - return self.__transaction is not None - - def _begin_impl(self): - if self._echo: - self.engine.logger.info("BEGIN (implicit)") - - if self._has_events: - self.engine.dispatch.begin(self) - - try: - self.engine.dialect.do_begin(self.connection) - except Exception, e: - self._handle_dbapi_exception(e, None, None, None, None) - raise - - def _rollback_impl(self): - if self._has_events: - self.engine.dispatch.rollback(self) - - if self._still_open_and_connection_is_valid: - if self._echo: - self.engine.logger.info("ROLLBACK") - try: - self.engine.dialect.do_rollback(self.connection) - self.__transaction = None - except Exception, e: - self._handle_dbapi_exception(e, None, None, None, None) - raise - else: - self.__transaction = None - - def _commit_impl(self): - if self._has_events: - self.engine.dispatch.commit(self) - - if self._echo: - self.engine.logger.info("COMMIT") - try: - self.engine.dialect.do_commit(self.connection) - self.__transaction = None - except Exception, e: - self._handle_dbapi_exception(e, None, None, None, None) - raise - - def _savepoint_impl(self, name=None): - if self._has_events: - self.engine.dispatch.savepoint(self, name) - - if name is None: - self.__savepoint_seq += 1 - name = 'sa_savepoint_%s' % self.__savepoint_seq - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_savepoint(self, name) - return name - - def _rollback_to_savepoint_impl(self, name, context): - if self._has_events: - self.engine.dispatch.rollback_savepoint(self, name, context) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_rollback_to_savepoint(self, name) - self.__transaction = context - - def _release_savepoint_impl(self, name, context): - if self._has_events: - self.engine.dispatch.release_savepoint(self, name, context) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_release_savepoint(self, name) - self.__transaction = context - - def _begin_twophase_impl(self, xid): - if self._has_events: - self.engine.dispatch.begin_twophase(self, xid) - - if self._still_open_and_connection_is_valid: - self.engine.dialect.do_begin_twophase(self, xid) - - def _prepare_twophase_impl(self, xid): - if self._has_events: - self.engine.dispatch.prepare_twophase(self, xid) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - self.engine.dialect.do_prepare_twophase(self, xid) - - def _rollback_twophase_impl(self, xid, is_prepared): - if self._has_events: - self.engine.dispatch.rollback_twophase(self, xid, is_prepared) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - self.engine.dialect.do_rollback_twophase(self, xid, is_prepared) - self.__transaction = None - - def _commit_twophase_impl(self, xid, 
is_prepared): - if self._has_events: - self.engine.dispatch.commit_twophase(self, xid, is_prepared) - - if self._still_open_and_connection_is_valid: - assert isinstance(self.__transaction, TwoPhaseTransaction) - self.engine.dialect.do_commit_twophase(self, xid, is_prepared) - self.__transaction = None - - def _autorollback(self): - if not self.in_transaction(): - self._rollback_impl() - - def close(self): - """Close this :class:`.Connection`. - - This results in a release of the underlying database - resources, that is, the DBAPI connection referenced - internally. The DBAPI connection is typically restored - back to the connection-holding :class:`.Pool` referenced - by the :class:`.Engine` that produced this - :class:`.Connection`. Any transactional state present on - the DBAPI connection is also unconditionally released via - the DBAPI connection's ``rollback()`` method, regardless - of any :class:`.Transaction` object that may be - outstanding with regards to this :class:`.Connection`. - - After :meth:`~.Connection.close` is called, the - :class:`.Connection` is permanently in a closed state, - and will allow no further operations. - - """ - - try: - conn = self.__connection - except AttributeError: - return - if not self.__branch: - conn.close() - self.__invalid = False - del self.__connection - self.__transaction = None - - def scalar(self, object, *multiparams, **params): - """Executes and returns the first column of the first row. - - The underlying result/cursor is closed after execution. - """ - - return self.execute(object, *multiparams, **params).scalar() - - def execute(self, object, *multiparams, **params): - """Executes the a SQL statement construct and returns a :class:`.ResultProxy`. - - :param object: The statement to be executed. May be - one of: - - * a plain string - * any :class:`.ClauseElement` construct that is also - a subclass of :class:`.Executable`, such as a - :func:`~.expression.select` construct - * a :class:`.FunctionElement`, such as that generated - by :attr:`.func`, will be automatically wrapped in - a SELECT statement, which is then executed. - * a :class:`.DDLElement` object - * a :class:`.DefaultGenerator` object - * a :class:`.Compiled` object - - :param \*multiparams/\**params: represent bound parameter - values to be used in the execution. Typically, - the format is either a collection of one or more - dictionaries passed to \*multiparams:: - - conn.execute( - table.insert(), - {"id":1, "value":"v1"}, - {"id":2, "value":"v2"} - ) - - ...or individual key/values interpreted by \**params:: - - conn.execute( - table.insert(), id=1, value="v1" - ) - - In the case that a plain SQL string is passed, and the underlying - DBAPI accepts positional bind parameters, a collection of tuples - or individual values in \*multiparams may be passed:: - - conn.execute( - "INSERT INTO table (id, value) VALUES (?, ?)", - (1, "v1"), (2, "v2") - ) - - conn.execute( - "INSERT INTO table (id, value) VALUES (?, ?)", - 1, "v1" - ) - - Note above, the usage of a question mark "?" or other - symbol is contingent upon the "paramstyle" accepted by the DBAPI - in use, which may be any of "qmark", "named", "pyformat", "format", - "numeric". See `pep-249 `_ - for details on paramstyle. - - To execute a textual SQL statement which uses bound parameters in a - DBAPI-agnostic way, use the :func:`~.expression.text` construct. 
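For textual SQL the DBAPI paramstyle matters, as the docstring notes; with sqlite3 (qmark) a raw string takes positional parameters, while `text()` keeps the statement DBAPI-agnostic:

```python
from sqlalchemy import create_engine, text

eng = create_engine('sqlite://')
conn = eng.connect()
conn.execute("CREATE TABLE kv (k TEXT, v TEXT)")

# Raw string + qmark positional parameters, executed as executemany():
conn.execute("INSERT INTO kv (k, v) VALUES (?, ?)", ('a', '1'), ('b', '2'))

# text() with named parameters works across paramstyles:
print(conn.execute(text("SELECT v FROM kv WHERE k = :k"), k='a').scalar())
conn.close()
```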
- - """ - for c in type(object).__mro__: - if c in Connection.executors: - return Connection.executors[c]( - self, - object, - multiparams, - params) - else: - raise exc.InvalidRequestError( - "Unexecutable object type: %s" % - type(object)) - - def __distill_params(self, multiparams, params): - """Given arguments from the calling form *multiparams, **params, - return a list of bind parameter structures, usually a list of - dictionaries. - - In the case of 'raw' execution which accepts positional parameters, - it may be a list of tuples or lists. - - """ - - if not multiparams: - if params: - return [params] - else: - return [] - elif len(multiparams) == 1: - zero = multiparams[0] - if isinstance(zero, (list, tuple)): - if not zero or hasattr(zero[0], '__iter__') and \ - not hasattr(zero[0], 'strip'): - return zero - else: - return [zero] - elif hasattr(zero, 'keys'): - return [zero] - else: - return [[zero]] - else: - if hasattr(multiparams[0], '__iter__') and \ - not hasattr(multiparams[0], 'strip'): - return multiparams - else: - return [multiparams] - - def _execute_function(self, func, multiparams, params): - """Execute a sql.FunctionElement object.""" - - return self._execute_clauseelement(func.select(), - multiparams, params) - - def _execute_default(self, default, multiparams, params): - """Execute a schema.ColumnDefault object.""" - - if self._has_events: - for fn in self.engine.dispatch.before_execute: - default, multiparams, params = \ - fn(self, default, multiparams, params) - - try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() - - dialect = self.dialect - ctx = dialect.execution_ctx_cls._init_default( - dialect, self, conn) - except Exception, e: - self._handle_dbapi_exception(e, None, None, None, None) - raise - - ret = ctx._exec_default(default, None) - if self.should_close_with_result: - self.close() - - if self._has_events: - self.engine.dispatch.after_execute(self, - default, multiparams, params, ret) - - return ret - - def _execute_ddl(self, ddl, multiparams, params): - """Execute a schema.DDL object.""" - - if self._has_events: - for fn in self.engine.dispatch.before_execute: - ddl, multiparams, params = \ - fn(self, ddl, multiparams, params) - - dialect = self.dialect - - compiled = ddl.compile(dialect=dialect) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_ddl, - compiled, - None, - compiled - ) - if self._has_events: - self.engine.dispatch.after_execute(self, - ddl, multiparams, params, ret) - return ret - - def _execute_clauseelement(self, elem, multiparams, params): - """Execute a sql.ClauseElement object.""" - - if self._has_events: - for fn in self.engine.dispatch.before_execute: - elem, multiparams, params = \ - fn(self, elem, multiparams, params) - - distilled_params = self.__distill_params(multiparams, params) - if distilled_params: - keys = distilled_params[0].keys() - else: - keys = [] - - dialect = self.dialect - if 'compiled_cache' in self._execution_options: - key = dialect, elem, tuple(keys), len(distilled_params) > 1 - if key in self._execution_options['compiled_cache']: - compiled_sql = self._execution_options['compiled_cache'][key] - else: - compiled_sql = elem.compile( - dialect=dialect, column_keys=keys, - inline=len(distilled_params) > 1) - self._execution_options['compiled_cache'][key] = compiled_sql - else: - compiled_sql = elem.compile( - dialect=dialect, column_keys=keys, - inline=len(distilled_params) > 1) - - - ret = self._execute_context( - dialect, - 
dialect.execution_ctx_cls._init_compiled, - compiled_sql, - distilled_params, - compiled_sql, distilled_params - ) - if self._has_events: - self.engine.dispatch.after_execute(self, - elem, multiparams, params, ret) - return ret - - def _execute_compiled(self, compiled, multiparams, params): - """Execute a sql.Compiled object.""" - - if self._has_events: - for fn in self.engine.dispatch.before_execute: - compiled, multiparams, params = \ - fn(self, compiled, multiparams, params) - - dialect = self.dialect - parameters=self.__distill_params(multiparams, params) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_compiled, - compiled, - parameters, - compiled, parameters - ) - if self._has_events: - self.engine.dispatch.after_execute(self, - compiled, multiparams, params, ret) - return ret - - def _execute_text(self, statement, multiparams, params): - """Execute a string SQL statement.""" - - if self._has_events: - for fn in self.engine.dispatch.before_execute: - statement, multiparams, params = \ - fn(self, statement, multiparams, params) - - dialect = self.dialect - parameters = self.__distill_params(multiparams, params) - ret = self._execute_context( - dialect, - dialect.execution_ctx_cls._init_statement, - statement, - parameters, - statement, parameters - ) - if self._has_events: - self.engine.dispatch.after_execute(self, - statement, multiparams, params, ret) - return ret - - def _execute_context(self, dialect, constructor, - statement, parameters, - *args): - """Create an :class:`.ExecutionContext` and execute, returning - a :class:`.ResultProxy`.""" - - try: - try: - conn = self.__connection - except AttributeError: - conn = self._revalidate_connection() - - context = constructor(dialect, self, conn, *args) - except Exception, e: - self._handle_dbapi_exception(e, - str(statement), parameters, - None, None) - raise - - if context.compiled: - context.pre_exec() - - cursor, statement, parameters = context.cursor, \ - context.statement, \ - context.parameters - - if not context.executemany: - parameters = parameters[0] - - if self._has_events: - for fn in self.engine.dispatch.before_cursor_execute: - statement, parameters = \ - fn(self, cursor, statement, parameters, - context, context.executemany) - - if self._echo: - self.engine.logger.info(statement) - self.engine.logger.info("%r", - sql_util._repr_params(parameters, batches=10)) - try: - if context.executemany: - self.dialect.do_executemany( - cursor, - statement, - parameters, - context) - elif not parameters and context.no_parameters: - self.dialect.do_execute_no_params( - cursor, - statement, - context) - else: - self.dialect.do_execute( - cursor, - statement, - parameters, - context) - except Exception, e: - self._handle_dbapi_exception( - e, - statement, - parameters, - cursor, - context) - raise - - - if self._has_events: - self.engine.dispatch.after_cursor_execute(self, cursor, - statement, - parameters, - context, - context.executemany) - - if context.compiled: - context.post_exec() - - if context.isinsert and not context.executemany: - context.post_insert() - - # create a resultproxy, get rowcount/implicit RETURNING - # rows, close cursor if no further results pending - result = context.get_result_proxy() - if context.isinsert: - if context._is_implicit_returning: - context._fetch_implicit_returning(result) - result.close(_autoclose_connection=False) - elif not context._is_explicit_returning: - result.close(_autoclose_connection=False) - elif result._metadata is None: - # no results, get rowcount - # 
(which requires open cursor on some drivers - # such as kintersbasdb, mxodbc), - result.rowcount - result.close(_autoclose_connection=False) - - if self.__transaction is None and context.should_autocommit: - self._commit_impl() - - if result.closed and self.should_close_with_result: - self.close() - - return result - - def _cursor_execute(self, cursor, statement, parameters): - """Execute a statement + params on the given cursor. - - Adds appropriate logging and exception handling. - - This method is used by DefaultDialect for special-case - executions, such as for sequences and column defaults. - The path of statement execution in the majority of cases - terminates at _execute_context(). - - """ - if self._echo: - self.engine.logger.info(statement) - self.engine.logger.info("%r", parameters) - try: - self.dialect.do_execute( - cursor, - statement, - parameters) - except Exception, e: - self._handle_dbapi_exception( - e, - statement, - parameters, - cursor, - None) - raise - - def _safe_close_cursor(self, cursor): - """Close the given cursor, catching exceptions - and turning into log warnings. - - """ - try: - cursor.close() - except Exception, e: - try: - ex_text = str(e) - except TypeError: - ex_text = repr(e) - self.connection._logger.warn("Error closing cursor: %s", ex_text) - - if isinstance(e, (SystemExit, KeyboardInterrupt)): - raise - - def _handle_dbapi_exception(self, - e, - statement, - parameters, - cursor, - context): - if getattr(self, '_reentrant_error', False): - # Py3K - #raise exc.DBAPIError.instance(statement, parameters, e, - # self.dialect.dbapi.Error) from e - # Py2K - raise exc.DBAPIError.instance(statement, - parameters, - e, - self.dialect.dbapi.Error), \ - None, sys.exc_info()[2] - # end Py2K - self._reentrant_error = True - try: - # non-DBAPI error - if we already got a context, - # or theres no string statement, don't wrap it - should_wrap = isinstance(e, self.dialect.dbapi.Error) or \ - (statement is not None and context is None) - - if should_wrap and context: - if self._has_events: - self.engine.dispatch.dbapi_error(self, - cursor, - statement, - parameters, - context, - e) - context.handle_dbapi_exception(e) - - is_disconnect = isinstance(e, self.dialect.dbapi.Error) and \ - self.dialect.is_disconnect(e, self.__connection, cursor) - - - if is_disconnect: - self.invalidate(e) - self.engine.dispose() - else: - if cursor: - self._safe_close_cursor(cursor) - self._autorollback() - if self.should_close_with_result: - self.close() - - if not should_wrap: - return - - # Py3K - #raise exc.DBAPIError.instance( - # statement, - # parameters, - # e, - # self.dialect.dbapi.Error, - # connection_invalidated=is_disconnect) \ - # from e - # Py2K - raise exc.DBAPIError.instance( - statement, - parameters, - e, - self.dialect.dbapi.Error, - connection_invalidated=is_disconnect), \ - None, sys.exc_info()[2] - # end Py2K - - finally: - del self._reentrant_error - - # poor man's multimethod/generic function thingy - executors = { - expression.FunctionElement: _execute_function, - expression.ClauseElement: _execute_clauseelement, - Compiled: _execute_compiled, - schema.SchemaItem: _execute_default, - schema.DDLElement: _execute_ddl, - basestring: _execute_text - } - - @util.deprecated("0.7", "Use the create() method on the given schema " - "object directly, i.e. 
:meth:`.Table.create`, " - ":meth:`.Index.create`, :meth:`.MetaData.create_all`") - def create(self, entity, **kwargs): - """Emit CREATE statements for the given schema entity.""" - - return self.engine.create(entity, connection=self, **kwargs) - - @util.deprecated("0.7", "Use the drop() method on the given schema " - "object directly, i.e. :meth:`.Table.drop`, " - ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") - def drop(self, entity, **kwargs): - """Emit DROP statements for the given schema entity.""" - - return self.engine.drop(entity, connection=self, **kwargs) - - @util.deprecated("0.7", "Use autoload=True with :class:`.Table`, " - "or use the :class:`.Inspector` object.") - def reflecttable(self, table, include_columns=None): - """Load table description from the database. - - Given a :class:`.Table` object, reflect its columns and - properties from the database, populating the given :class:`.Table` - object with attributes.. If include_columns (a list or - set) is specified, limit the autoload to the given column - names. - - The default implementation uses the - :class:`.Inspector` interface to - provide the output, building upon the granular table/column/ - constraint etc. methods of :class:`.Dialect`. - - """ - return self.engine.reflecttable(table, self, include_columns) - - def default_schema_name(self): - return self.engine.dialect.get_default_schema_name(self) - - def transaction(self, callable_, *args, **kwargs): - """Execute the given function within a transaction boundary. - - The function is passed this :class:`.Connection` - as the first argument, followed by the given \*args and \**kwargs, - e.g.:: - - def do_something(conn, x, y): - conn.execute("some statement", {'x':x, 'y':y}) - - conn.transaction(do_something, 5, 10) - - The operations inside the function are all invoked within the - context of a single :class:`.Transaction`. - Upon success, the transaction is committed. If an - exception is raised, the transaction is rolled back - before propagating the exception. - - .. note:: - - The :meth:`.transaction` method is superseded by - the usage of the Python ``with:`` statement, which can - be used with :meth:`.Connection.begin`:: - - with conn.begin(): - conn.execute("some statement", {'x':5, 'y':10}) - - As well as with :meth:`.Engine.begin`:: - - with engine.begin() as conn: - conn.execute("some statement", {'x':5, 'y':10}) - - See also: - - :meth:`.Engine.begin` - engine-level transactional - context - - :meth:`.Engine.transaction` - engine-level version of - :meth:`.Connection.transaction` - - """ - - trans = self.begin() - try: - ret = self.run_callable(callable_, *args, **kwargs) - trans.commit() - return ret - except: - trans.rollback() - raise - - def run_callable(self, callable_, *args, **kwargs): - """Given a callable object or function, execute it, passing - a :class:`.Connection` as the first argument. - - The given \*args and \**kwargs are passed subsequent - to the :class:`.Connection` argument. - - This function, along with :meth:`.Engine.run_callable`, - allows a function to be run with a :class:`.Connection` - or :class:`.Engine` object without the need to know - which one is being dealt with. - - """ - return callable_(self, *args, **kwargs) - - def _run_visitor(self, visitorcallable, element, **kwargs): - visitorcallable(self.dialect, self, - **kwargs).traverse_single(element) - - -class Transaction(object): - """Represent a database transaction in progress. 
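Stepping back to the execution internals above: the `before_cursor_execute` hook fired from `_execute_context()` is the usual observation point, and errors funneled through `_handle_dbapi_exception()` surface as wrapped `DBAPIError` subclasses. A sketch of both, assuming this 0.7-era event API:

```python
from sqlalchemy import create_engine, event, exc

eng = create_engine('sqlite://')

@event.listens_for(eng, 'before_cursor_execute')
def log_sql(conn, cursor, statement, parameters, context, executemany):
    print('SQL: %s %r' % (statement, parameters))

eng.execute("SELECT 1")     # prints: SQL: SELECT 1 ()

try:
    eng.execute("SELECT * FROM no_such_table")
except exc.DBAPIError as e:
    print(type(e).__name__)            # OperationalError (wrapping sqlite3's)
    print(e.connection_invalidated)    # False: statement error, not a disconnect
```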
- - The :class:`.Transaction` object is procured by - calling the :meth:`~.Connection.begin` method of - :class:`.Connection`:: - - from sqlalchemy import create_engine - engine = create_engine("postgresql://scott:tiger@localhost/test") - connection = engine.connect() - trans = connection.begin() - connection.execute("insert into x (a, b) values (1, 2)") - trans.commit() - - The object provides :meth:`.rollback` and :meth:`.commit` - methods in order to control transaction boundaries. It - also implements a context manager interface so that - the Python ``with`` statement can be used with the - :meth:`.Connection.begin` method:: - - with connection.begin(): - connection.execute("insert into x (a, b) values (1, 2)") - - The Transaction object is **not** threadsafe. - - See also: :meth:`.Connection.begin`, :meth:`.Connection.begin_twophase`, - :meth:`.Connection.begin_nested`. - - .. index:: - single: thread safety; Transaction - """ - - def __init__(self, connection, parent): - self.connection = connection - self._parent = parent or self - self.is_active = True - - def close(self): - """Close this :class:`.Transaction`. - - If this transaction is the base transaction in a begin/commit - nesting, the transaction will rollback(). Otherwise, the - method returns. - - This is used to cancel a Transaction without affecting the scope of - an enclosing transaction. - - """ - if not self._parent.is_active: - return - if self._parent is self: - self.rollback() - - def rollback(self): - """Roll back this :class:`.Transaction`. - - """ - if not self._parent.is_active: - return - self._do_rollback() - self.is_active = False - - def _do_rollback(self): - self._parent.rollback() - - def commit(self): - """Commit this :class:`.Transaction`.""" - - if not self._parent.is_active: - raise exc.InvalidRequestError("This transaction is inactive") - self._do_commit() - self.is_active = False - - def _do_commit(self): - pass - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None and self.is_active: - try: - self.commit() - except: - self.rollback() - raise - else: - self.rollback() - -class RootTransaction(Transaction): - def __init__(self, connection): - super(RootTransaction, self).__init__(connection, None) - self.connection._begin_impl() - - def _do_rollback(self): - if self.is_active: - self.connection._rollback_impl() - - def _do_commit(self): - if self.is_active: - self.connection._commit_impl() - - -class NestedTransaction(Transaction): - """Represent a 'nested', or SAVEPOINT transaction. - - A new :class:`.NestedTransaction` object may be procured - using the :meth:`.Connection.begin_nested` method. - - The interface is the same as that of :class:`.Transaction`. - - """ - def __init__(self, connection, parent): - super(NestedTransaction, self).__init__(connection, parent) - self._savepoint = self.connection._savepoint_impl() - - def _do_rollback(self): - if self.is_active: - self.connection._rollback_to_savepoint_impl( - self._savepoint, self._parent) - - def _do_commit(self): - if self.is_active: - self.connection._release_savepoint_impl( - self._savepoint, self._parent) - - -class TwoPhaseTransaction(Transaction): - """Represent a two-phase transaction. - - A new :class:`.TwoPhaseTransaction` object may be procured - using the :meth:`.Connection.begin_twophase` method. - - The interface is the same as that of :class:`.Transaction` - with the addition of the :meth:`prepare` method. 
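The `__exit__` handling above yields the usual commit-on-success, rollback-on-exception shape:

```python
from sqlalchemy import create_engine

eng = create_engine('sqlite://')
conn = eng.connect()
conn.execute("CREATE TABLE x (a INTEGER)")

try:
    with conn.begin():     # __exit__ commits, or rolls back on exception
        conn.execute("INSERT INTO x (a) VALUES (1)")
        raise ValueError('boom')
except ValueError:
    pass

print(conn.execute("SELECT COUNT(*) FROM x").scalar())   # 0, rolled back
conn.close()
```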
- - """ - def __init__(self, connection, xid): - super(TwoPhaseTransaction, self).__init__(connection, None) - self._is_prepared = False - self.xid = xid - self.connection._begin_twophase_impl(self.xid) - - def prepare(self): - """Prepare this :class:`.TwoPhaseTransaction`. - - After a PREPARE, the transaction can be committed. - - """ - if not self._parent.is_active: - raise exc.InvalidRequestError("This transaction is inactive") - self.connection._prepare_twophase_impl(self.xid) - self._is_prepared = True - - def _do_rollback(self): - self.connection._rollback_twophase_impl(self.xid, self._is_prepared) - - def _do_commit(self): - self.connection._commit_twophase_impl(self.xid, self._is_prepared) - - -class Engine(Connectable, log.Identified): - """ - Connects a :class:`~sqlalchemy.pool.Pool` and - :class:`~sqlalchemy.engine.base.Dialect` together to provide a source - of database connectivity and behavior. - - An :class:`.Engine` object is instantiated publicly using the - :func:`~sqlalchemy.create_engine` function. - - See also: - - :ref:`engines_toplevel` - - :ref:`connections_toplevel` - - """ - - _execution_options = util.immutabledict() - _has_events = False - _connection_cls = Connection - - def __init__(self, pool, dialect, url, - logging_name=None, echo=None, proxy=None, - execution_options=None - ): - self.pool = pool - self.url = url - self.dialect = dialect - if logging_name: - self.logging_name = logging_name - self.echo = echo - self.engine = self - log.instance_logger(self, echoflag=echo) - if proxy: - interfaces.ConnectionProxy._adapt_listener(self, proxy) - if execution_options: - if 'isolation_level' in execution_options: - raise exc.ArgumentError( - "'isolation_level' execution option may " - "only be specified on Connection.execution_options(). " - "To set engine-wide isolation level, " - "use the isolation_level argument to create_engine()." - ) - self.update_execution_options(**execution_options) - - dispatch = event.dispatcher(events.ConnectionEvents) - - def update_execution_options(self, **opt): - """Update the default execution_options dictionary - of this :class:`.Engine`. - - The given keys/values in \**opt are added to the - default execution options that will be used for - all connections. The initial contents of this dictionary - can be sent via the ``execution_options`` parameter - to :func:`.create_engine`. - - See :meth:`.Connection.execution_options` for more - details on execution options. - - """ - self._execution_options = \ - self._execution_options.union(opt) - - @property - def name(self): - """String name of the :class:`~sqlalchemy.engine.Dialect` in use by - this ``Engine``.""" - - return self.dialect.name - - @property - def driver(self): - """Driver name of the :class:`~sqlalchemy.engine.Dialect` in use by - this ``Engine``.""" - - return self.dialect.driver - - echo = log.echo_property() - - def __repr__(self): - return 'Engine(%s)' % str(self.url) - - def dispose(self): - """Dispose of the connection pool used by this :class:`.Engine`. - - A new connection pool is created immediately after the old one has - been disposed. This new pool, like all SQLAlchemy connection pools, - does not make any actual connections to the database until one is - first requested. - - This method has two general use cases: - - * When a dropped connection is detected, it is assumed that all - connections held by the pool are potentially dropped, and - the entire pool is replaced. 
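The `prepare()`/`commit()` split above implies the two-phase call sequence below; a hedged sketch assuming a dialect with two-phase (XA) support and an existing `connection`:

```python
# Hypothetical two-phase flow per TwoPhaseTransaction above; requires
# a backend dialect that implements the *_twophase_impl hooks.
xa = connection.begin_twophase()          # dialect generates an xid
connection.execute("insert into x (a, b) values (1, 2)")
xa.prepare()    # phase one: transaction becomes durable but uncommitted
xa.commit()     # phase two: commit the prepared transaction
```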
- - * An application may want to use :meth:`dispose` within a test - suite that is creating multiple engines. - - It is critical to note that :meth:`dispose` does **not** guarantee - that the application will release all open database connections - only - those connections that are checked into the pool are closed. - Connections which remain checked out or have been detached from - the engine are not affected. - - """ - self.pool.dispose() - self.pool = self.pool._replace() - - @util.deprecated("0.7", "Use the create() method on the given schema " - "object directly, i.e. :meth:`.Table.create`, " - ":meth:`.Index.create`, :meth:`.MetaData.create_all`") - def create(self, entity, connection=None, **kwargs): - """Emit CREATE statements for the given schema entity.""" - - from sqlalchemy.engine import ddl - - self._run_visitor(ddl.SchemaGenerator, entity, - connection=connection, **kwargs) - - @util.deprecated("0.7", "Use the drop() method on the given schema " - "object directly, i.e. :meth:`.Table.drop`, " - ":meth:`.Index.drop`, :meth:`.MetaData.drop_all`") - def drop(self, entity, connection=None, **kwargs): - """Emit DROP statements for the given schema entity.""" - - from sqlalchemy.engine import ddl - - self._run_visitor(ddl.SchemaDropper, entity, - connection=connection, **kwargs) - - def _execute_default(self, default): - connection = self.contextual_connect() - try: - return connection._execute_default(default, (), {}) - finally: - connection.close() - - @property - @util.deprecated("0.7", - "Use :attr:`~sqlalchemy.sql.expression.func` to create function constructs.") - def func(self): - return expression._FunctionGenerator(bind=self) - - @util.deprecated("0.7", - "Use :func:`.expression.text` to create text constructs.") - def text(self, text, *args, **kwargs): - """Return a :func:`~sqlalchemy.sql.expression.text` construct, - bound to this engine. - - This is equivalent to:: - - text("SELECT * FROM table", bind=engine) - - """ - - return expression.text(text, bind=self, *args, **kwargs) - - def _run_visitor(self, visitorcallable, element, - connection=None, **kwargs): - if connection is None: - conn = self.contextual_connect(close_with_result=False) - else: - conn = connection - try: - conn._run_visitor(visitorcallable, element, **kwargs) - finally: - if connection is None: - conn.close() - - class _trans_ctx(object): - def __init__(self, conn, transaction, close_with_result): - self.conn = conn - self.transaction = transaction - self.close_with_result = close_with_result - - def __enter__(self): - return self.conn - - def __exit__(self, type, value, traceback): - if type is not None: - self.transaction.rollback() - else: - self.transaction.commit() - if not self.close_with_result: - self.conn.close() - - def begin(self, close_with_result=False): - """Return a context manager delivering a :class:`.Connection` - with a :class:`.Transaction` established. - - E.g.:: - - with engine.begin() as conn: - conn.execute("insert into table (x, y, z) values (1, 2, 3)") - conn.execute("my_special_procedure(5)") - - Upon successful operation, the :class:`.Transaction` - is committed. If an error is raised, the :class:`.Transaction` - is rolled back. - - The ``close_with_result`` flag is normally ``False``, and indicates - that the :class:`.Connection` will be closed when the operation - is complete. 
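A sketch of the detect-and-replace recovery pattern that `dispose()` above supports; `engine` is assumed, and a real implementation would consult `dialect.is_disconnect()` rather than catching every exception:

```python
# Hypothetical recovery flow: on a suspected disconnect, replace the
# whole pool; connections still checked out are deliberately untouched.
try:
    conn = engine.connect()
except Exception:
    engine.dispose()           # old pool dropped, fresh empty pool created
    conn = engine.connect()    # new DBAPI connection made on demand
```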
When set to ``True``, it indicates the :class:`.Connection` - is in "single use" mode, where the :class:`.ResultProxy` - returned by the first call to :meth:`.Connection.execute` will - close the :class:`.Connection` when that :class:`.ResultProxy` - has exhausted all result rows. - - .. versionadded:: 0.7.6 - - See also: - - :meth:`.Engine.connect` - procure a :class:`.Connection` from - an :class:`.Engine`. - - :meth:`.Connection.begin` - start a :class:`.Transaction` - for a particular :class:`.Connection`. - - """ - conn = self.contextual_connect(close_with_result=close_with_result) - try: - trans = conn.begin() - except: - conn.close() - raise - return Engine._trans_ctx(conn, trans, close_with_result) - - def transaction(self, callable_, *args, **kwargs): - """Execute the given function within a transaction boundary. - - The function is passed a :class:`.Connection` newly procured - from :meth:`.Engine.contextual_connect` as the first argument, - followed by the given \*args and \**kwargs. - - e.g.:: - - def do_something(conn, x, y): - conn.execute("some statement", {'x':x, 'y':y}) - - engine.transaction(do_something, 5, 10) - - The operations inside the function are all invoked within the - context of a single :class:`.Transaction`. - Upon success, the transaction is committed. If an - exception is raised, the transaction is rolled back - before propagating the exception. - - .. note:: - - The :meth:`.transaction` method is superseded by - the usage of the Python ``with:`` statement, which can - be used with :meth:`.Engine.begin`:: - - with engine.begin() as conn: - conn.execute("some statement", {'x':5, 'y':10}) - - See also: - - :meth:`.Engine.begin` - engine-level transactional - context - - :meth:`.Connection.transaction` - connection-level version of - :meth:`.Engine.transaction` - - """ - - conn = self.contextual_connect() - try: - return conn.transaction(callable_, *args, **kwargs) - finally: - conn.close() - - def run_callable(self, callable_, *args, **kwargs): - """Given a callable object or function, execute it, passing - a :class:`.Connection` as the first argument. - - The given \*args and \**kwargs are passed subsequent - to the :class:`.Connection` argument. - - This function, along with :meth:`.Connection.run_callable`, - allows a function to be run with a :class:`.Connection` - or :class:`.Engine` object without the need to know - which one is being dealt with. - - """ - conn = self.contextual_connect() - try: - return conn.run_callable(callable_, *args, **kwargs) - finally: - conn.close() - - def execute(self, statement, *multiparams, **params): - """Executes the given construct and returns a :class:`.ResultProxy`. - - The arguments are the same as those used by - :meth:`.Connection.execute`. - - Here, a :class:`.Connection` is acquired using the - :meth:`~.Engine.contextual_connect` method, and the statement executed - with that connection. The returned :class:`.ResultProxy` is flagged - such that when the :class:`.ResultProxy` is exhausted and its - underlying cursor is closed, the :class:`.Connection` created here - will also be closed, which allows its associated DBAPI connection - resource to be returned to the connection pool. 
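The `_trans_ctx` helper above is a small context manager: commit on clean exit, roll back on exception, then close the connection unless it is in single-use mode. A dependency-free restatement using `contextlib`, for illustration only (the deleted code implements this as a class):

```python
from contextlib import contextmanager

@contextmanager
def begin(engine, close_with_result=False):
    # Acquire a connection; if begin() itself fails, close and re-raise.
    conn = engine.contextual_connect(close_with_result=close_with_result)
    try:
        trans = conn.begin()
    except Exception:
        conn.close()
        raise
    try:
        yield conn          # body of the `with` block runs here
    except Exception:
        trans.rollback()    # error inside the block: roll back
        raise
    else:
        trans.commit()      # clean exit: commit
    finally:
        if not close_with_result:
            conn.close()    # single-use connections close with the result
```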
- - """ - - connection = self.contextual_connect(close_with_result=True) - return connection.execute(statement, *multiparams, **params) - - def scalar(self, statement, *multiparams, **params): - return self.execute(statement, *multiparams, **params).scalar() - - def _execute_clauseelement(self, elem, multiparams=None, params=None): - connection = self.contextual_connect(close_with_result=True) - return connection._execute_clauseelement(elem, multiparams, params) - - def _execute_compiled(self, compiled, multiparams, params): - connection = self.contextual_connect(close_with_result=True) - return connection._execute_compiled(compiled, multiparams, params) - - def connect(self, **kwargs): - """Return a new :class:`.Connection` object. - - The :class:`.Connection` object is a facade that uses a DBAPI connection internally - in order to communicate with the database. This connection is procured - from the connection-holding :class:`.Pool` referenced by this :class:`.Engine`. - When the :meth:`~.Connection.close` method of the :class:`.Connection` object is called, - the underlying DBAPI connection is then returned to the connection pool, - where it may be used again in a subsequent call to :meth:`~.Engine.connect`. - - """ - - return self._connection_cls(self, **kwargs) - - def contextual_connect(self, close_with_result=False, **kwargs): - """Return a :class:`.Connection` object which may be part of some ongoing context. - - By default, this method does the same thing as :meth:`.Engine.connect`. - Subclasses of :class:`.Engine` may override this method - to provide contextual behavior. - - :param close_with_result: When True, the first :class:`.ResultProxy` created - by the :class:`.Connection` will call the :meth:`.Connection.close` method - of that connection as soon as any pending result rows are exhausted. - This is used to supply the "connectionless execution" behavior provided - by the :meth:`.Engine.execute` method. - - """ - - return self._connection_cls(self, - self.pool.connect(), - close_with_result=close_with_result, - **kwargs) - - def table_names(self, schema=None, connection=None): - """Return a list of all table names available in the database. - - :param schema: Optional, retrieve names from a non-default schema. - - :param connection: Optional, use a specified connection. Default is - the ``contextual_connect`` for this ``Engine``. - """ - - if connection is None: - conn = self.contextual_connect() - else: - conn = connection - if not schema: - schema = self.dialect.default_schema_name - try: - return self.dialect.get_table_names(conn, schema) - finally: - if connection is None: - conn.close() - - @util.deprecated("0.7", "Use autoload=True with :class:`.Table`, " - "or use the :class:`.Inspector` object.") - def reflecttable(self, table, connection=None, include_columns=None): - """Load table description from the database. - - Uses the given :class:`.Connection`, or if None produces - its own :class:`.Connection`, and passes the ``table`` - and ``include_columns`` arguments onto that - :class:`.Connection` object's :meth:`.Connection.reflecttable` - method. The :class:`.Table` object is then populated - with new attributes. 
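A hedged one-liner exercising `Engine.table_names()` documented above; `engine` is an assumption:

```python
# Hypothetical usage of Engine.table_names(); a contextual connection
# is opened and closed internally when none is passed in.
for name in engine.table_names():          # default schema unless given
    print(name)
```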
- - """ - if connection is None: - conn = self.contextual_connect() - else: - conn = connection - try: - self.dialect.reflecttable(conn, table, include_columns) - finally: - if connection is None: - conn.close() - - def has_table(self, table_name, schema=None): - return self.run_callable(self.dialect.has_table, table_name, schema) - - def raw_connection(self): - """Return a "raw" DBAPI connection from the connection pool. - - The returned object is a proxied version of the DBAPI - connection object used by the underlying driver in use. - The object will have all the same behavior as the real DBAPI - connection, except that its ``close()`` method will result in the - connection being returned to the pool, rather than being closed - for real. - - This method provides direct DBAPI connection access for - special situations. In most situations, the :class:`.Connection` - object should be used, which is procured using the - :meth:`.Engine.connect` method. - - """ - - return self.pool.unique_connection() - - -# This reconstructor is necessary so that pickles with the C extension or -# without use the same Binary format. -try: - # We need a different reconstructor on the C extension so that we can - # add extra checks that fields have correctly been initialized by - # __setstate__. - from sqlalchemy.cresultproxy import safe_rowproxy_reconstructor - - # The extra function embedding is needed so that the - # reconstructor function has the same signature whether or not - # the extension is present. - def rowproxy_reconstructor(cls, state): - return safe_rowproxy_reconstructor(cls, state) -except ImportError: - def rowproxy_reconstructor(cls, state): - obj = cls.__new__(cls) - obj.__setstate__(state) - return obj - -try: - from sqlalchemy.cresultproxy import BaseRowProxy -except ImportError: - class BaseRowProxy(object): - __slots__ = ('_parent', '_row', '_processors', '_keymap') - - def __init__(self, parent, row, processors, keymap): - """RowProxy objects are constructed by ResultProxy objects.""" - - self._parent = parent - self._row = row - self._processors = processors - self._keymap = keymap - - def __reduce__(self): - return (rowproxy_reconstructor, - (self.__class__, self.__getstate__())) - - def values(self): - """Return the values represented by this RowProxy as a list.""" - return list(self) - - def __iter__(self): - for processor, value in izip(self._processors, self._row): - if processor is None: - yield value - else: - yield processor(value) - - def __len__(self): - return len(self._row) - - def __getitem__(self, key): - try: - processor, obj, index = self._keymap[key] - except KeyError: - processor, obj, index = self._parent._key_fallback(key) - except TypeError: - if isinstance(key, slice): - l = [] - for processor, value in izip(self._processors[key], - self._row[key]): - if processor is None: - l.append(value) - else: - l.append(processor(value)) - return tuple(l) - else: - raise - if index is None: - raise exc.InvalidRequestError( - "Ambiguous column name '%s' in result set! " - "try 'use_labels' option on select statement." % key) - if processor is not None: - return processor(self._row[index]) - else: - return self._row[index] - - def __getattr__(self, name): - try: - return self[name] - except KeyError, e: - raise AttributeError(e.args[0]) - - -class RowProxy(BaseRowProxy): - """Proxy values from a single cursor row. 
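`BaseRowProxy.__getitem__` above resolves integer, string, and column keys through `_keymap`, whose records are `(processor, obj, index)` tuples. A runnable toy of that lookup, with made-up data:

```python
# Toy illustration of the (processor, obj, index) keymap records used
# by BaseRowProxy above; the row data and names are made up.
row = ("42", "ed")
int_processor = int            # result processor for the first column

keymap = {
    0: (int_processor, None, 0),
    1: (None, None, 1),
    "id": (int_processor, None, 0),
    "name": (None, None, 1),
}

def getitem(key):
    processor, obj, index = keymap[key]
    value = row[index]
    # Apply the type's result processor only when one exists.
    return processor(value) if processor is not None else value

print(getitem(0), getitem("name"))   # -> 42 ed
```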
- - Mostly follows "ordered dictionary" behavior, mapping result - values to the string-based column name, the integer position of - the result in the row, as well as Column instances which can be - mapped to the original Columns that produced this result set (for - results that correspond to constructed SQL expressions). - """ - __slots__ = () - - def __contains__(self, key): - return self._parent._has_key(self._row, key) - - def __getstate__(self): - return { - '_parent': self._parent, - '_row': tuple(self) - } - - def __setstate__(self, state): - self._parent = parent = state['_parent'] - self._row = state['_row'] - self._processors = parent._processors - self._keymap = parent._keymap - - __hash__ = None - - def __eq__(self, other): - return other is self or other == tuple(self) - - def __ne__(self, other): - return not self.__eq__(other) - - def __repr__(self): - return repr(tuple(self)) - - def has_key(self, key): - """Return True if this RowProxy contains the given key.""" - - return self._parent._has_key(self._row, key) - - def items(self): - """Return a list of tuples, each tuple containing a key/value pair.""" - # TODO: no coverage here - return [(key, self[key]) for key in self.iterkeys()] - - def keys(self): - """Return the list of keys as strings represented by this RowProxy.""" - - return self._parent.keys - - def iterkeys(self): - return iter(self._parent.keys) - - def itervalues(self): - return iter(self) - -try: - # Register RowProxy with Sequence, - # so sequence protocol is implemented - from collections import Sequence - Sequence.register(RowProxy) -except ImportError: - pass - - -class ResultMetaData(object): - """Handle cursor.description, applying additional info from an execution - context.""" - - def __init__(self, parent, metadata): - self._processors = processors = [] - - # We do not strictly need to store the processor in the key mapping, - # though it is faster in the Python version (probably because of the - # saved attribute lookup self._processors) - self._keymap = keymap = {} - self.keys = [] - context = parent.context - dialect = context.dialect - typemap = dialect.dbapi_type_map - translate_colname = context._translate_colname - - # high precedence key values. - primary_keymap = {} - - for i, rec in enumerate(metadata): - colname = rec[0] - coltype = rec[1] - - if dialect.description_encoding: - colname = dialect._description_decoder(colname) - - if translate_colname: - colname, untranslated = translate_colname(colname) - - if context.result_map: - try: - name, obj, type_ = context.result_map[colname.lower()] - except KeyError: - name, obj, type_ = \ - colname, None, typemap.get(coltype, types.NULLTYPE) - else: - name, obj, type_ = \ - colname, None, typemap.get(coltype, types.NULLTYPE) - - processor = type_._cached_result_processor(dialect, coltype) - - processors.append(processor) - rec = (processor, obj, i) - - # indexes as keys. This is only needed for the Python version of - # RowProxy (the C version uses a faster path for integer indexes). - primary_keymap[i] = rec - - # populate primary keymap, looking for conflicts. - if primary_keymap.setdefault(name.lower(), rec) is not rec: - # place a record that doesn't have the "index" - this - # is interpreted later as an AmbiguousColumnError, - # but only when actually accessed. Columns - # colliding by name is not a problem if those names - # aren't used; integer and ColumnElement access is always - # unambiguous. 
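When two result columns collide on a name, the code above stores a record with `index` of `None` and only errors on actual access (the real code raises `InvalidRequestError`; this runnable toy uses `KeyError`):

```python
# Toy version of the deferred ambiguity error described above:
# colliding names get index=None, and only an actual lookup fails.
keymap = {"id": (None, None, None)}   # two result columns both named "id"

def lookup(key):
    processor, obj, index = keymap[key]
    if index is None:
        raise KeyError(
            "Ambiguous column name %r in result set! "
            "try 'use_labels' option on select statement." % key)
    return index

try:
    lookup("id")
except KeyError as e:
    print(e)
```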
- primary_keymap[name.lower()] = (processor, obj, None) - - if dialect.requires_name_normalize: - colname = dialect.normalize_name(colname) - - self.keys.append(colname) - if obj: - for o in obj: - keymap[o] = rec - - if translate_colname and \ - untranslated: - keymap[untranslated] = rec - - # overwrite keymap values with those of the - # high precedence keymap. - keymap.update(primary_keymap) - - if parent._echo: - context.engine.logger.debug( - "Col %r", tuple(x[0] for x in metadata)) - - @util.pending_deprecation("0.8", "sqlite dialect uses " - "_translate_colname() now") - def _set_keymap_synonym(self, name, origname): - """Set a synonym for the given name. - - Some dialects (SQLite at the moment) may use this to - adjust the column names that are significant within a - row. - - """ - rec = (processor, obj, i) = self._keymap[origname.lower()] - if self._keymap.setdefault(name, rec) is not rec: - self._keymap[name] = (processor, obj, None) - - def _key_fallback(self, key, raiseerr=True): - map = self._keymap - result = None - if isinstance(key, basestring): - result = map.get(key.lower()) - # fallback for targeting a ColumnElement to a textual expression - # this is a rare use case which only occurs when matching text() - # or colummn('name') constructs to ColumnElements, or after a - # pickle/unpickle roundtrip - elif isinstance(key, expression.ColumnElement): - if key._label and key._label.lower() in map: - result = map[key._label.lower()] - elif hasattr(key, 'name') and key.name.lower() in map: - # match is only on name. - result = map[key.name.lower()] - # search extra hard to make sure this - # isn't a column/label name overlap. - # this check isn't currently available if the row - # was unpickled. - if result is not None and \ - result[1] is not None: - for obj in result[1]: - if key._compare_name_for_result(obj): - break - else: - result = None - if result is None: - if raiseerr: - raise exc.NoSuchColumnError( - "Could not locate column in row for column '%s'" % - expression._string_or_unprintable(key)) - else: - return None - else: - map[key] = result - return result - - def _has_key(self, row, key): - if key in self._keymap: - return True - else: - return self._key_fallback(key, False) is not None - - def __getstate__(self): - return { - '_pickled_keymap': dict( - (key, index) - for key, (processor, obj, index) in self._keymap.iteritems() - if isinstance(key, (basestring, int)) - ), - 'keys': self.keys - } - - def __setstate__(self, state): - # the row has been processed at pickling time so we don't need any - # processor anymore - self._processors = [None for _ in xrange(len(state['keys']))] - self._keymap = keymap = {} - for key, index in state['_pickled_keymap'].iteritems(): - # not preserving "obj" here, unfortunately our - # proxy comparison fails with the unpickle - keymap[key] = (None, None, index) - self.keys = state['keys'] - self._echo = False - - -class ResultProxy(object): - """Wraps a DB-API cursor object to provide easier access to row columns. - - Individual columns may be accessed by their integer position, - case-insensitive column name, or by ``schema.Column`` - object. e.g.:: - - row = fetchone() - - col1 = row[0] # access via integer position - - col2 = row['col2'] # access via name - - col3 = row[mytable.c.mycol] # access via Column object. - - ``ResultProxy`` also handles post-processing of result column - data using ``TypeEngine`` objects, which are referenced from - the originating SQL statement that produced this result set. 
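The `__getstate__`/`__setstate__` pair above keeps only string and integer keys and discards processors on restore; a runnable toy of that round trip, with made-up records:

```python
# Toy of the ResultMetaData pickling rule above: only string/int keys
# survive, and restored records carry no processor or column object.
keymap = {
    0: (int, "colobj", 0),
    "id": (int, "colobj", 0),
    ("not", "picklable"): (int, "colobj", 0),   # e.g. a ColumnElement key
}
pickled = dict((k, index) for k, (proc, obj, index) in keymap.items()
               if isinstance(k, (str, int)))
restored = dict((k, (None, None, index)) for k, index in pickled.items())
print(sorted(restored, key=str))   # [0, 'id']
```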
- - """ - - _process_row = RowProxy - out_parameters = None - _can_close_connection = False - - def __init__(self, context): - self.context = context - self.dialect = context.dialect - self.closed = False - self.cursor = self._saved_cursor = context.cursor - self.connection = context.root_connection - self._echo = self.connection._echo and \ - context.engine._should_log_debug() - self._init_metadata() - - def _init_metadata(self): - metadata = self._cursor_description() - if metadata is None: - self._metadata = None - else: - self._metadata = ResultMetaData(self, metadata) - - def keys(self): - """Return the current set of string keys for rows.""" - if self._metadata: - return self._metadata.keys - else: - return [] - - @util.memoized_property - def rowcount(self): - """Return the 'rowcount' for this result. - - The 'rowcount' reports the number of rows *matched* - by the WHERE criterion of an UPDATE or DELETE statement. - - .. note:: - - Notes regarding :attr:`.ResultProxy.rowcount`: - - - * This attribute returns the number of rows *matched*, - which is not necessarily the same as the number of rows - that were actually *modified* - an UPDATE statement, for example, - may have no net change on a given row if the SET values - given are the same as those present in the row already. - Such a row would be matched but not modified. - On backends that feature both styles, such as MySQL, - rowcount is configured by default to return the match - count in all cases. - - * :attr:`.ResultProxy.rowcount` is *only* useful in conjunction - with an UPDATE or DELETE statement. Contrary to what the Python - DBAPI says, it does *not* return the - number of rows available from the results of a SELECT statement - as DBAPIs cannot support this functionality when rows are - unbuffered. - - * :attr:`.ResultProxy.rowcount` may not be fully implemented by - all dialects. In particular, most DBAPIs do not support an - aggregate rowcount result from an executemany call. - The :meth:`.ResultProxy.supports_sane_rowcount` and - :meth:`.ResultProxy.supports_sane_multi_rowcount` methods - will report from the dialect if each usage is known to be - supported. - - * Statements that use RETURNING may not return a correct - rowcount. - - """ - try: - return self.context.rowcount - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, self.cursor, self.context) - raise - - @property - def lastrowid(self): - """return the 'lastrowid' accessor on the DBAPI cursor. - - This is a DBAPI specific method and is only functional - for those backends which support it, for statements - where it is appropriate. It's behavior is not - consistent across backends. - - Usage of this method is normally unnecessary; the - :attr:`~ResultProxy.inserted_primary_key` attribute provides a - tuple of primary key values for a newly inserted row, - regardless of database backend. - - """ - try: - return self._saved_cursor.lastrowid - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, - self._saved_cursor, self.context) - raise - - @property - def returns_rows(self): - """True if this :class:`.ResultProxy` returns rows. - - I.e. if it is legal to call the methods - :meth:`~.ResultProxy.fetchone`, - :meth:`~.ResultProxy.fetchmany` - :meth:`~.ResultProxy.fetchall`. - - """ - return self._metadata is not None - - @property - def is_insert(self): - """True if this :class:`.ResultProxy` is the result - of a executing an expression language compiled - :func:`.expression.insert` construct. 
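Given the caveats above (rows *matched*, not modified; UPDATE/DELETE only), a hedged usage sketch; `engine` and the `movies` table are assumptions:

```python
# Hypothetical rowcount check per the notes above: meaningful only for
# UPDATE/DELETE, and it counts matched rows, not changed ones.
result = engine.execute("UPDATE movies SET wanted = 1 WHERE year < 2000")
if result.rowcount == 0:
    print("no rows matched the WHERE clause")
```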
- - When True, this implies that the - :attr:`inserted_primary_key` attribute is accessible, - assuming the statement did not include - a user defined "returning" construct. - - """ - return self.context.isinsert - - def _cursor_description(self): - """May be overridden by subclasses.""" - - return self._saved_cursor.description - - def close(self, _autoclose_connection=True): - """Close this ResultProxy. - - Closes the underlying DBAPI cursor corresponding to the execution. - - Note that any data cached within this ResultProxy is still available. - For some types of results, this may include buffered rows. - - If this ResultProxy was generated from an implicit execution, - the underlying Connection will also be closed (returns the - underlying DBAPI connection to the connection pool.) - - This method is called automatically when: - - * all result rows are exhausted using the fetchXXX() methods. - * cursor.description is None. - - """ - - if not self.closed: - self.closed = True - self.connection._safe_close_cursor(self.cursor) - if _autoclose_connection and \ - self.connection.should_close_with_result: - self.connection.close() - # allow consistent errors - self.cursor = None - - def __iter__(self): - while True: - row = self.fetchone() - if row is None: - raise StopIteration - else: - yield row - - @util.memoized_property - def inserted_primary_key(self): - """Return the primary key for the row just inserted. - - The return value is a list of scalar values - corresponding to the list of primary key columns - in the target table. - - This only applies to single row :func:`.insert` - constructs which did not explicitly specify - :meth:`.Insert.returning`. - - Note that primary key columns which specify a - server_default clause, - or otherwise do not qualify as "autoincrement" - columns (see the notes at :class:`.Column`), and were - generated using the database-side default, will - appear in this list as ``None`` unless the backend - supports "returning" and the insert statement executed - with the "implicit returning" enabled. - - """ - - if not self.context.isinsert: - raise exc.InvalidRequestError( - "Statement is not an insert() expression construct.") - elif self.context._is_explicit_returning: - raise exc.InvalidRequestError( - "Can't call inserted_primary_key when returning() " - "is used.") - - return self.context.inserted_primary_key - - @util.deprecated("0.6", "Use :attr:`.ResultProxy.inserted_primary_key`") - def last_inserted_ids(self): - """Return the primary key for the row just inserted.""" - - return self.inserted_primary_key - - def last_updated_params(self): - """Return the collection of updated parameters from this - execution. - - """ - if self.context.executemany: - return self.context.compiled_parameters - else: - return self.context.compiled_parameters[0] - - def last_inserted_params(self): - """Return the collection of inserted parameters from this - execution. - - """ - if self.context.executemany: - return self.context.compiled_parameters - else: - return self.context.compiled_parameters[0] - - def lastrow_has_defaults(self): - """Return ``lastrow_has_defaults()`` from the underlying - ExecutionContext. - - See ExecutionContext for details. - """ - - return self.context.lastrow_has_defaults() - - def postfetch_cols(self): - """Return ``postfetch_cols()`` from the underlying ExecutionContext. - - See ExecutionContext for details. 
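`inserted_primary_key` applies only to a single-row `insert()` construct without `returning()`; a hedged sketch assuming a `movies` Table with an autoincrement primary key:

```python
# Hypothetical use of inserted_primary_key per the docstring above;
# `movies` is an assumed Table with an autoincrement primary key.
result = engine.execute(movies.insert().values(title="Alien"))
new_id = result.inserted_primary_key[0]   # one value per PK column
```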
- """ - - return self.context.postfetch_cols - - def prefetch_cols(self): - return self.context.prefetch_cols - - def supports_sane_rowcount(self): - """Return ``supports_sane_rowcount`` from the dialect. - - See :attr:`.ResultProxy.rowcount` for background. - - """ - - return self.dialect.supports_sane_rowcount - - def supports_sane_multi_rowcount(self): - """Return ``supports_sane_multi_rowcount`` from the dialect. - - See :attr:`.ResultProxy.rowcount` for background. - - """ - - return self.dialect.supports_sane_multi_rowcount - - def _fetchone_impl(self): - try: - return self.cursor.fetchone() - except AttributeError: - self._non_result() - - def _fetchmany_impl(self, size=None): - try: - if size is None: - return self.cursor.fetchmany() - else: - return self.cursor.fetchmany(size) - except AttributeError: - self._non_result() - - def _fetchall_impl(self): - try: - return self.cursor.fetchall() - except AttributeError: - self._non_result() - - def _non_result(self): - if self._metadata is None: - raise exc.ResourceClosedError( - "This result object does not return rows. " - "It has been closed automatically.", - ) - else: - raise exc.ResourceClosedError("This result object is closed.") - - def process_rows(self, rows): - process_row = self._process_row - metadata = self._metadata - keymap = metadata._keymap - processors = metadata._processors - if self._echo: - log = self.context.engine.logger.debug - l = [] - for row in rows: - log("Row %r", row) - l.append(process_row(metadata, row, processors, keymap)) - return l - else: - return [process_row(metadata, row, processors, keymap) - for row in rows] - - def fetchall(self): - """Fetch all rows, just like DB-API ``cursor.fetchall()``.""" - - try: - l = self.process_rows(self._fetchall_impl()) - self.close() - return l - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, - self.cursor, self.context) - raise - - def fetchmany(self, size=None): - """Fetch many rows, just like DB-API - ``cursor.fetchmany(size=cursor.arraysize)``. - - If rows are present, the cursor remains open after this is called. - Else the cursor is automatically closed and an empty list is returned. - - """ - - try: - l = self.process_rows(self._fetchmany_impl(size)) - if len(l) == 0: - self.close() - return l - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, - self.cursor, self.context) - raise - - def fetchone(self): - """Fetch one row, just like DB-API ``cursor.fetchone()``. - - If a row is present, the cursor remains open after this is called. - Else the cursor is automatically closed and None is returned. - - """ - try: - row = self._fetchone_impl() - if row is not None: - return self.process_rows([row])[0] - else: - self.close() - return None - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, - self.cursor, self.context) - raise - - def first(self): - """Fetch the first row and then close the result set unconditionally. - - Returns None if no row is present. - - """ - if self._metadata is None: - self._non_result() - - try: - row = self._fetchone_impl() - except Exception, e: - self.connection._handle_dbapi_exception( - e, None, None, - self.cursor, self.context) - raise - - try: - if row is not None: - return self.process_rows([row])[0] - else: - return None - finally: - self.close() - - def scalar(self): - """Fetch the first column of the first row, and close the result set. - - Returns None if no row is present. 
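`first()` and `scalar()` read at most one row and then close the result set unconditionally; a hedged sketch, with `engine` and the query text assumed:

```python
# Hypothetical single-row reads per the methods above; both close the
# result set after reading, so they suit one-shot queries.
count = engine.execute("SELECT COUNT(*) FROM movies").scalar()
row = engine.execute("SELECT id, title FROM movies").first()
if row is not None:
    print(row["title"])      # RowProxy allows name-based access
```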
- - """ - row = self.first() - if row is not None: - return row[0] - else: - return None - -class BufferedRowResultProxy(ResultProxy): - """A ResultProxy with row buffering behavior. - - ``ResultProxy`` that buffers the contents of a selection of rows - before ``fetchone()`` is called. This is to allow the results of - ``cursor.description`` to be available immediately, when - interfacing with a DB-API that requires rows to be consumed before - this information is available (currently psycopg2, when used with - server-side cursors). - - The pre-fetching behavior fetches only one row initially, and then - grows its buffer size by a fixed amount with each successive need - for additional rows up to a size of 100. - """ - - def _init_metadata(self): - self.__buffer_rows() - super(BufferedRowResultProxy, self)._init_metadata() - - # this is a "growth chart" for the buffering of rows. - # each successive __buffer_rows call will use the next - # value in the list for the buffer size until the max - # is reached - size_growth = { - 1 : 5, - 5 : 10, - 10 : 20, - 20 : 50, - 50 : 100, - 100 : 250, - 250 : 500, - 500 : 1000 - } - - def __buffer_rows(self): - size = getattr(self, '_bufsize', 1) - self.__rowbuffer = collections.deque(self.cursor.fetchmany(size)) - self._bufsize = self.size_growth.get(size, size) - - def _fetchone_impl(self): - if self.closed: - return None - if not self.__rowbuffer: - self.__buffer_rows() - if not self.__rowbuffer: - return None - return self.__rowbuffer.popleft() - - def _fetchmany_impl(self, size=None): - if size is None: - return self._fetchall_impl() - result = [] - for x in range(0, size): - row = self._fetchone_impl() - if row is None: - break - result.append(row) - return result - - def _fetchall_impl(self): - self.__rowbuffer.extend(self.cursor.fetchall()) - ret = self.__rowbuffer - self.__rowbuffer = collections.deque() - return ret - -class FullyBufferedResultProxy(ResultProxy): - """A result proxy that buffers rows fully upon creation. - - Used for operations where a result is to be delivered - after the database conversation can not be continued, - such as MSSQL INSERT...OUTPUT after an autocommit. - - """ - def _init_metadata(self): - super(FullyBufferedResultProxy, self)._init_metadata() - self.__rowbuffer = self._buffer_rows() - - def _buffer_rows(self): - return collections.deque(self.cursor.fetchall()) - - def _fetchone_impl(self): - if self.__rowbuffer: - return self.__rowbuffer.popleft() - else: - return None - - def _fetchmany_impl(self, size=None): - if size is None: - return self._fetchall_impl() - result = [] - for x in range(0, size): - row = self._fetchone_impl() - if row is None: - break - result.append(row) - return result - - def _fetchall_impl(self): - ret = self.__rowbuffer - self.__rowbuffer = collections.deque() - return ret - -class BufferedColumnRow(RowProxy): - def __init__(self, parent, row, processors, keymap): - # preprocess row - row = list(row) - # this is a tad faster than using enumerate - index = 0 - for processor in parent._orig_processors: - if processor is not None: - row[index] = processor(row[index]) - index += 1 - row = tuple(row) - super(BufferedColumnRow, self).__init__(parent, row, - processors, keymap) - -class BufferedColumnResultProxy(ResultProxy): - """A ResultProxy with column buffering behavior. - - ``ResultProxy`` that loads all columns into memory each time - fetchone() is called. If fetchmany() or fetchall() are called, - the full grid of results is fetched. 
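The buffer size walks the `size_growth` table above, advancing one step per refill and sticking at 1000 (note the table actually grows to 1000, although the class docstring still says 100). A runnable trace of that schedule:

```python
# Runnable trace of the size_growth schedule above: each refill
# requests the current size, then advances to the next step.
size_growth = {1: 5, 5: 10, 10: 20, 20: 50, 50: 100,
               100: 250, 250: 500, 500: 1000}

size = 1
sizes = []
for _ in range(10):
    sizes.append(size)                    # rows requested this refill
    size = size_growth.get(size, size)    # next step; sticks at 1000
print(sizes)   # [1, 5, 10, 20, 50, 100, 250, 500, 1000, 1000]
```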
This is to operate with - databases where result rows contain "live" results that fall out - of scope unless explicitly fetched. Currently this includes - cx_Oracle LOB objects. - - """ - - _process_row = BufferedColumnRow - - def _init_metadata(self): - super(BufferedColumnResultProxy, self)._init_metadata() - metadata = self._metadata - # orig_processors will be used to preprocess each row when they are - # constructed. - metadata._orig_processors = metadata._processors - # replace the all type processors by None processors. - metadata._processors = [None for _ in xrange(len(metadata.keys))] - keymap = {} - for k, (func, obj, index) in metadata._keymap.iteritems(): - keymap[k] = (None, obj, index) - self._metadata._keymap = keymap - - def fetchall(self): - # can't call cursor.fetchall(), since rows must be - # fully processed before requesting more from the DBAPI. - l = [] - while True: - row = self.fetchone() - if row is None: - break - l.append(row) - return l - - def fetchmany(self, size=None): - # can't call cursor.fetchmany(), since rows must be - # fully processed before requesting more from the DBAPI. - if size is None: - return self.fetchall() - l = [] - for i in xrange(size): - row = self.fetchone() - if row is None: - break - l.append(row) - return l - -def connection_memoize(key): - """Decorator, memoize a function in a connection.info stash. - - Only applicable to functions which take no arguments other than a - connection. The memo will be stored in ``connection.info[key]``. - """ - - @util.decorator - def decorated(fn, self, connection): - connection = connection.connect() - try: - return connection.info[key] - except KeyError: - connection.info[key] = val = fn(self, connection) - return val - - return decorated diff --git a/libs/sqlalchemy/engine/ddl.py b/libs/sqlalchemy/engine/ddl.py deleted file mode 100644 index c3b32505..00000000 --- a/libs/sqlalchemy/engine/ddl.py +++ /dev/null @@ -1,182 +0,0 @@ -# engine/ddl.py -# Copyright (C) 2009-2011 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Routines to handle CREATE/DROP workflow.""" - -from sqlalchemy import engine, schema -from sqlalchemy.sql import util as sql_util - - -class DDLBase(schema.SchemaVisitor): - def __init__(self, connection): - self.connection = connection - -class SchemaGenerator(DDLBase): - def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): - super(SchemaGenerator, self).__init__(connection, **kwargs) - self.checkfirst = checkfirst - self.tables = tables and set(tables) or None - self.preparer = dialect.identifier_preparer - self.dialect = dialect - self.memo = {} - - def _can_create_table(self, table): - self.dialect.validate_identifier(table.name) - if table.schema: - self.dialect.validate_identifier(table.schema) - return not self.checkfirst or \ - not self.dialect.has_table(self.connection, - table.name, schema=table.schema) - - def _can_create_sequence(self, sequence): - return self.dialect.supports_sequences and \ - ( - (not self.dialect.sequences_optional or - not sequence.optional) and - ( - not self.checkfirst or - not self.dialect.has_sequence( - self.connection, - sequence.name, - schema=sequence.schema) - ) - ) - - def visit_metadata(self, metadata): - if self.tables: - tables = self.tables - else: - tables = metadata.tables.values() - collection = [t for t in sql_util.sort_tables(tables) - if self._can_create_table(t)] - seq_coll = 
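`connection_memoize` above caches a computed value in `connection.info` under the given key, so the wrapped function runs once per connection. A hedged sketch; `MyDialect` and the query are made-up examples:

```python
# Hypothetical use of connection_memoize above: the first call executes
# the query, later calls on the same connection read connection.info.
class MyDialect(object):
    @connection_memoize('md_server_version')
    def server_version(self, connection):
        return connection.execute("SHOW server_version").scalar()
```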
[s for s in metadata._sequences.values() - if s.column is None and self._can_create_sequence(s)] - - metadata.dispatch.before_create(metadata, self.connection, - tables=collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - for seq in seq_coll: - self.traverse_single(seq, create_ok=True) - - for table in collection: - self.traverse_single(table, create_ok=True) - - metadata.dispatch.after_create(metadata, self.connection, - tables=collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - def visit_table(self, table, create_ok=False): - if not create_ok and not self._can_create_table(table): - return - - table.dispatch.before_create(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - for column in table.columns: - if column.default is not None: - self.traverse_single(column.default) - - self.connection.execute(schema.CreateTable(table)) - - if hasattr(table, 'indexes'): - for index in table.indexes: - self.traverse_single(index) - - table.dispatch.after_create(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - def visit_sequence(self, sequence, create_ok=False): - if not create_ok and not self._can_create_sequence(sequence): - return - self.connection.execute(schema.CreateSequence(sequence)) - - def visit_index(self, index): - self.connection.execute(schema.CreateIndex(index)) - - -class SchemaDropper(DDLBase): - def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): - super(SchemaDropper, self).__init__(connection, **kwargs) - self.checkfirst = checkfirst - self.tables = tables - self.preparer = dialect.identifier_preparer - self.dialect = dialect - self.memo = {} - - def visit_metadata(self, metadata): - if self.tables: - tables = self.tables - else: - tables = metadata.tables.values() - collection = [t for t in reversed(sql_util.sort_tables(tables)) - if self._can_drop_table(t)] - seq_coll = [s for s in metadata._sequences.values() - if s.column is None and self._can_drop_sequence(s)] - - metadata.dispatch.before_drop(metadata, self.connection, - tables=collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - for table in collection: - self.traverse_single(table, drop_ok=True) - - for seq in seq_coll: - self.traverse_single(seq, drop_ok=True) - - metadata.dispatch.after_drop(metadata, self.connection, - tables=collection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - def _can_drop_table(self, table): - self.dialect.validate_identifier(table.name) - if table.schema: - self.dialect.validate_identifier(table.schema) - return not self.checkfirst or self.dialect.has_table(self.connection, - table.name, schema=table.schema) - - def _can_drop_sequence(self, sequence): - return self.dialect.supports_sequences and \ - ((not self.dialect.sequences_optional or - not sequence.optional) and - (not self.checkfirst or - self.dialect.has_sequence( - self.connection, - sequence.name, - schema=sequence.schema)) - ) - - def visit_index(self, index): - self.connection.execute(schema.DropIndex(index)) - - def visit_table(self, table, drop_ok=False): - if not drop_ok and not self._can_drop_table(table): - return - - table.dispatch.before_drop(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - for column in table.columns: - if column.default is not None: - self.traverse_single(column.default) - - self.connection.execute(schema.DropTable(table)) - - table.dispatch.after_drop(table, self.connection, - checkfirst=self.checkfirst, - _ddl_runner=self) - - def 
visit_sequence(self, sequence, drop_ok=False): - if not drop_ok and not self._can_drop_sequence(sequence): - return - self.connection.execute(schema.DropSequence(sequence)) diff --git a/libs/sqlalchemy/engine/default.py b/libs/sqlalchemy/engine/default.py deleted file mode 100644 index f3dfd95e..00000000 --- a/libs/sqlalchemy/engine/default.py +++ /dev/null @@ -1,820 +0,0 @@ -# engine/default.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Default implementations of per-dialect sqlalchemy.engine classes. - -These are semi-private implementation classes which are only of importance -to database dialect authors; dialects will usually use the classes here -as the base class for their own corresponding classes. - -""" - -import re, random -from sqlalchemy.engine import base, reflection -from sqlalchemy.sql import compiler, expression -from sqlalchemy import exc, types as sqltypes, util, pool, processors -import codecs -import weakref - -AUTOCOMMIT_REGEXP = re.compile( - r'\s*(?:UPDATE|INSERT|CREATE|DELETE|DROP|ALTER)', - re.I | re.UNICODE) - - -class DefaultDialect(base.Dialect): - """Default implementation of Dialect""" - - statement_compiler = compiler.SQLCompiler - ddl_compiler = compiler.DDLCompiler - type_compiler = compiler.GenericTypeCompiler - preparer = compiler.IdentifierPreparer - supports_alter = True - - # most DBAPIs happy with this for execute(). - # not cx_oracle. - execute_sequence_format = tuple - - supports_views = True - supports_sequences = False - sequences_optional = False - preexecute_autoincrement_sequences = False - postfetch_lastrowid = True - implicit_returning = False - - - supports_native_enum = False - supports_native_boolean = False - - # if the NUMERIC type - # returns decimal.Decimal. - # *not* the FLOAT type however. - supports_native_decimal = False - - # Py3K - #supports_unicode_statements = True - #supports_unicode_binds = True - #returns_unicode_strings = True - #description_encoding = None - # Py2K - supports_unicode_statements = False - supports_unicode_binds = False - returns_unicode_strings = False - description_encoding = 'use_encoding' - # end Py2K - - - name = 'default' - - # length at which to truncate - # any identifier. - max_identifier_length = 9999 - - # length at which to truncate - # the name of an index. - # Usually None to indicate - # 'use max_identifier_length'. - # thanks to MySQL, sigh - max_index_name_length = None - - supports_sane_rowcount = True - supports_sane_multi_rowcount = True - dbapi_type_map = {} - colspecs = {} - default_paramstyle = 'named' - supports_default_values = False - supports_empty_insert = True - - server_version_info = None - - # indicates symbol names are - # UPPERCASEd if they are case insensitive - # within the database. - # if this is True, the methods normalize_name() - # and denormalize_name() must be provided. - requires_name_normalize = False - - reflection_options = () - - def __init__(self, convert_unicode=False, assert_unicode=False, - encoding='utf-8', paramstyle=None, dbapi=None, - implicit_returning=None, - label_length=None, **kwargs): - - if not getattr(self, 'ported_sqla_06', True): - util.warn( - "The %s dialect is not yet ported to SQLAlchemy 0.6/0.7" % - self.name) - - self.convert_unicode = convert_unicode - if assert_unicode: - util.warn_deprecated( - "assert_unicode is deprecated. 
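`SchemaGenerator.visit_metadata` above creates tables in dependency-sorted order, while `SchemaDropper` walks the same list reversed; a dependency-free sketch of that rule, with an assumed parent-first ordering:

```python
# Sketch of the ordering rule above: create parents before children,
# drop in the reverse order. The table list is an assumption.
sorted_tables = ["users", "addresses", "orders"]   # parents first

for t in sorted_tables:
    print("CREATE TABLE", t)
for t in reversed(sorted_tables):
    print("DROP TABLE", t)
```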
" - "SQLAlchemy emits a warning in all cases where it " - "would otherwise like to encode a Python unicode object " - "into a specific encoding but a plain bytestring is " - "received. " - "This does *not* apply to DBAPIs that coerce Unicode " - "natively.") - - self.encoding = encoding - self.positional = False - self._ischema = None - self.dbapi = dbapi - if paramstyle is not None: - self.paramstyle = paramstyle - elif self.dbapi is not None: - self.paramstyle = self.dbapi.paramstyle - else: - self.paramstyle = self.default_paramstyle - if implicit_returning is not None: - self.implicit_returning = implicit_returning - self.positional = self.paramstyle in ('qmark', 'format', 'numeric') - self.identifier_preparer = self.preparer(self) - self.type_compiler = self.type_compiler(self) - - if label_length and label_length > self.max_identifier_length: - raise exc.ArgumentError( - "Label length of %d is greater than this dialect's" - " maximum identifier length of %d" % - (label_length, self.max_identifier_length)) - self.label_length = label_length - - if self.description_encoding == 'use_encoding': - self._description_decoder = processors.to_unicode_processor_factory( - encoding - ) - elif self.description_encoding is not None: - self._description_decoder = processors.to_unicode_processor_factory( - self.description_encoding - ) - self._encoder = codecs.getencoder(self.encoding) - self._decoder = processors.to_unicode_processor_factory(self.encoding) - - @util.memoized_property - def _type_memos(self): - return weakref.WeakKeyDictionary() - - @property - def dialect_description(self): - return self.name + "+" + self.driver - - @classmethod - def get_pool_class(cls, url): - return getattr(cls, 'poolclass', pool.QueuePool) - - def initialize(self, connection): - try: - self.server_version_info = \ - self._get_server_version_info(connection) - except NotImplementedError: - self.server_version_info = None - try: - self.default_schema_name = \ - self._get_default_schema_name(connection) - except NotImplementedError: - self.default_schema_name = None - - try: - self.default_isolation_level = \ - self.get_isolation_level(connection.connection) - except NotImplementedError: - self.default_isolation_level = None - - self.returns_unicode_strings = self._check_unicode_returns(connection) - - self.do_rollback(connection.connection) - - def on_connect(self): - """return a callable which sets up a newly created DBAPI connection. - - This is used to set dialect-wide per-connection options such as - isolation modes, unicode modes, etc. - - If a callable is returned, it will be assembled into a pool listener - that receives the direct DBAPI connection, with all wrappers removed. - - If None is returned, no listener will be generated. 
- - """ - return None - - def _check_unicode_returns(self, connection): - # Py2K - if self.supports_unicode_statements: - cast_to = unicode - else: - cast_to = str - # end Py2K - # Py3K - #cast_to = str - def check_unicode(formatstr, type_): - cursor = connection.connection.cursor() - try: - try: - cursor.execute( - cast_to( - expression.select( - [expression.cast( - expression.literal_column( - "'test %s returns'" % formatstr), type_) - ]).compile(dialect=self) - ) - ) - row = cursor.fetchone() - - return isinstance(row[0], unicode) - except self.dbapi.Error, de: - util.warn("Exception attempting to " - "detect unicode returns: %r" % de) - return False - finally: - cursor.close() - - # detect plain VARCHAR - unicode_for_varchar = check_unicode("plain", sqltypes.VARCHAR(60)) - - # detect if there's an NVARCHAR type with different behavior available - unicode_for_unicode = check_unicode("unicode", sqltypes.Unicode(60)) - - if unicode_for_unicode and not unicode_for_varchar: - return "conditional" - else: - return unicode_for_varchar - - def type_descriptor(self, typeobj): - """Provide a database-specific ``TypeEngine`` object, given - the generic object which comes from the types module. - - This method looks for a dictionary called - ``colspecs`` as a class or instance-level variable, - and passes on to ``types.adapt_type()``. - - """ - return sqltypes.adapt_type(typeobj, self.colspecs) - - def reflecttable(self, connection, table, include_columns, exclude_columns=None): - insp = reflection.Inspector.from_engine(connection) - return insp.reflecttable(table, include_columns, exclude_columns) - - def get_pk_constraint(self, conn, table_name, schema=None, **kw): - """Compatibility method, adapts the result of get_primary_keys() - for those dialects which don't implement get_pk_constraint(). - - """ - return { - 'constrained_columns': - self.get_primary_keys(conn, table_name, - schema=schema, **kw) - } - - def validate_identifier(self, ident): - if len(ident) > self.max_identifier_length: - raise exc.IdentifierError( - "Identifier '%s' exceeds maximum length of %d characters" % - (ident, self.max_identifier_length) - ) - - def connect(self, *cargs, **cparams): - return self.dbapi.connect(*cargs, **cparams) - - def create_connect_args(self, url): - opts = url.translate_connect_args() - opts.update(url.query) - return [[], opts] - - def do_begin(self, connection): - """Implementations might want to put logic here for turning - autocommit on/off, etc. - """ - - pass - - def do_rollback(self, connection): - """Implementations might want to put logic here for turning - autocommit on/off, etc. - """ - - connection.rollback() - - def do_commit(self, connection): - """Implementations might want to put logic here for turning - autocommit on/off, etc. - """ - - connection.commit() - - def create_xid(self): - """Create a random two-phase transaction ID. - - This id will be passed to do_begin_twophase(), do_rollback_twophase(), - do_commit_twophase(). Its format is unspecified. 
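A runnable illustration of the xid format that `create_xid()` produces (its body returns `"_sa_%032x" % random.randint(0, 2 ** 128)`): an `_sa_` prefix followed by at least 32 zero-padded hex digits.

```python
# Runnable illustration of the xid format generated by create_xid above.
import random

xid = "_sa_%032x" % random.randint(0, 2 ** 128)
print(xid)      # e.g. _sa_00c0ffee... (the format is unspecified upstream)
```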
- """ - - return "_sa_%032x" % random.randint(0, 2 ** 128) - - def do_savepoint(self, connection, name): - connection.execute(expression.SavepointClause(name)) - - def do_rollback_to_savepoint(self, connection, name): - connection.execute(expression.RollbackToSavepointClause(name)) - - def do_release_savepoint(self, connection, name): - connection.execute(expression.ReleaseSavepointClause(name)) - - def do_executemany(self, cursor, statement, parameters, context=None): - cursor.executemany(statement, parameters) - - def do_execute(self, cursor, statement, parameters, context=None): - cursor.execute(statement, parameters) - - def do_execute_no_params(self, cursor, statement, context=None): - cursor.execute(statement) - - def is_disconnect(self, e, connection, cursor): - return False - - def reset_isolation_level(self, dbapi_conn): - # default_isolation_level is read from the first connection - # after the initial set of 'isolation_level', if any, so is - # the configured default of this dialect. - self.set_isolation_level(dbapi_conn, self.default_isolation_level) - -class DefaultExecutionContext(base.ExecutionContext): - isinsert = False - isupdate = False - isdelete = False - isddl = False - executemany = False - result_map = None - compiled = None - statement = None - postfetch_cols = None - prefetch_cols = None - _is_implicit_returning = False - _is_explicit_returning = False - - # a hook for SQLite's translation of - # result column names - _translate_colname = None - - @classmethod - def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl): - """Initialize execution context for a DDLElement construct.""" - - self = cls.__new__(cls) - self.dialect = dialect - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.engine = connection.engine - - self.compiled = compiled = compiled_ddl - self.isddl = True - - self.execution_options = compiled.statement._execution_options - if connection._execution_options: - self.execution_options = dict(self.execution_options) - self.execution_options.update(connection._execution_options) - - if not dialect.supports_unicode_statements: - self.unicode_statement = unicode(compiled) - self.statement = dialect._encoder(self.unicode_statement)[0] - else: - self.statement = self.unicode_statement = unicode(compiled) - - self.cursor = self.create_cursor() - self.compiled_parameters = [] - - if dialect.positional: - self.parameters = [dialect.execute_sequence_format()] - else: - self.parameters = [{}] - - return self - - @classmethod - def _init_compiled(cls, dialect, connection, dbapi_connection, compiled, parameters): - """Initialize execution context for a Compiled construct.""" - - self = cls.__new__(cls) - self.dialect = dialect - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.engine = connection.engine - - self.compiled = compiled - - if not compiled.can_execute: - raise exc.ArgumentError("Not an executable clause") - - self.execution_options = compiled.statement._execution_options - if connection._execution_options: - self.execution_options = dict(self.execution_options) - self.execution_options.update(connection._execution_options) - - # compiled clauseelement. 
process bind params, process table defaults, - # track collections used by ResultProxy to target and process results - - self.result_map = compiled.result_map - - self.unicode_statement = unicode(compiled) - if not dialect.supports_unicode_statements: - self.statement = self.unicode_statement.encode(self.dialect.encoding) - else: - self.statement = self.unicode_statement - - self.isinsert = compiled.isinsert - self.isupdate = compiled.isupdate - self.isdelete = compiled.isdelete - - if self.isinsert or self.isupdate or self.isdelete: - self._is_explicit_returning = compiled.statement._returning - self._is_implicit_returning = compiled.returning and \ - not compiled.statement._returning - - if not parameters: - self.compiled_parameters = [compiled.construct_params()] - else: - self.compiled_parameters = \ - [compiled.construct_params(m, _group_number=grp) for - grp,m in enumerate(parameters)] - - self.executemany = len(parameters) > 1 - - self.cursor = self.create_cursor() - if self.isinsert or self.isupdate: - self.postfetch_cols = self.compiled.postfetch - self.prefetch_cols = self.compiled.prefetch - self.__process_defaults() - - processors = compiled._bind_processors - - # Convert the dictionary of bind parameter values - # into a dict or list to be sent to the DBAPI's - # execute() or executemany() method. - parameters = [] - if dialect.positional: - for compiled_params in self.compiled_parameters: - param = [] - for key in self.compiled.positiontup: - if key in processors: - param.append(processors[key](compiled_params[key])) - else: - param.append(compiled_params[key]) - parameters.append(dialect.execute_sequence_format(param)) - else: - encode = not dialect.supports_unicode_statements - for compiled_params in self.compiled_parameters: - param = {} - if encode: - for key in compiled_params: - if key in processors: - param[dialect._encoder(key)[0]] = \ - processors[key](compiled_params[key]) - else: - param[dialect._encoder(key)[0]] = compiled_params[key] - else: - for key in compiled_params: - if key in processors: - param[key] = processors[key](compiled_params[key]) - else: - param[key] = compiled_params[key] - parameters.append(param) - self.parameters = dialect.execute_sequence_format(parameters) - - return self - - @classmethod - def _init_statement(cls, dialect, connection, dbapi_connection, statement, parameters): - """Initialize execution context for a string SQL statement.""" - - self = cls.__new__(cls) - self.dialect = dialect - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.engine = connection.engine - - # plain text statement - self.execution_options = connection._execution_options - - if not parameters: - if self.dialect.positional: - self.parameters = [dialect.execute_sequence_format()] - else: - self.parameters = [{}] - elif isinstance(parameters[0], dialect.execute_sequence_format): - self.parameters = parameters - elif isinstance(parameters[0], dict): - if dialect.supports_unicode_statements: - self.parameters = parameters - else: - self.parameters= [ - dict((dialect._encoder(k)[0], d[k]) for k in d) - for d in parameters - ] or [{}] - else: - self.parameters = [dialect.execute_sequence_format(p) - for p in parameters] - - self.executemany = len(parameters) > 1 - - if not dialect.supports_unicode_statements and isinstance(statement, unicode): - self.unicode_statement = statement - self.statement = dialect._encoder(statement)[0] - else: - self.statement = self.unicode_statement = statement - - self.cursor = self.create_cursor() - 
return self - - @classmethod - def _init_default(cls, dialect, connection, dbapi_connection): - """Initialize execution context for a ColumnDefault construct.""" - - self = cls.__new__(cls) - self.dialect = dialect - self.root_connection = connection - self._dbapi_connection = dbapi_connection - self.engine = connection.engine - self.execution_options = connection._execution_options - self.cursor = self.create_cursor() - return self - - @util.memoized_property - def no_parameters(self): - return self.execution_options.get("no_parameters", False) - - @util.memoized_property - def is_crud(self): - return self.isinsert or self.isupdate or self.isdelete - - @util.memoized_property - def should_autocommit(self): - autocommit = self.execution_options.get('autocommit', - not self.compiled and - self.statement and - expression.PARSE_AUTOCOMMIT - or False) - - if autocommit is expression.PARSE_AUTOCOMMIT: - return self.should_autocommit_text(self.unicode_statement) - else: - return autocommit - - def _execute_scalar(self, stmt, type_): - """Execute a string statement on the current cursor, returning a - scalar result. - - Used to fire off sequences, default phrases, and "select lastrowid" - types of statements individually or in the context of a parent INSERT - or UPDATE statement. - - """ - - conn = self.root_connection - if isinstance(stmt, unicode) and \ - not self.dialect.supports_unicode_statements: - stmt = self.dialect._encoder(stmt)[0] - - if self.dialect.positional: - default_params = self.dialect.execute_sequence_format() - else: - default_params = {} - - conn._cursor_execute(self.cursor, stmt, default_params) - r = self.cursor.fetchone()[0] - if type_ is not None: - # apply type post processors to the result - proc = type_._cached_result_processor( - self.dialect, - self.cursor.description[0][1] - ) - if proc: - return proc(r) - return r - - @property - def connection(self): - return self.root_connection._branch() - - def should_autocommit_text(self, statement): - return AUTOCOMMIT_REGEXP.match(statement) - - def create_cursor(self): - return self._dbapi_connection.cursor() - - def pre_exec(self): - pass - - def post_exec(self): - pass - - def get_lastrowid(self): - """return self.cursor.lastrowid, or equivalent, after an INSERT. - - This may involve calling special cursor functions, - issuing a new SELECT on the cursor (or a new one), - or returning a stored value that was - calculated within post_exec(). - - This function will only be called for dialects - which support "implicit" primary key generation, - keep preexecute_autoincrement_sequences set to False, - and when no explicit id value was bound to the - statement. - - The function is called once, directly after - post_exec() and before the transaction is committed - or ResultProxy is generated. If the post_exec() - method assigns a value to `self._lastrowid`, the - value is used in place of calling get_lastrowid(). - - Note that this method is *not* equivalent to the - ``lastrowid`` method on ``ResultProxy``, which is a - direct proxy to the DBAPI ``lastrowid`` accessor - in all cases. 
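
Editor's note: `should_autocommit` above resolves in two steps: an explicit `autocommit` execution option wins outright; otherwise, for plain string statements, the text is matched against `AUTOCOMMIT_REGEXP`. A runnable sketch of forcing the option (table name illustrative):

```python
from sqlalchemy import create_engine

engine = create_engine('sqlite://')
engine.execute("CREATE TABLE stock (sku VARCHAR(10), qty INTEGER)")

conn = engine.connect()
# The 'autocommit' option short-circuits the AUTOCOMMIT_REGEXP text
# heuristic consulted by should_autocommit_text().
conn.execution_options(autocommit=True).execute(
    "INSERT INTO stock (sku, qty) VALUES ('X1', 10)")
conn.close()
```
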
- - """ - return self.cursor.lastrowid - - def handle_dbapi_exception(self, e): - pass - - def get_result_proxy(self): - return base.ResultProxy(self) - - @property - def rowcount(self): - return self.cursor.rowcount - - def supports_sane_rowcount(self): - return self.dialect.supports_sane_rowcount - - def supports_sane_multi_rowcount(self): - return self.dialect.supports_sane_multi_rowcount - - def post_insert(self): - if not self._is_implicit_returning and \ - self.dialect.postfetch_lastrowid and \ - (not self.inserted_primary_key or \ - None in self.inserted_primary_key): - - table = self.compiled.statement.table - lastrowid = self.get_lastrowid() - - autoinc_col = table._autoincrement_column - if autoinc_col is not None: - # apply type post processors to the lastrowid - proc = autoinc_col.type._cached_result_processor(self.dialect, None) - if proc is not None: - lastrowid = proc(lastrowid) - - self.inserted_primary_key = [ - c is autoinc_col and lastrowid or v - for c, v in zip( - table.primary_key, - self.inserted_primary_key) - ] - - def _fetch_implicit_returning(self, resultproxy): - table = self.compiled.statement.table - row = resultproxy.fetchone() - - ipk = [] - for c, v in zip(table.primary_key, self.inserted_primary_key): - if v is not None: - ipk.append(v) - else: - ipk.append(row[c]) - - self.inserted_primary_key = ipk - - def lastrow_has_defaults(self): - return (self.isinsert or self.isupdate) and \ - bool(self.postfetch_cols) - - def set_input_sizes(self, translate=None, exclude_types=None): - """Given a cursor and ClauseParameters, call the appropriate - style of ``setinputsizes()`` on the cursor, using DB-API types - from the bind parameter's ``TypeEngine`` objects. - - This method only called by those dialects which require it, - currently cx_oracle. 
- - """ - - if not hasattr(self.compiled, 'bind_names'): - return - - types = dict( - (self.compiled.bind_names[bindparam], bindparam.type) - for bindparam in self.compiled.bind_names) - - if self.dialect.positional: - inputsizes = [] - for key in self.compiled.positiontup: - typeengine = types[key] - dbtype = typeengine.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi) - if dbtype is not None and (not exclude_types or dbtype not in exclude_types): - inputsizes.append(dbtype) - try: - self.cursor.setinputsizes(*inputsizes) - except Exception, e: - self.root_connection._handle_dbapi_exception(e, None, None, None, self) - raise - else: - inputsizes = {} - for key in self.compiled.bind_names.values(): - typeengine = types[key] - dbtype = typeengine.dialect_impl(self.dialect).get_dbapi_type(self.dialect.dbapi) - if dbtype is not None and (not exclude_types or dbtype not in exclude_types): - if translate: - key = translate.get(key, key) - inputsizes[self.dialect._encoder(key)[0]] = dbtype - try: - self.cursor.setinputsizes(**inputsizes) - except Exception, e: - self.root_connection._handle_dbapi_exception(e, None, None, None, self) - raise - - def _exec_default(self, default, type_): - if default.is_sequence: - return self.fire_sequence(default, type_) - elif default.is_callable: - return default.arg(self) - elif default.is_clause_element: - # TODO: expensive branching here should be - # pulled into _exec_scalar() - conn = self.connection - c = expression.select([default.arg]).compile(bind=conn) - return conn._execute_compiled(c, (), {}).scalar() - else: - return default.arg - - def get_insert_default(self, column): - if column.default is None: - return None - else: - return self._exec_default(column.default, column.type) - - def get_update_default(self, column): - if column.onupdate is None: - return None - else: - return self._exec_default(column.onupdate, column.type) - - def __process_defaults(self): - """Generate default values for compiled insert/update statements, - and generate inserted_primary_key collection. 
- """ - - if self.executemany: - if len(self.compiled.prefetch): - scalar_defaults = {} - - # pre-determine scalar Python-side defaults - # to avoid many calls of get_insert_default()/ - # get_update_default() - for c in self.prefetch_cols: - if self.isinsert and c.default and c.default.is_scalar: - scalar_defaults[c] = c.default.arg - elif self.isupdate and c.onupdate and c.onupdate.is_scalar: - scalar_defaults[c] = c.onupdate.arg - - for param in self.compiled_parameters: - self.current_parameters = param - for c in self.prefetch_cols: - if c in scalar_defaults: - val = scalar_defaults[c] - elif self.isinsert: - val = self.get_insert_default(c) - else: - val = self.get_update_default(c) - if val is not None: - param[c.key] = val - del self.current_parameters - else: - self.current_parameters = compiled_parameters = \ - self.compiled_parameters[0] - - for c in self.compiled.prefetch: - if self.isinsert: - val = self.get_insert_default(c) - else: - val = self.get_update_default(c) - - if val is not None: - compiled_parameters[c.key] = val - del self.current_parameters - - if self.isinsert: - self.inserted_primary_key = [ - self.compiled_parameters[0].get(c.key, None) - for c in self.compiled.\ - statement.table.primary_key - ] - - -DefaultDialect.execution_ctx_cls = DefaultExecutionContext diff --git a/libs/sqlalchemy/engine/reflection.py b/libs/sqlalchemy/engine/reflection.py deleted file mode 100644 index 6d34a279..00000000 --- a/libs/sqlalchemy/engine/reflection.py +++ /dev/null @@ -1,469 +0,0 @@ -# engine/reflection.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides an abstraction for obtaining database schema information. - -Usage Notes: - -Here are some general conventions when accessing the low level inspector -methods such as get_table_names, get_columns, etc. - -1. Inspector methods return lists of dicts in most cases for the following - reasons: - - * They're both standard types that can be serialized. - * Using a dict instead of a tuple allows easy expansion of attributes. - * Using a list for the outer structure maintains order and is easy to work - with (e.g. list comprehension [d['name'] for d in cols]). - -2. Records that contain a name, such as the column name in a column record - use the key 'name'. So for most return values, each record will have a - 'name' attribute.. -""" - -import sqlalchemy -from sqlalchemy import exc, sql -from sqlalchemy import util -from sqlalchemy.util import topological -from sqlalchemy.types import TypeEngine -from sqlalchemy import schema as sa_schema - - -@util.decorator -def cache(fn, self, con, *args, **kw): - info_cache = kw.get('info_cache', None) - if info_cache is None: - return fn(self, con, *args, **kw) - key = ( - fn.__name__, - tuple(a for a in args if isinstance(a, basestring)), - tuple((k, v) for k, v in kw.iteritems() if isinstance(v, (basestring, int, float))) - ) - ret = info_cache.get(key) - if ret is None: - ret = fn(self, con, *args, **kw) - info_cache[key] = ret - return ret - - -class Inspector(object): - """Performs database schema inspection. - - The Inspector acts as a proxy to the reflection methods of the - :class:`~sqlalchemy.engine.base.Dialect`, providing a - consistent interface as well as caching support for previously - fetched metadata. - - The preferred method to construct an :class:`.Inspector` is via the - :meth:`Inspector.from_engine` method. 
I.e.:: - - engine = create_engine('...') - insp = Inspector.from_engine(engine) - - Where above, the :class:`~sqlalchemy.engine.base.Dialect` may opt - to return an :class:`.Inspector` subclass that provides additional - methods specific to the dialect's target database. - - """ - - def __init__(self, bind): - """Initialize a new :class:`.Inspector`. - - :param bind: a :class:`~sqlalchemy.engine.base.Connectable`, - which is typically an instance of - :class:`~sqlalchemy.engine.base.Engine` or - :class:`~sqlalchemy.engine.base.Connection`. - - For a dialect-specific instance of :class:`.Inspector`, see - :meth:`Inspector.from_engine` - - """ - # this might not be a connection, it could be an engine. - self.bind = bind - - # set the engine - if hasattr(bind, 'engine'): - self.engine = bind.engine - else: - self.engine = bind - - if self.engine is bind: - # if engine, ensure initialized - bind.connect().close() - - self.dialect = self.engine.dialect - self.info_cache = {} - - @classmethod - def from_engine(cls, bind): - """Construct a new dialect-specific Inspector object from the given engine or connection. - - :param bind: a :class:`~sqlalchemy.engine.base.Connectable`, - which is typically an instance of - :class:`~sqlalchemy.engine.base.Engine` or - :class:`~sqlalchemy.engine.base.Connection`. - - This method differs from direct a direct constructor call of :class:`.Inspector` - in that the :class:`~sqlalchemy.engine.base.Dialect` is given a chance to provide - a dialect-specific :class:`.Inspector` instance, which may provide additional - methods. - - See the example at :class:`.Inspector`. - - """ - if hasattr(bind.dialect, 'inspector'): - return bind.dialect.inspector(bind) - return Inspector(bind) - - @property - def default_schema_name(self): - """Return the default schema name presented by the dialect - for the current engine's database user. - - E.g. this is typically ``public`` for Postgresql and ``dbo`` - for SQL Server. - - """ - return self.dialect.default_schema_name - - def get_schema_names(self): - """Return all schema names. - """ - - if hasattr(self.dialect, 'get_schema_names'): - return self.dialect.get_schema_names(self.bind, - info_cache=self.info_cache) - return [] - - def get_table_names(self, schema=None, order_by=None): - """Return all table names in `schema`. - - :param schema: Optional, retrieve names from a non-default schema. - :param order_by: Optional, may be the string "foreign_key" to sort - the result on foreign key dependencies. - - This should probably not return view names or maybe it should return - them with an indicator t or v. - """ - - if hasattr(self.dialect, 'get_table_names'): - tnames = self.dialect.get_table_names(self.bind, - schema, info_cache=self.info_cache) - else: - tnames = self.engine.table_names(schema) - if order_by == 'foreign_key': - import random - random.shuffle(tnames) - - tuples = [] - for tname in tnames: - for fkey in self.get_foreign_keys(tname, schema): - if tname != fkey['referred_table']: - tuples.append((tname, fkey['referred_table'])) - tnames = list(topological.sort(tuples, tnames)) - return tnames - - def get_table_options(self, table_name, schema=None, **kw): - """Return a dictionary of options specified when the table of the given name was created. - - This currently includes some options that apply to MySQL tables. 
- - """ - if hasattr(self.dialect, 'get_table_options'): - return self.dialect.get_table_options(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - return {} - - def get_view_names(self, schema=None): - """Return all view names in `schema`. - - :param schema: Optional, retrieve names from a non-default schema. - """ - - return self.dialect.get_view_names(self.bind, schema, - info_cache=self.info_cache) - - def get_view_definition(self, view_name, schema=None): - """Return definition for `view_name`. - - :param schema: Optional, retrieve names from a non-default schema. - """ - - return self.dialect.get_view_definition( - self.bind, view_name, schema, info_cache=self.info_cache) - - def get_columns(self, table_name, schema=None, **kw): - """Return information about columns in `table_name`. - - Given a string `table_name` and an optional string `schema`, return - column information as a list of dicts with these keys: - - name - the column's name - - type - :class:`~sqlalchemy.types.TypeEngine` - - nullable - boolean - - default - the column's default value - - attrs - dict containing optional column attributes - """ - - col_defs = self.dialect.get_columns(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - for col_def in col_defs: - # make this easy and only return instances for coltype - coltype = col_def['type'] - if not isinstance(coltype, TypeEngine): - col_def['type'] = coltype() - return col_defs - - def get_primary_keys(self, table_name, schema=None, **kw): - """Return information about primary keys in `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - primary key information as a list of column names. - """ - - pkeys = self.dialect.get_primary_keys(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - - return pkeys - - def get_pk_constraint(self, table_name, schema=None, **kw): - """Return information about primary key constraint on `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - primary key information as a dictionary with these keys: - - constrained_columns - a list of column names that make up the primary key - - name - optional name of the primary key constraint. - - """ - pkeys = self.dialect.get_pk_constraint(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - - return pkeys - - - def get_foreign_keys(self, table_name, schema=None, **kw): - """Return information about foreign_keys in `table_name`. - - Given a string `table_name`, and an optional string `schema`, return - foreign key information as a list of dicts with these keys: - - constrained_columns - a list of column names that make up the foreign key - - referred_schema - the name of the referred schema - - referred_table - the name of the referred table - - referred_columns - a list of column names in the referred table that correspond to - constrained_columns - - name - optional name of the foreign key constraint. - - \**kw - other options passed to the dialect's get_foreign_keys() method. - - """ - - fk_defs = self.dialect.get_foreign_keys(self.bind, table_name, schema, - info_cache=self.info_cache, - **kw) - return fk_defs - - def get_indexes(self, table_name, schema=None, **kw): - """Return information about indexes in `table_name`. 
- - Given a string `table_name` and an optional string `schema`, return - index information as a list of dicts with these keys: - - name - the index's name - - column_names - list of column names in order - - unique - boolean - - \**kw - other options passed to the dialect's get_indexes() method. - """ - - indexes = self.dialect.get_indexes(self.bind, table_name, - schema, - info_cache=self.info_cache, **kw) - return indexes - - def reflecttable(self, table, include_columns, exclude_columns=()): - """Given a Table object, load its internal constructs based on introspection. - - This is the underlying method used by most dialects to produce - table reflection. Direct usage is like:: - - from sqlalchemy import create_engine, MetaData, Table - from sqlalchemy.engine import reflection - - engine = create_engine('...') - meta = MetaData() - user_table = Table('user', meta) - insp = Inspector.from_engine(engine) - insp.reflecttable(user_table, None) - - :param table: a :class:`~sqlalchemy.schema.Table` instance. - :param include_columns: a list of string column names to include - in the reflection process. If ``None``, all columns are reflected. - - """ - dialect = self.bind.dialect - - # table attributes we might need. - reflection_options = dict( - (k, table.kwargs.get(k)) for k in dialect.reflection_options if k in table.kwargs) - - schema = table.schema - table_name = table.name - - # apply table options - tbl_opts = self.get_table_options(table_name, schema, **table.kwargs) - if tbl_opts: - table.kwargs.update(tbl_opts) - - # table.kwargs will need to be passed to each reflection method. Make - # sure keywords are strings. - tblkw = table.kwargs.copy() - for (k, v) in tblkw.items(): - del tblkw[k] - tblkw[str(k)] = v - - # Py2K - if isinstance(schema, str): - schema = schema.decode(dialect.encoding) - if isinstance(table_name, str): - table_name = table_name.decode(dialect.encoding) - # end Py2K - - # columns - found_table = False - for col_d in self.get_columns(table_name, schema, **tblkw): - found_table = True - table.dispatch.column_reflect(table, col_d) - - name = col_d['name'] - if include_columns and name not in include_columns: - continue - if exclude_columns and name in exclude_columns: - continue - - coltype = col_d['type'] - col_kw = { - 'nullable': col_d['nullable'], - } - for k in ('autoincrement', 'quote', 'info', 'key'): - if k in col_d: - col_kw[k] = col_d[k] - - colargs = [] - if col_d.get('default') is not None: - # the "default" value is assumed to be a literal SQL expression, - # so is wrapped in text() so that no quoting occurs on re-issuance. - colargs.append( - sa_schema.DefaultClause( - sql.text(col_d['default']), _reflected=True - ) - ) - - if 'sequence' in col_d: - # TODO: mssql, maxdb and sybase are using this. 
- seq = col_d['sequence'] - sequence = sa_schema.Sequence(seq['name'], 1, 1) - if 'start' in seq: - sequence.start = seq['start'] - if 'increment' in seq: - sequence.increment = seq['increment'] - colargs.append(sequence) - - col = sa_schema.Column(name, coltype, *colargs, **col_kw) - table.append_column(col) - - if not found_table: - raise exc.NoSuchTableError(table.name) - - # Primary keys - pk_cons = self.get_pk_constraint(table_name, schema, **tblkw) - if pk_cons: - pk_cols = [table.c[pk] - for pk in pk_cons['constrained_columns'] - if pk in table.c and pk not in exclude_columns - ] + [pk for pk in table.primary_key if pk.key in exclude_columns] - primary_key_constraint = sa_schema.PrimaryKeyConstraint(name=pk_cons.get('name'), - *pk_cols - ) - - table.append_constraint(primary_key_constraint) - - # Foreign keys - fkeys = self.get_foreign_keys(table_name, schema, **tblkw) - for fkey_d in fkeys: - conname = fkey_d['name'] - constrained_columns = fkey_d['constrained_columns'] - referred_schema = fkey_d['referred_schema'] - referred_table = fkey_d['referred_table'] - referred_columns = fkey_d['referred_columns'] - refspec = [] - if referred_schema is not None: - sa_schema.Table(referred_table, table.metadata, - autoload=True, schema=referred_schema, - autoload_with=self.bind, - **reflection_options - ) - for column in referred_columns: - refspec.append(".".join( - [referred_schema, referred_table, column])) - else: - sa_schema.Table(referred_table, table.metadata, autoload=True, - autoload_with=self.bind, - **reflection_options - ) - for column in referred_columns: - refspec.append(".".join([referred_table, column])) - table.append_constraint( - sa_schema.ForeignKeyConstraint(constrained_columns, refspec, - conname, link_to_name=True)) - # Indexes - indexes = self.get_indexes(table_name, schema) - for index_d in indexes: - name = index_d['name'] - columns = index_d['column_names'] - unique = index_d['unique'] - flavor = index_d.get('type', 'unknown type') - if include_columns and \ - not set(columns).issubset(include_columns): - util.warn( - "Omitting %s KEY for (%s), key covers omitted columns." % - (flavor, ', '.join(columns))) - continue - sa_schema.Index(name, *[table.columns[c] for c in columns], - **dict(unique=unique)) diff --git a/libs/sqlalchemy/engine/strategies.py b/libs/sqlalchemy/engine/strategies.py deleted file mode 100644 index fab97975..00000000 --- a/libs/sqlalchemy/engine/strategies.py +++ /dev/null @@ -1,258 +0,0 @@ -# engine/strategies.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Strategies for creating new instances of Engine types. - -These are semi-private implementation classes which provide the -underlying behavior for the "strategy" keyword argument available on -:func:`~sqlalchemy.engine.create_engine`. Current available options are -``plain``, ``threadlocal``, and ``mock``. - -New strategies can be added via new ``EngineStrategy`` classes. -""" - -from operator import attrgetter - -from sqlalchemy.engine import base, threadlocal, url -from sqlalchemy import util, exc, event -from sqlalchemy import pool as poollib - -strategies = {} - - -class EngineStrategy(object): - """An adaptor that processes input arguments and produces an Engine. - - Provides a ``create`` method that receives input arguments and - produces an instance of base.Engine or a subclass. 
- - """ - - def __init__(self): - strategies[self.name] = self - - def create(self, *args, **kwargs): - """Given arguments, returns a new Engine instance.""" - - raise NotImplementedError() - - -class DefaultEngineStrategy(EngineStrategy): - """Base class for built-in strategies.""" - - def create(self, name_or_url, **kwargs): - # create url.URL object - u = url.make_url(name_or_url) - - dialect_cls = u.get_dialect() - - dialect_args = {} - # consume dialect arguments from kwargs - for k in util.get_cls_kwargs(dialect_cls): - if k in kwargs: - dialect_args[k] = kwargs.pop(k) - - dbapi = kwargs.pop('module', None) - if dbapi is None: - dbapi_args = {} - for k in util.get_func_kwargs(dialect_cls.dbapi): - if k in kwargs: - dbapi_args[k] = kwargs.pop(k) - dbapi = dialect_cls.dbapi(**dbapi_args) - - dialect_args['dbapi'] = dbapi - - # create dialect - dialect = dialect_cls(**dialect_args) - - # assemble connection arguments - (cargs, cparams) = dialect.create_connect_args(u) - cparams.update(kwargs.pop('connect_args', {})) - - # look for existing pool or create - pool = kwargs.pop('pool', None) - if pool is None: - def connect(): - try: - return dialect.connect(*cargs, **cparams) - except Exception, e: - # Py3K - #raise exc.DBAPIError.instance(None, None, - # e, dialect.dbapi.Error, - # connection_invalidated= - # dialect.is_disconnect(e, None, None) - # ) from e - # Py2K - import sys - raise exc.DBAPIError.instance( - None, None, e, dialect.dbapi.Error, - connection_invalidated= - dialect.is_disconnect(e, None, None)), \ - None, sys.exc_info()[2] - # end Py2K - - creator = kwargs.pop('creator', connect) - - poolclass = kwargs.pop('poolclass', None) - if poolclass is None: - poolclass = dialect_cls.get_pool_class(u) - pool_args = {} - - # consume pool arguments from kwargs, translating a few of - # the arguments - translate = {'logging_name': 'pool_logging_name', - 'echo': 'echo_pool', - 'timeout': 'pool_timeout', - 'recycle': 'pool_recycle', - 'events':'pool_events', - 'use_threadlocal':'pool_threadlocal', - 'reset_on_return':'pool_reset_on_return'} - for k in util.get_cls_kwargs(poolclass): - tk = translate.get(k, k) - if tk in kwargs: - pool_args[k] = kwargs.pop(tk) - pool = poolclass(creator, **pool_args) - else: - if isinstance(pool, poollib._DBProxy): - pool = pool.get_pool(*cargs, **cparams) - else: - pool = pool - - # create engine. - engineclass = self.engine_cls - engine_args = {} - for k in util.get_cls_kwargs(engineclass): - if k in kwargs: - engine_args[k] = kwargs.pop(k) - - _initialize = kwargs.pop('_initialize', True) - - # all kwargs should be consumed - if kwargs: - raise TypeError( - "Invalid argument(s) %s sent to create_engine(), " - "using configuration %s/%s/%s. Please check that the " - "keyword arguments are appropriate for this combination " - "of components." 
% (','.join("'%s'" % k for k in kwargs), - dialect.__class__.__name__, - pool.__class__.__name__, - engineclass.__name__)) - - engine = engineclass(pool, dialect, u, **engine_args) - - if _initialize: - do_on_connect = dialect.on_connect() - if do_on_connect: - def on_connect(dbapi_connection, connection_record): - conn = getattr(dbapi_connection, '_sqla_unwrap', dbapi_connection) - if conn is None: - return - do_on_connect(conn) - - event.listen(pool, 'first_connect', on_connect) - event.listen(pool, 'connect', on_connect) - - def first_connect(dbapi_connection, connection_record): - c = base.Connection(engine, connection=dbapi_connection) - - # TODO: removing this allows the on connect activities - # to generate events. tests currently assume these aren't - # sent. do we want users to get all the initial connect - # activities as events ? - c._has_events = False - - dialect.initialize(c) - event.listen(pool, 'first_connect', first_connect) - - return engine - - -class PlainEngineStrategy(DefaultEngineStrategy): - """Strategy for configuring a regular Engine.""" - - name = 'plain' - engine_cls = base.Engine - -PlainEngineStrategy() - - -class ThreadLocalEngineStrategy(DefaultEngineStrategy): - """Strategy for configuring an Engine with threadlocal behavior.""" - - name = 'threadlocal' - engine_cls = threadlocal.TLEngine - -ThreadLocalEngineStrategy() - - -class MockEngineStrategy(EngineStrategy): - """Strategy for configuring an Engine-like object with mocked execution. - - Produces a single mock Connectable object which dispatches - statement execution to a passed-in function. - - """ - - name = 'mock' - - def create(self, name_or_url, executor, **kwargs): - # create url.URL object - u = url.make_url(name_or_url) - - dialect_cls = u.get_dialect() - - dialect_args = {} - # consume dialect arguments from kwargs - for k in util.get_cls_kwargs(dialect_cls): - if k in kwargs: - dialect_args[k] = kwargs.pop(k) - - # create dialect - dialect = dialect_cls(**dialect_args) - - return MockEngineStrategy.MockConnection(dialect, executor) - - class MockConnection(base.Connectable): - def __init__(self, dialect, execute): - self._dialect = dialect - self.execute = execute - - engine = property(lambda s: s) - dialect = property(attrgetter('_dialect')) - name = property(lambda s: s._dialect.name) - - def contextual_connect(self, **kwargs): - return self - - def execution_options(self, **kw): - return self - - def compiler(self, statement, parameters, **kwargs): - return self._dialect.compiler( - statement, parameters, engine=self, **kwargs) - - def create(self, entity, **kwargs): - kwargs['checkfirst'] = False - from sqlalchemy.engine import ddl - - ddl.SchemaGenerator(self.dialect, self, **kwargs).traverse_single(entity) - - def drop(self, entity, **kwargs): - kwargs['checkfirst'] = False - from sqlalchemy.engine import ddl - ddl.SchemaDropper(self.dialect, self, **kwargs).traverse_single(entity) - - def _run_visitor(self, visitorcallable, element, - connection=None, - **kwargs): - kwargs['checkfirst'] = False - visitorcallable(self.dialect, self, - **kwargs).traverse_single(element) - - def execute(self, object, *multiparams, **params): - raise NotImplementedError() - -MockEngineStrategy() diff --git a/libs/sqlalchemy/engine/threadlocal.py b/libs/sqlalchemy/engine/threadlocal.py deleted file mode 100644 index c8a16272..00000000 --- a/libs/sqlalchemy/engine/threadlocal.py +++ /dev/null @@ -1,126 +0,0 @@ -# engine/threadlocal.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# 
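
Editor's note: `MockEngineStrategy` above is what backs `create_engine(..., strategy='mock')`: the returned `MockConnection` never opens a DBAPI connection and instead hands each statement to the supplied `executor`. The classic use is dumping generated DDL; a sketch:

```python
from sqlalchemy import create_engine, MetaData, Table, Column, Integer

meta = MetaData()
Table('t', meta, Column('id', Integer, primary_key=True))

def dump(sql, *multiparams, **params):
    # Receives every statement in place of executing it.
    print sql.compile(dialect=engine.dialect)

engine = create_engine('sqlite://', strategy='mock', executor=dump)
meta.create_all(engine)   # prints CREATE TABLE t (...) instead of executing
```
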
This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides a thread-local transactional wrapper around the root Engine class. - -The ``threadlocal`` module is invoked when using the ``strategy="threadlocal"`` flag -with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is -invoked automatically when the threadlocal engine strategy is used. -""" - -from sqlalchemy import util, event -from sqlalchemy.engine import base -import weakref - -class TLConnection(base.Connection): - def __init__(self, *arg, **kw): - super(TLConnection, self).__init__(*arg, **kw) - self.__opencount = 0 - - def _increment_connect(self): - self.__opencount += 1 - return self - - def close(self): - if self.__opencount == 1: - base.Connection.close(self) - self.__opencount -= 1 - - def _force_close(self): - self.__opencount = 0 - base.Connection.close(self) - -class TLEngine(base.Engine): - """An Engine that includes support for thread-local managed transactions.""" - - _tl_connection_cls = TLConnection - - def __init__(self, *args, **kwargs): - super(TLEngine, self).__init__(*args, **kwargs) - self._connections = util.threading.local() - - - def contextual_connect(self, **kw): - if not hasattr(self._connections, 'conn'): - connection = None - else: - connection = self._connections.conn() - - if connection is None or connection.closed: - # guards against pool-level reapers, if desired. - # or not connection.connection.is_valid: - connection = self._tl_connection_cls(self, self.pool.connect(), **kw) - self._connections.conn = conn = weakref.ref(connection) - - return connection._increment_connect() - - def begin_twophase(self, xid=None): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append(self.contextual_connect().begin_twophase(xid=xid)) - return self - - def begin_nested(self): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append(self.contextual_connect().begin_nested()) - return self - - def begin(self): - if not hasattr(self._connections, 'trans'): - self._connections.trans = [] - self._connections.trans.append(self.contextual_connect().begin()) - return self - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - if type is None: - self.commit() - else: - self.rollback() - - def prepare(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - self._connections.trans[-1].prepare() - - def commit(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - trans = self._connections.trans.pop(-1) - trans.commit() - - def rollback(self): - if not hasattr(self._connections, 'trans') or \ - not self._connections.trans: - return - trans = self._connections.trans.pop(-1) - trans.rollback() - - def dispose(self): - self._connections = util.threading.local() - super(TLEngine, self).dispose() - - @property - def closed(self): - return not hasattr(self._connections, 'conn') or \ - self._connections.conn() is None or \ - self._connections.conn().closed - - def close(self): - if not self.closed: - self.contextual_connect().close() - connection = self._connections.conn() - connection._force_close() - del self._connections.conn - self._connections.trans = [] - - def __repr__(self): - return 'TLEngine(%s)' % str(self.url) diff --git a/libs/sqlalchemy/engine/url.py b/libs/sqlalchemy/engine/url.py deleted 
file mode 100644 index 9cabc8dc..00000000 --- a/libs/sqlalchemy/engine/url.py +++ /dev/null @@ -1,228 +0,0 @@ -# engine/url.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides the :class:`~sqlalchemy.engine.url.URL` class which encapsulates -information about a database connection specification. - -The URL object is created automatically when :func:`~sqlalchemy.engine.create_engine` is called -with a string argument; alternatively, the URL is a public-facing construct which can -be used directly and is also accepted directly by ``create_engine()``. -""" - -import re, urllib -from sqlalchemy import exc, util - - -class URL(object): - """ - Represent the components of a URL used to connect to a database. - - This object is suitable to be passed directly to a - ``create_engine()`` call. The fields of the URL are parsed from a - string by the ``module-level make_url()`` function. the string - format of the URL is an RFC-1738-style string. - - All initialization parameters are available as public attributes. - - :param drivername: the name of the database backend. - This name will correspond to a module in sqlalchemy/databases - or a third party plug-in. - - :param username: The user name. - - :param password: database password. - - :param host: The name of the host. - - :param port: The port number. - - :param database: The database name. - - :param query: A dictionary of options to be passed to the - dialect and/or the DBAPI upon connect. - - """ - - def __init__(self, drivername, username=None, password=None, - host=None, port=None, database=None, query=None): - self.drivername = drivername - self.username = username - self.password = password - self.host = host - if port is not None: - self.port = int(port) - else: - self.port = None - self.database = database - self.query = query or {} - - def __str__(self): - s = self.drivername + "://" - if self.username is not None: - s += self.username - if self.password is not None: - s += ':' + urllib.quote_plus(self.password) - s += "@" - if self.host is not None: - s += self.host - if self.port is not None: - s += ':' + str(self.port) - if self.database is not None: - s += '/' + self.database - if self.query: - keys = self.query.keys() - keys.sort() - s += '?' + "&".join("%s=%s" % (k, self.query[k]) for k in keys) - return s - - def __hash__(self): - return hash(str(self)) - - def __eq__(self, other): - return \ - isinstance(other, URL) and \ - self.drivername == other.drivername and \ - self.username == other.username and \ - self.password == other.password and \ - self.host == other.host and \ - self.database == other.database and \ - self.query == other.query - - def get_dialect(self): - """Return the SQLAlchemy database dialect class corresponding - to this URL's driver name. - """ - - try: - if '+' in self.drivername: - dialect, driver = self.drivername.split('+') - else: - dialect, driver = self.drivername, 'base' - - module = __import__('sqlalchemy.dialects.%s' % (dialect, )).dialects - module = getattr(module, dialect) - if hasattr(module, driver): - module = getattr(module, driver) - else: - module = self._load_entry_point() - if module is None: - raise exc.ArgumentError( - "Could not determine dialect for '%s'." 
% - self.drivername) - - return module.dialect - except ImportError: - module = self._load_entry_point() - if module is not None: - return module - else: - raise exc.ArgumentError( - "Could not determine dialect for '%s'." % self.drivername) - - def _load_entry_point(self): - """attempt to load this url's dialect from entry points, or return None - if pkg_resources is not installed or there is no matching entry point. - - Raise ImportError if the actual load fails. - - """ - try: - import pkg_resources - except ImportError: - return None - - for res in pkg_resources.iter_entry_points('sqlalchemy.dialects'): - if res.name == self.drivername.replace("+", "."): - return res.load() - else: - return None - - def translate_connect_args(self, names=[], **kw): - """Translate url attributes into a dictionary of connection arguments. - - Returns attributes of this url (`host`, `database`, `username`, - `password`, `port`) as a plain dictionary. The attribute names are - used as the keys by default. Unset or false attributes are omitted - from the final dictionary. - - :param \**kw: Optional, alternate key names for url attributes. - - :param names: Deprecated. Same purpose as the keyword-based alternate names, - but correlates the name to the original positionally. - """ - - translated = {} - attribute_names = ['host', 'database', 'username', 'password', 'port'] - for sname in attribute_names: - if names: - name = names.pop(0) - elif sname in kw: - name = kw[sname] - else: - name = sname - if name is not None and getattr(self, sname, False): - translated[name] = getattr(self, sname) - return translated - -def make_url(name_or_url): - """Given a string or unicode instance, produce a new URL instance. - - The given string is parsed according to the RFC 1738 spec. If an - existing URL object is passed, just returns the object. - """ - - if isinstance(name_or_url, basestring): - return _parse_rfc1738_args(name_or_url) - else: - return name_or_url - -def _parse_rfc1738_args(name): - pattern = re.compile(r''' - (?P[\w\+]+):// - (?: - (?P[^:/]*) - (?::(?P[^/]*))? - @)? - (?: - (?P[^/:]*) - (?::(?P[^/]*))? - )? - (?:/(?P.*))? 
- ''' - , re.X) - - m = pattern.match(name) - if m is not None: - components = m.groupdict() - if components['database'] is not None: - tokens = components['database'].split('?', 2) - components['database'] = tokens[0] - query = (len(tokens) > 1 and dict(util.parse_qsl(tokens[1]))) or None - # Py2K - if query is not None: - query = dict((k.encode('ascii'), query[k]) for k in query) - # end Py2K - else: - query = None - components['query'] = query - - if components['password'] is not None: - components['password'] = urllib.unquote_plus(components['password']) - - name = components.pop('name') - return URL(name, **components) - else: - raise exc.ArgumentError( - "Could not parse rfc1738 URL from string '%s'" % name) - -def _parse_keyvalue_args(name): - m = re.match( r'(\w+)://(.*)', name) - if m is not None: - (name, args) = m.group(1, 2) - opts = dict( util.parse_qsl( args ) ) - return URL(name, *opts) - else: - return None diff --git a/libs/sqlalchemy/event.py b/libs/sqlalchemy/event.py deleted file mode 100644 index dabebb81..00000000 --- a/libs/sqlalchemy/event.py +++ /dev/null @@ -1,460 +0,0 @@ -# sqlalchemy/event.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Base event API.""" - -from sqlalchemy import util, exc -import weakref - -CANCEL = util.symbol('CANCEL') -NO_RETVAL = util.symbol('NO_RETVAL') - -def listen(target, identifier, fn, *args, **kw): - """Register a listener function for the given target. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.schema import UniqueConstraint - - def unique_constraint_name(const, table): - const.name = "uq_%s_%s" % ( - table.name, - list(const.columns)[0].name - ) - event.listen( - UniqueConstraint, - "after_parent_attach", - unique_constraint_name) - - """ - - for evt_cls in _registrars[identifier]: - tgt = evt_cls._accept_with(target) - if tgt is not None: - tgt.dispatch._listen(tgt, identifier, fn, *args, **kw) - return - raise exc.InvalidRequestError("No such event '%s' for target '%s'" % - (identifier, target)) - -def listens_for(target, identifier, *args, **kw): - """Decorate a function as a listener for the given target + identifier. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.schema import UniqueConstraint - - @event.listens_for(UniqueConstraint, "after_parent_attach") - def unique_constraint_name(const, table): - const.name = "uq_%s_%s" % ( - table.name, - list(const.columns)[0].name - ) - """ - def decorate(fn): - listen(target, identifier, fn, *args, **kw) - return fn - return decorate - -def remove(target, identifier, fn): - """Remove an event listener. - - Note that some event removals, particularly for those event dispatchers - which create wrapper functions and secondary even listeners, may not yet - be supported. - - """ - for evt_cls in _registrars[identifier]: - for tgt in evt_cls._accept_with(target): - tgt.dispatch._remove(identifier, tgt, fn) - return - -_registrars = util.defaultdict(list) - -def _is_event_name(name): - return not name.startswith('_') and name != 'dispatch' - -class _UnpickleDispatch(object): - """Serializable callable that re-generates an instance of :class:`_Dispatch` - given a particular :class:`.Events` subclass. 
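
Editor's note: `listen()` above resolves the string identifier through `_registrars` and lets each registered `Events` subclass claim the target via `_accept_with()`. A minimal sketch using the pool `connect` hook (the `PoolEvents` family appears at the end of this diff); the PRAGMA is illustrative:

```python
from sqlalchemy import create_engine, event
from sqlalchemy.pool import Pool

def on_connect(dbapi_con, connection_record):
    # Class-level target: fires for connections made by any Pool subclass.
    dbapi_con.execute('PRAGMA foreign_keys=ON')

event.listen(Pool, 'connect', on_connect)

engine = create_engine('sqlite://')
engine.connect().close()   # on_connect fires as the pool opens the connection
```
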
- - """ - def __call__(self, _parent_cls): - for cls in _parent_cls.__mro__: - if 'dispatch' in cls.__dict__: - return cls.__dict__['dispatch'].dispatch_cls(_parent_cls) - else: - raise AttributeError("No class with a 'dispatch' member present.") - -class _Dispatch(object): - """Mirror the event listening definitions of an Events class with - listener collections. - - Classes which define a "dispatch" member will return a - non-instantiated :class:`._Dispatch` subclass when the member - is accessed at the class level. When the "dispatch" member is - accessed at the instance level of its owner, an instance - of the :class:`._Dispatch` class is returned. - - A :class:`._Dispatch` class is generated for each :class:`.Events` - class defined, by the :func:`._create_dispatcher_class` function. - The original :class:`.Events` classes remain untouched. - This decouples the construction of :class:`.Events` subclasses from - the implementation used by the event internals, and allows - inspecting tools like Sphinx to work in an unsurprising - way against the public API. - - """ - - def __init__(self, _parent_cls): - self._parent_cls = _parent_cls - - def __reduce__(self): - return _UnpickleDispatch(), (self._parent_cls, ) - - def _update(self, other, only_propagate=True): - """Populate from the listeners in another :class:`_Dispatch` - object.""" - - for ls in _event_descriptors(other): - getattr(self, ls.name).\ - for_modify(self)._update(ls, only_propagate=only_propagate) - -def _event_descriptors(target): - return [getattr(target, k) for k in dir(target) if _is_event_name(k)] - -class _EventMeta(type): - """Intercept new Event subclasses and create - associated _Dispatch classes.""" - - def __init__(cls, classname, bases, dict_): - _create_dispatcher_class(cls, classname, bases, dict_) - return type.__init__(cls, classname, bases, dict_) - -def _create_dispatcher_class(cls, classname, bases, dict_): - """Create a :class:`._Dispatch` class corresponding to an - :class:`.Events` class.""" - - # there's all kinds of ways to do this, - # i.e. make a Dispatch class that shares the '_listen' method - # of the Event class, this is the straight monkeypatch. - dispatch_base = getattr(cls, 'dispatch', _Dispatch) - cls.dispatch = dispatch_cls = type("%sDispatch" % classname, - (dispatch_base, ), {}) - dispatch_cls._listen = cls._listen - dispatch_cls._clear = cls._clear - - for k in dict_: - if _is_event_name(k): - setattr(dispatch_cls, k, _DispatchDescriptor(dict_[k])) - _registrars[k].append(cls) - -def _remove_dispatcher(cls): - for k in dir(cls): - if _is_event_name(k): - _registrars[k].remove(cls) - if not _registrars[k]: - del _registrars[k] - -class Events(object): - """Define event listening functions for a particular target type.""" - - - __metaclass__ = _EventMeta - - @classmethod - def _accept_with(cls, target): - # Mapper, ClassManager, Session override this to - # also accept classes, scoped_sessions, sessionmakers, etc. 
- if hasattr(target, 'dispatch') and ( - isinstance(target.dispatch, cls.dispatch) or \ - isinstance(target.dispatch, type) and \ - issubclass(target.dispatch, cls.dispatch) - ): - return target - else: - return None - - @classmethod - def _listen(cls, target, identifier, fn, propagate=False, insert=False): - if insert: - getattr(target.dispatch, identifier).\ - for_modify(target.dispatch).insert(fn, target, propagate) - else: - getattr(target.dispatch, identifier).\ - for_modify(target.dispatch).append(fn, target, propagate) - - @classmethod - def _remove(cls, target, identifier, fn): - getattr(target.dispatch, identifier).remove(fn, target) - - @classmethod - def _clear(cls): - for attr in dir(cls.dispatch): - if _is_event_name(attr): - getattr(cls.dispatch, attr).clear() - -class _DispatchDescriptor(object): - """Class-level attributes on :class:`._Dispatch` classes.""" - - def __init__(self, fn): - self.__name__ = fn.__name__ - self.__doc__ = fn.__doc__ - self._clslevel = weakref.WeakKeyDictionary() - self._empty_listeners = weakref.WeakKeyDictionary() - - def _contains(self, cls, evt): - return cls in self._clslevel and \ - evt in self._clslevel[cls] - - def insert(self, obj, target, propagate): - assert isinstance(target, type), \ - "Class-level Event targets must be classes." - stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._clslevel[cls] = [] - self._clslevel[cls].insert(0, obj) - - def append(self, obj, target, propagate): - assert isinstance(target, type), \ - "Class-level Event targets must be classes." - - stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls is not target and cls not in self._clslevel: - self.update_subclass(cls) - else: - if cls not in self._clslevel: - self._clslevel[cls] = [] - self._clslevel[cls].append(obj) - - def update_subclass(self, target): - if target not in self._clslevel: - self._clslevel[target] = [] - clslevel = self._clslevel[target] - for cls in target.__mro__[1:]: - if cls in self._clslevel: - clslevel.extend([ - fn for fn - in self._clslevel[cls] - if fn not in clslevel - ]) - - def remove(self, obj, target): - stack = [target] - while stack: - cls = stack.pop(0) - stack.extend(cls.__subclasses__()) - if cls in self._clslevel: - self._clslevel[cls].remove(obj) - - def clear(self): - """Clear all class level listeners""" - - for dispatcher in self._clslevel.values(): - dispatcher[:] = [] - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _DispatchDescriptor at the class level of - a dispatcher, this returns self. - - """ - return self - - def __get__(self, obj, cls): - if obj is None: - return self - elif obj._parent_cls in self._empty_listeners: - ret = self._empty_listeners[obj._parent_cls] - else: - self._empty_listeners[obj._parent_cls] = ret = \ - _EmptyListener(self, obj._parent_cls) - # assigning it to __dict__ means - # memoized for fast re-access. but more memory. - obj.__dict__[self.__name__] = ret - return ret - -class _EmptyListener(object): - """Serves as a class-level interface to the events - served by a _DispatchDescriptor, when there are no - instance-level events present. - - Is replaced by _ListenerCollection when instance-level - events are added. 
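
Editor's note: `_DispatchDescriptor.insert()/append()` walk `target.__subclasses__()` so class-level listeners reach subclasses, and `update_subclass()` back-fills from the MRO the first time a subclass is seen. The whole mechanism is driven by defining an `Events` subclass; a sketch of that pattern (all names hypothetical, not a public extension point):

```python
from sqlalchemy import event

class MyEvents(event.Events):
    """Hypothetical event family; _EventMeta generates a parallel
    MyEventsDispatch class with one _DispatchDescriptor per method."""

    def before_frobnicate(self, target):
        """Called before `target` is frobnicated."""

class Widget(object):
    # Targets expose the generated dispatch via the dispatcher() descriptor.
    dispatch = event.dispatcher(MyEvents)

def listener(target):
    pass

event.listen(Widget, 'before_frobnicate', listener)
Widget().dispatch.before_frobnicate('x')   # invokes listener('x')
```
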
- - """ - def __init__(self, parent, target_cls): - if target_cls not in parent._clslevel: - parent.update_subclass(target_cls) - self.parent = parent - self.parent_listeners = parent._clslevel[target_cls] - self.name = parent.__name__ - self.propagate = frozenset() - self.listeners = () - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _EmptyListener at the instance level of - a dispatcher, this generates a new - _ListenerCollection, applies it to the instance, - and returns it. - - """ - obj.__dict__[self.name] = result = _ListenerCollection( - self.parent, obj._parent_cls) - return result - - def _needs_modify(self, *args, **kw): - raise NotImplementedError("need to call for_modify()") - - exec_once = insert = append = remove = clear = _needs_modify - - def __call__(self, *args, **kw): - """Execute this event.""" - - for fn in self.parent_listeners: - fn(*args, **kw) - - def __len__(self): - return len(self.parent_listeners) - - def __iter__(self): - return iter(self.parent_listeners) - - def __getitem__(self, index): - return (self.parent_listeners)[index] - - def __nonzero__(self): - return bool(self.parent_listeners) - - -class _ListenerCollection(object): - """Instance-level attributes on instances of :class:`._Dispatch`. - - Represents a collection of listeners. - - As of 0.7.9, _ListenerCollection is only first - created via the _EmptyListener.for_modify() method. - - """ - - _exec_once = False - - def __init__(self, parent, target_cls): - if target_cls not in parent._clslevel: - parent.update_subclass(target_cls) - self.parent_listeners = parent._clslevel[target_cls] - self.name = parent.__name__ - self.listeners = [] - self.propagate = set() - - def for_modify(self, obj): - """Return an event collection which can be modified. - - For _ListenerCollection at the instance level of - a dispatcher, this returns self. - - """ - return self - - def exec_once(self, *args, **kw): - """Execute this event, but only if it has not been - executed already for this collection.""" - - if not self._exec_once: - self(*args, **kw) - self._exec_once = True - - def __call__(self, *args, **kw): - """Execute this event.""" - - for fn in self.parent_listeners: - fn(*args, **kw) - for fn in self.listeners: - fn(*args, **kw) - - # I'm not entirely thrilled about the overhead here, - # but this allows class-level listeners to be added - # at any point. - # - # In the absense of instance-level listeners, - # we stay with the _EmptyListener object when called - # at the instance level. 
- - def __len__(self): - return len(self.parent_listeners + self.listeners) - - def __iter__(self): - return iter(self.parent_listeners + self.listeners) - - def __getitem__(self, index): - return (self.parent_listeners + self.listeners)[index] - - def __nonzero__(self): - return bool(self.listeners or self.parent_listeners) - - def _update(self, other, only_propagate=True): - """Populate from the listeners in another :class:`_Dispatch` - object.""" - - existing_listeners = self.listeners - existing_listener_set = set(existing_listeners) - self.propagate.update(other.propagate) - existing_listeners.extend([l for l - in other.listeners - if l not in existing_listener_set - and not only_propagate or l in self.propagate - ]) - - def insert(self, obj, target, propagate): - if obj not in self.listeners: - self.listeners.insert(0, obj) - if propagate: - self.propagate.add(obj) - - def append(self, obj, target, propagate): - if obj not in self.listeners: - self.listeners.append(obj) - if propagate: - self.propagate.add(obj) - - def remove(self, obj, target): - if obj in self.listeners: - self.listeners.remove(obj) - self.propagate.discard(obj) - - def clear(self): - self.listeners[:] = [] - self.propagate.clear() - -class dispatcher(object): - """Descriptor used by target classes to - deliver the _Dispatch class at the class level - and produce new _Dispatch instances for target - instances. - - """ - def __init__(self, events): - self.dispatch_cls = events.dispatch - self.events = events - - def __get__(self, obj, cls): - if obj is None: - return self.dispatch_cls - obj.__dict__['dispatch'] = disp = self.dispatch_cls(cls) - return disp diff --git a/libs/sqlalchemy/events.py b/libs/sqlalchemy/events.py deleted file mode 100644 index e7aa34f2..00000000 --- a/libs/sqlalchemy/events.py +++ /dev/null @@ -1,463 +0,0 @@ -# sqlalchemy/events.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Core event interfaces.""" - -from sqlalchemy import event, exc, util -engine = util.importlater('sqlalchemy', 'engine') -pool = util.importlater('sqlalchemy', 'pool') - - -class DDLEvents(event.Events): - """ - Define event listeners for schema objects, - that is, :class:`.SchemaItem` and :class:`.SchemaEvent` - subclasses, including :class:`.MetaData`, :class:`.Table`, - :class:`.Column`. - - :class:`.MetaData` and :class:`.Table` support events - specifically regarding when CREATE and DROP - DDL is emitted to the database. - - Attachment events are also provided to customize - behavior whenever a child schema element is associated - with a parent, such as, when a :class:`.Column` is associated - with its :class:`.Table`, when a :class:`.ForeignKeyConstraint` - is associated with a :class:`.Table`, etc. 
- - Example using the ``after_create`` event:: - - from sqlalchemy import event - from sqlalchemy import Table, Column, MetaData, Integer - - m = MetaData() - some_table = Table('some_table', m, Column('data', Integer)) - - def after_create(target, connection, **kw): - connection.execute("ALTER TABLE %s SET name=foo_%s" % - (target.name, target.name)) - - event.listen(some_table, "after_create", after_create) - - DDL events integrate closely with the - :class:`.DDL` class and the :class:`.DDLElement` hierarchy - of DDL clause constructs, which are themselves appropriate - as listener callables:: - - from sqlalchemy import DDL - event.listen( - some_table, - "after_create", - DDL("ALTER TABLE %(table)s SET name=foo_%(table)s") - ) - - The methods here define the name of an event as well - as the names of members that are passed to listener - functions. - - See also: - - :ref:`event_toplevel` - - :class:`.DDLElement` - - :class:`.DDL` - - :ref:`schema_ddl_sequences` - - """ - - def before_create(self, target, connection, **kw): - """Called before CREATE statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - CREATE statement or statements will be emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def after_create(self, target, connection, **kw): - """Called after CREATE statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - CREATE statement or statements have been emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def before_drop(self, target, connection, **kw): - """Called before DROP statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - DROP statement or statements will be emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def after_drop(self, target, connection, **kw): - """Called after DROP statements are emitted. - - :param target: the :class:`.MetaData` or :class:`.Table` - object which is the target of the event. - :param connection: the :class:`.Connection` where the - DROP statement or statements have been emitted. - :param \**kw: additional keyword arguments relevant - to the event. The contents of this dictionary - may vary across releases, and include the - list of tables being generated for a metadata-level - event, the checkfirst flag, and other - elements used by internal events. - - """ - - def before_parent_attach(self, target, parent): - """Called before a :class:`.SchemaItem` is associated with - a parent :class:`.SchemaItem`.
- - :param target: the target object - :param parent: the parent to which the target is being attached. - - :func:`.event.listen` also accepts a modifier for this event: - - :param propagate=False: When True, the listener function will - be established for any copies made of the target object, - i.e. those copies that are generated when - :meth:`.Table.tometadata` is used. - - """ - - def after_parent_attach(self, target, parent): - """Called after a :class:`.SchemaItem` is associated with - a parent :class:`.SchemaItem`. - - :param target: the target object - :param parent: the parent to which the target is being attached. - - :func:`.event.listen` also accepts a modifier for this event: - - :param propagate=False: When True, the listener function will - be established for any copies made of the target object, - i.e. those copies that are generated when - :meth:`.Table.tometadata` is used. - - """ - - def column_reflect(self, table, column_info): - """Called for each unit of 'column info' retrieved when - a :class:`.Table` is being reflected. - - The dictionary of column information as returned by the - dialect is passed, and can be modified. The dictionary - is that returned in each element of the list returned - by :meth:`.reflection.Inspector.get_columns`. - - The event is called before any action is taken against - this dictionary, and the contents can be modified. - The :class:`.Column` specific arguments ``info``, ``key``, - and ``quote`` can also be added to the dictionary and - will be passed to the constructor of :class:`.Column`. - - Note that this event is only meaningful if either - associated with the :class:`.Table` class across the - board, e.g.:: - - from sqlalchemy.schema import Table - from sqlalchemy import event - - def listen_for_reflect(table, column_info): - "receive a column_reflect event" - # ... - - event.listen( - Table, - 'column_reflect', - listen_for_reflect) - - ...or with a specific :class:`.Table` instance using - the ``listeners`` argument:: - - def listen_for_reflect(table, column_info): - "receive a column_reflect event" - # ... - - t = Table( - 'sometable', - autoload=True, - listeners=[ - ('column_reflect', listen_for_reflect) - ]) - - This is because the reflection process initiated by ``autoload=True`` - completes within the scope of the constructor for :class:`.Table`. - - """ - -class SchemaEventTarget(object): - """Base class for elements that are the targets of :class:`.DDLEvents` events. - - This includes :class:`.SchemaItem` as well as :class:`.SchemaType`. - - """ - dispatch = event.dispatcher(DDLEvents) - - def _set_parent(self, parent): - """Associate with this SchemaEvent's parent object.""" - - raise NotImplementedError() - - def _set_parent_with_dispatch(self, parent): - self.dispatch.before_parent_attach(self, parent) - self._set_parent(parent) - self.dispatch.after_parent_attach(self, parent) - -class PoolEvents(event.Events): - """Available events for :class:`.Pool`. - - The methods here define the name of an event as well - as the names of members that are passed to listener - functions.
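A widespread application of the ``checkout`` hook documented below is an optimistic "pre-ping": test each pooled connection as it is checked out and raise :class:`~sqlalchemy.exc.DisconnectionError` so the pool discards the dead connection and retries with a fresh one. A minimal sketch, assuming ``SELECT 1`` is a cheap statement your backend accepts::

    from sqlalchemy import event, exc
    from sqlalchemy.pool import Pool

    def ping_connection(dbapi_connection, connection_record, connection_proxy):
        cursor = dbapi_connection.cursor()
        try:
            cursor.execute("SELECT 1")
        except Exception:
            # tells the pool this connection is dead; checkout() below
            # describes how the pool then retries with a new connection
            raise exc.DisconnectionError()
        cursor.close()

    event.listen(Pool, 'checkout', ping_connection)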
- - e.g.:: - - from sqlalchemy import event - - def my_on_checkout(dbapi_conn, connection_rec, connection_proxy): - "handle an on checkout event" - - event.listen(Pool, 'checkout', my_on_checkout) - - In addition to accepting the :class:`.Pool` class and :class:`.Pool` instances, - :class:`.PoolEvents` also accepts :class:`.Engine` objects and - the :class:`.Engine` class as targets, which will be resolved - to the ``.pool`` attribute of the given engine or the :class:`.Pool` - class:: - - engine = create_engine("postgresql://scott:tiger@localhost/test") - - # will associate with engine.pool - event.listen(engine, 'checkout', my_on_checkout) - - """ - - @classmethod - def _accept_with(cls, target): - if isinstance(target, type): - if issubclass(target, engine.Engine): - return pool.Pool - elif issubclass(target, pool.Pool): - return target - elif isinstance(target, engine.Engine): - return target.pool - else: - return target - - def connect(self, dbapi_connection, connection_record): - """Called once for each new DB-API connection or Pool's ``creator()``. - - :param dbapi_con: - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - :param con_record: - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def first_connect(self, dbapi_connection, connection_record): - """Called exactly once for the first DB-API connection. - - :param dbapi_con: - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - :param con_record: - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def checkout(self, dbapi_connection, connection_record, connection_proxy): - """Called when a connection is retrieved from the Pool. - - :param dbapi_con: - A raw DB-API connection - - :param con_record: - The ``_ConnectionRecord`` that persistently manages the connection - - :param con_proxy: - The ``_ConnectionFairy`` which manages the connection for the span of - the current checkout. - - If you raise a :class:`~sqlalchemy.exc.DisconnectionError`, the current - connection will be disposed and a fresh connection retrieved. - Processing of all checkout listeners will abort and restart - using the new connection. - """ - - def checkin(self, dbapi_connection, connection_record): - """Called when a connection returns to the pool. - - Note that the connection may be closed, and may be None if the - connection has been invalidated. ``checkin`` will not be called - for detached connections. (They do not return to the pool.) - - :param dbapi_con: - A raw DB-API connection - - :param con_record: - The ``_ConnectionRecord`` that persistently manages the connection - - """ - -class ConnectionEvents(event.Events): - """Available events for :class:`.Connection`. - - The methods here define the name of an event as well as the names of members that are passed to listener functions. - - e.g.:: - - from sqlalchemy import event, create_engine - - def before_execute(conn, clauseelement, multiparams, params): - log.info("Received statement: %s" % clauseelement) - - engine = create_engine('postgresql://scott:tiger@localhost/test') - event.listen(engine, "before_execute", before_execute) - - Some events allow modifiers to the listen() function. - - :param retval=False: Applies to the :meth:`.before_execute` and - :meth:`.before_cursor_execute` events only. When True, the - user-defined event function must have a return value, which - is a tuple of parameters that replace the given statement - and parameters. 
See those methods for a description of - specific return arguments. - - """ - - @classmethod - def _listen(cls, target, identifier, fn, retval=False): - target._has_events = True - - if not retval: - if identifier == 'before_execute': - orig_fn = fn - def wrap(conn, clauseelement, multiparams, params): - orig_fn(conn, clauseelement, multiparams, params) - return clauseelement, multiparams, params - fn = wrap - elif identifier == 'before_cursor_execute': - orig_fn = fn - def wrap(conn, cursor, statement, - parameters, context, executemany): - orig_fn(conn, cursor, statement, - parameters, context, executemany) - return statement, parameters - fn = wrap - - elif retval and identifier not in ('before_execute', 'before_cursor_execute'): - raise exc.ArgumentError( - "Only the 'before_execute' and " - "'before_cursor_execute' engine " - "event listeners accept the 'retval=True' " - "argument.") - event.Events._listen(target, identifier, fn) - - def before_execute(self, conn, clauseelement, multiparams, params): - """Intercept high level execute() events.""" - - def after_execute(self, conn, clauseelement, multiparams, params, result): - """Intercept high level execute() events.""" - - def before_cursor_execute(self, conn, cursor, statement, - parameters, context, executemany): - """Intercept low-level cursor execute() events.""" - - def after_cursor_execute(self, conn, cursor, statement, - parameters, context, executemany): - """Intercept low-level cursor execute() events.""" - - def dbapi_error(self, conn, cursor, statement, parameters, - context, exception): - """Intercept a raw DBAPI error. - - This event is called with the DBAPI exception instance - received from the DBAPI itself, *before* SQLAlchemy wraps the - exception with its own exception wrappers, and before any - other operations are performed on the DBAPI cursor; the - existing transaction remains in effect as well as any state - on the cursor. - - The use case here is to inject low-level exception handling - into an :class:`.Engine`, typically for logging and - debugging purposes. In general, user code should **not** modify - any state or throw any exceptions here as this will - interfere with SQLAlchemy's cleanup and error handling - routines. - - Subsequent to this hook, SQLAlchemy may attempt any - number of operations on the connection/cursor, including - closing the cursor, rolling back of the transaction in the - case of connectionless execution, and disposing of the entire - connection pool if a "disconnect" was detected. The - exception is then wrapped in a SQLAlchemy DBAPI exception - wrapper and re-thrown. - - ..
versionadded:: 0.7.7 - - """ - - def begin(self, conn): - """Intercept begin() events.""" - - def rollback(self, conn): - """Intercept rollback() events.""" - - def commit(self, conn): - """Intercept commit() events.""" - - def savepoint(self, conn, name=None): - """Intercept savepoint() events.""" - - def rollback_savepoint(self, conn, name, context): - """Intercept rollback_savepoint() events.""" - - def release_savepoint(self, conn, name, context): - """Intercept release_savepoint() events.""" - - def begin_twophase(self, conn, xid): - """Intercept begin_twophase() events.""" - - def prepare_twophase(self, conn, xid): - """Intercept prepare_twophase() events.""" - - def rollback_twophase(self, conn, xid, is_prepared): - """Intercept rollback_twophase() events.""" - - def commit_twophase(self, conn, xid, is_prepared): - """Intercept commit_twophase() events.""" - diff --git a/libs/sqlalchemy/exc.py b/libs/sqlalchemy/exc.py deleted file mode 100644 index febee3fe..00000000 --- a/libs/sqlalchemy/exc.py +++ /dev/null @@ -1,313 +0,0 @@ -# sqlalchemy/exc.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Exceptions used with SQLAlchemy. - -The base exception class is :class:`.SQLAlchemyError`. Exceptions which are raised as a -result of DBAPI exceptions are all subclasses of -:class:`.DBAPIError`. - -""" - -import traceback - -class SQLAlchemyError(Exception): - """Generic error class.""" - - -class ArgumentError(SQLAlchemyError): - """Raised when an invalid or conflicting function argument is supplied. - - This error generally corresponds to construction time state errors. - - """ - - -class CircularDependencyError(SQLAlchemyError): - """Raised by topological sorts when a circular dependency is detected. - - There are two scenarios where this error occurs: - - * In a Session flush operation, if two objects are mutually dependent - on each other, they can not be inserted or deleted via INSERT or - DELETE statements alone; an UPDATE will be needed to post-associate - or pre-deassociate one of the foreign key constrained values. - The ``post_update`` flag described at :ref:`post_update` can resolve - this cycle. - * In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`, - :attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey` - or :class:`.ForeignKeyConstraint` objects mutually refer to each - other. Apply the ``use_alter=True`` flag to one or both, - see :ref:`use_alter`. - - """ - def __init__(self, message, cycles, edges, msg=None): - if msg is None: - message += " Cycles: %r all edges: %r" % (cycles, edges) - else: - message = msg - SQLAlchemyError.__init__(self, message) - self.cycles = cycles - self.edges = edges - - def __reduce__(self): - return self.__class__, (None, self.cycles, - self.edges, self.args[0]) - -class CompileError(SQLAlchemyError): - """Raised when an error occurs during SQL compilation""" - -class IdentifierError(SQLAlchemyError): - """Raised when a schema name is beyond the max character limit""" - -# Moved to orm.exc; compatibility definition installed by orm import until 0.6 -ConcurrentModificationError = None - -class DisconnectionError(SQLAlchemyError): - """A disconnect is detected on a raw DB-API connection. - - This error is raised and consumed internally by a connection pool. 
It can - be raised by the :meth:`.PoolEvents.checkout` event - so that the host pool forces a retry; the exception will be caught - three times in a row before the pool gives up and raises - :class:`~sqlalchemy.exc.InvalidRequestError` regarding the connection attempt. - - """ - - -# Moved to orm.exc; compatibility definition installed by orm import until 0.6 -FlushError = None - -class TimeoutError(SQLAlchemyError): - """Raised when a connection pool times out on getting a connection.""" - - -class InvalidRequestError(SQLAlchemyError): - """SQLAlchemy was asked to do something it can't do. - - This error generally corresponds to runtime state errors. - - """ - -class ResourceClosedError(InvalidRequestError): - """An operation was requested from a connection, cursor, or other - object that's in a closed state.""" - -class NoSuchColumnError(KeyError, InvalidRequestError): - """A nonexistent column is requested from a ``RowProxy``.""" - -class NoReferenceError(InvalidRequestError): - """Raised by ``ForeignKey`` to indicate a reference cannot be resolved.""" - -class NoReferencedTableError(NoReferenceError): - """Raised by ``ForeignKey`` when the referred ``Table`` cannot be located.""" - - def __init__(self, message, tname): - NoReferenceError.__init__(self, message) - self.table_name = tname - - def __reduce__(self): - return self.__class__, (self.args[0], self.table_name) - -class NoReferencedColumnError(NoReferenceError): - """Raised by ``ForeignKey`` when the referred ``Column`` cannot be located.""" - - def __init__(self, message, tname, cname): - NoReferenceError.__init__(self, message) - self.table_name = tname - self.column_name = cname - - def __reduce__(self): - return self.__class__, (self.args[0], self.table_name, - self.column_name) - -class NoSuchTableError(InvalidRequestError): - """Table does not exist or is not visible to a connection.""" - - -class UnboundExecutionError(InvalidRequestError): - """SQL was attempted without a database connection to execute it on.""" - - -class DontWrapMixin(object): - """A mixin class which, when applied to a user-defined Exception class, - will not be wrapped inside of :class:`.StatementError` if the error is - emitted within the process of executing a statement. - - E.g.:: - from sqlalchemy.exc import DontWrapMixin - - class MyCustomException(Exception, DontWrapMixin): - pass - - class MySpecialType(TypeDecorator): - impl = String - - def process_bind_param(self, value, dialect): - if value == 'invalid': - raise MyCustomException("invalid!") - - """ -import sys -if sys.version_info < (2, 5): - class DontWrapMixin: - pass - -# Moved to orm.exc; compatibility definition installed by orm import until 0.6 -UnmappedColumnError = None - -class StatementError(SQLAlchemyError): - """An error occurred during execution of a SQL statement. - - :class:`StatementError` wraps the exception raised - during execution, and features :attr:`.statement` - and :attr:`.params` attributes which supply context regarding - the specifics of the statement which had an issue. - - The wrapped exception object is available in - the :attr:`.orig` attribute. 
- - """ - - statement = None - """The string SQL statement being invoked when this exception occurred.""" - - params = None - """The parameter list being used when this exception occurred.""" - - orig = None - """The DBAPI exception object.""" - - def __init__(self, message, statement, params, orig): - SQLAlchemyError.__init__(self, message) - self.statement = statement - self.params = params - self.orig = orig - - def __reduce__(self): - return self.__class__, (self.args[0], self.statement, - self.params, self.orig) - - def __str__(self): - from sqlalchemy.sql import util - params_repr = util._repr_params(self.params, 10) - return ' '.join((SQLAlchemyError.__str__(self), - repr(self.statement), repr(params_repr))) - - -class DBAPIError(StatementError): - """Raised when the execution of a database operation fails. - - Wraps exceptions raised by the DB-API underlying the - database operation. Driver-specific implementations of the standard - DB-API exception types are wrapped by matching sub-types of SQLAlchemy's - :class:`DBAPIError` when possible. DB-API's ``Error`` type maps to - :class:`DBAPIError` in SQLAlchemy, otherwise the names are identical. Note - that there is no guarantee that different DB-API implementations will - raise the same exception type for any given error condition. - - :class:`DBAPIError` features :attr:`~.StatementError.statement` - and :attr:`~.StatementError.params` attributes which supply context regarding - the specifics of the statement which had an issue, for the - typical case when the error was raised within the context of - emitting a SQL statement. - - The wrapped exception object is available in the :attr:`~.StatementError.orig` attribute. - Its type and properties are DB-API implementation specific. - - """ - - @classmethod - def instance(cls, statement, params, - orig, - dbapi_base_err, - connection_invalidated=False): - # Don't ever wrap these, just return them directly as if - # DBAPIError didn't exist. - if isinstance(orig, (KeyboardInterrupt, SystemExit, DontWrapMixin)): - return orig - - if orig is not None: - # not a DBAPI error, statement is present. 
- # raise a StatementError - if not isinstance(orig, dbapi_base_err) and statement: - return StatementError( - "%s (original cause: %s)" % ( - str(orig), - traceback.format_exception_only(orig.__class__, orig)[-1].strip() - ), statement, params, orig) - - name, glob = orig.__class__.__name__, globals() - if name in glob and issubclass(glob[name], DBAPIError): - cls = glob[name] - - return cls(statement, params, orig, connection_invalidated) - - def __reduce__(self): - return self.__class__, (self.statement, self.params, - self.orig, self.connection_invalidated) - - def __init__(self, statement, params, orig, connection_invalidated=False): - try: - text = str(orig) - except (KeyboardInterrupt, SystemExit): - raise - except Exception, e: - text = 'Error in str() of DB-API-generated exception: ' + str(e) - StatementError.__init__( - self, - '(%s) %s' % (orig.__class__.__name__, text), - statement, - params, - orig - ) - self.connection_invalidated = connection_invalidated - - -class InterfaceError(DBAPIError): - """Wraps a DB-API InterfaceError.""" - - -class DatabaseError(DBAPIError): - """Wraps a DB-API DatabaseError.""" - - -class DataError(DatabaseError): - """Wraps a DB-API DataError.""" - - -class OperationalError(DatabaseError): - """Wraps a DB-API OperationalError.""" - - -class IntegrityError(DatabaseError): - """Wraps a DB-API IntegrityError.""" - - -class InternalError(DatabaseError): - """Wraps a DB-API InternalError.""" - - -class ProgrammingError(DatabaseError): - """Wraps a DB-API ProgrammingError.""" - - -class NotSupportedError(DatabaseError): - """Wraps a DB-API NotSupportedError.""" - - -# Warnings - -class SADeprecationWarning(DeprecationWarning): - """Issued once per usage of a deprecated API.""" - - -class SAPendingDeprecationWarning(PendingDeprecationWarning): - """Issued once per usage of a deprecated API.""" - - -class SAWarning(RuntimeWarning): - """Issued at runtime.""" diff --git a/libs/sqlalchemy/ext/__init__.py b/libs/sqlalchemy/ext/__init__.py deleted file mode 100644 index 4a6e1952..00000000 --- a/libs/sqlalchemy/ext/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -# ext/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - diff --git a/libs/sqlalchemy/ext/associationproxy.py b/libs/sqlalchemy/ext/associationproxy.py deleted file mode 100644 index d5b0ab69..00000000 --- a/libs/sqlalchemy/ext/associationproxy.py +++ /dev/null @@ -1,997 +0,0 @@ -# ext/associationproxy.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Contain the ``AssociationProxy`` class. - -The ``AssociationProxy`` is a Python property object which provides -transparent proxied access to the endpoint of an association object. - -See the example ``examples/association/proxied_association.py``. - -""" -import itertools -import operator -import weakref -from sqlalchemy import exceptions -from sqlalchemy import orm -from sqlalchemy import util -from sqlalchemy.orm import collections, ColumnProperty -from sqlalchemy.sql import not_ - - -def association_proxy(target_collection, attr, **kw): - """Return a Python property implementing a view of a target - attribute which references an attribute on members of the - target. - - The returned value is an instance of :class:`.AssociationProxy`. 
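For orientation, a typical declarative setup is a single :func:`.association_proxy` call on the owning class. A hedged sketch with hypothetical ``User`` and ``Keyword`` classes (the ``creator`` turns plain strings into ``Keyword`` objects; none of these names come from the removed module)::

    from sqlalchemy import Column, ForeignKey, Integer, String
    from sqlalchemy.ext.associationproxy import association_proxy
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        kw = relationship("Keyword")
        # view of each related Keyword's .keyword string
        keywords = association_proxy('kw', 'keyword',
                                     creator=lambda k: Keyword(keyword=k))

    class Keyword(Base):
        __tablename__ = 'keyword'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('user.id'))
        keyword = Column(String(50))

    u = User()
    u.keywords.append('cheese inspector')  # builds a Keyword via creator
    assert u.keywords == ['cheese inspector']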
- - Implements a Python property representing a relationship as a collection of - simpler values, or a scalar value. The proxied property will mimic the collection type of - the target (list, dict or set), or, in the case of a one to one relationship, - a simple scalar value. - - :param target_collection: Name of the attribute we'll proxy to. - This attribute is typically mapped by - :func:`~sqlalchemy.orm.relationship` to link to a target collection, but - can also be a many-to-one or non-scalar relationship. - - :param attr: Attribute on the associated instance or instances we'll proxy for. - - For example, given a target collection of [obj1, obj2], a list created - by this proxy property would look like [getattr(obj1, *attr*), - getattr(obj2, *attr*)] - - If the relationship is one-to-one or otherwise uselist=False, then simply: - getattr(obj, *attr*) - - :param creator: optional. - - When new items are added to this proxied collection, new instances of - the class collected by the target collection will be created. For list - and set collections, the target class constructor will be called with - the 'value' for the new instance. For dict types, two arguments are - passed: key and value. - - If you want to construct instances differently, supply a *creator* - function that takes arguments as above and returns instances. - - For scalar relationships, creator() will be called if the target is None. - If the target is present, set operations are proxied to setattr() on the - associated object. - - If you have an associated object with multiple attributes, you may set - up multiple association proxies mapping to different attributes. See - the unit tests for examples, and for examples of how creator() functions - can be used to construct the scalar relationship on-demand in this - situation. - - :param \*\*kw: Passes along any other keyword arguments to - :class:`.AssociationProxy`. - - """ - return AssociationProxy(target_collection, attr, **kw) - - -class AssociationProxy(object): - """A descriptor that presents a read/write view of an object attribute.""" - - def __init__(self, target_collection, attr, creator=None, - getset_factory=None, proxy_factory=None, - proxy_bulk_set=None): - """Construct a new :class:`.AssociationProxy`. - - The :func:`.association_proxy` function is provided as the usual - entrypoint here, though :class:`.AssociationProxy` can be instantiated - and/or subclassed directly. - - :param target_collection: Name of the collection we'll proxy to, - usually created with :func:`.relationship`. - - :param attr: Attribute on the collected instances we'll proxy for. For example, - given a target collection of [obj1, obj2], a list created by this - proxy property would look like [getattr(obj1, attr), getattr(obj2, - attr)] - - :param creator: Optional. When new items are added to this proxied collection, new - instances of the class collected by the target collection will be - created. For list and set collections, the target class constructor - will be called with the 'value' for the new instance. For dict - types, two arguments are passed: key and value. - - If you want to construct instances differently, supply a 'creator' - function that takes arguments as above and returns instances. - - :param getset_factory: Optional. Proxied attribute access is automatically handled by - routines that get and set values based on the `attr` argument for - this proxy. 
- - If you would like to customize this behavior, you may supply a - `getset_factory` callable that produces a tuple of `getter` and - `setter` functions. The factory is called with two arguments, the - abstract type of the underlying collection and this proxy instance. - - :param proxy_factory: Optional. The type of collection to emulate is determined by - sniffing the target collection. If your collection type can't be - determined by duck typing or you'd like to use a different - collection implementation, you may supply a factory function to - produce those collections. Only applicable to non-scalar relationships. - - :param proxy_bulk_set: Optional, use with proxy_factory. See - the _set() method for details. - - """ - self.target_collection = target_collection - self.value_attr = attr - self.creator = creator - self.getset_factory = getset_factory - self.proxy_factory = proxy_factory - self.proxy_bulk_set = proxy_bulk_set - - self.owning_class = None - self.key = '_%s_%s_%s' % ( - type(self).__name__, target_collection, id(self)) - self.collection_class = None - - @property - def remote_attr(self): - """The 'remote' :class:`.MapperProperty` referenced by this - :class:`.AssociationProxy`. - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.attr` - - :attr:`.AssociationProxy.local_attr` - - """ - return getattr(self.target_class, self.value_attr) - - @property - def local_attr(self): - """The 'local' :class:`.MapperProperty` referenced by this - :class:`.AssociationProxy`. - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.attr` - - :attr:`.AssociationProxy.remote_attr` - - """ - return getattr(self.owning_class, self.target_collection) - - @property - def attr(self): - """Return a tuple of ``(local_attr, remote_attr)``. - - This attribute is convenient when specifying a join - using :meth:`.Query.join` across two relationships:: - - sess.query(Parent).join(*Parent.proxied.attr) - - .. versionadded:: 0.7.3 - - See also: - - :attr:`.AssociationProxy.local_attr` - - :attr:`.AssociationProxy.remote_attr` - - """ - return (self.local_attr, self.remote_attr) - - def _get_property(self): - return (orm.class_mapper(self.owning_class). - get_property(self.target_collection)) - - @util.memoized_property - def target_class(self): - """The intermediary class handled by this :class:`.AssociationProxy`. - - Intercepted append/set/assignment events will result - in the generation of new instances of this class. - - """ - return self._get_property().mapper.class_ - - @util.memoized_property - def scalar(self): - """Return ``True`` if this :class:`.AssociationProxy` proxies a scalar - relationship on the local side.""" - - scalar = not self._get_property().uselist - if scalar: - self._initialize_scalar_accessors() - return scalar - - @util.memoized_property - def _value_is_scalar(self): - return not self._get_property().\ - mapper.get_property(self.value_attr).uselist - - def __get__(self, obj, class_): - if self.owning_class is None: - self.owning_class = class_ and class_ or type(obj) - if obj is None: - return self - - if self.scalar: - return self._scalar_get(getattr(obj, self.target_collection)) - else: - try: - # If the owning instance is reborn (orm session resurrect, - # etc.), refresh the proxy cache. 
- creator_id, proxy = getattr(obj, self.key) - if id(obj) == creator_id: - return proxy - except AttributeError: - pass - proxy = self._new(_lazy_collection(obj, self.target_collection)) - setattr(obj, self.key, (id(obj), proxy)) - return proxy - - def __set__(self, obj, values): - if self.owning_class is None: - self.owning_class = type(obj) - - if self.scalar: - creator = self.creator and self.creator or self.target_class - target = getattr(obj, self.target_collection) - if target is None: - setattr(obj, self.target_collection, creator(values)) - else: - self._scalar_set(target, values) - else: - proxy = self.__get__(obj, None) - if proxy is not values: - proxy.clear() - self._set(proxy, values) - - def __delete__(self, obj): - if self.owning_class is None: - self.owning_class = type(obj) - delattr(obj, self.key) - - def _initialize_scalar_accessors(self): - if self.getset_factory: - get, set = self.getset_factory(None, self) - else: - get, set = self._default_getset(None) - self._scalar_get, self._scalar_set = get, set - - def _default_getset(self, collection_class): - attr = self.value_attr - getter = operator.attrgetter(attr) - if collection_class is dict: - setter = lambda o, k, v: setattr(o, attr, v) - else: - setter = lambda o, v: setattr(o, attr, v) - return getter, setter - - def _new(self, lazy_collection): - creator = self.creator and self.creator or self.target_class - self.collection_class = util.duck_type_collection(lazy_collection()) - - if self.proxy_factory: - return self.proxy_factory(lazy_collection, creator, self.value_attr, self) - - if self.getset_factory: - getter, setter = self.getset_factory(self.collection_class, self) - else: - getter, setter = self._default_getset(self.collection_class) - - if self.collection_class is list: - return _AssociationList(lazy_collection, creator, getter, setter, self) - elif self.collection_class is dict: - return _AssociationDict(lazy_collection, creator, getter, setter, self) - elif self.collection_class is set: - return _AssociationSet(lazy_collection, creator, getter, setter, self) - else: - raise exceptions.ArgumentError( - 'could not guess which interface to use for ' - 'collection_class "%s" backing "%s"; specify a ' - 'proxy_factory and proxy_bulk_set manually' % - (self.collection_class.__name__, self.target_collection)) - - def _inflate(self, proxy): - creator = self.creator and self.creator or self.target_class - - if self.getset_factory: - getter, setter = self.getset_factory(self.collection_class, self) - else: - getter, setter = self._default_getset(self.collection_class) - - proxy.creator = creator - proxy.getter = getter - proxy.setter = setter - - def _set(self, proxy, values): - if self.proxy_bulk_set: - self.proxy_bulk_set(proxy, values) - elif self.collection_class is list: - proxy.extend(values) - elif self.collection_class is dict: - proxy.update(values) - elif self.collection_class is set: - proxy.update(values) - else: - raise exceptions.ArgumentError( - 'no proxy_bulk_set supplied for custom ' - 'collection_class implementation') - - @property - def _comparator(self): - return self._get_property().comparator - - def any(self, criterion=None, **kwargs): - """Produce a proxied 'any' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - and/or :meth:`.RelationshipProperty.Comparator.has` - operators of the underlying proxied attributes. 
- - """ - - if self._value_is_scalar: - value_expr = getattr(self.target_class, self.value_attr).has(criterion, **kwargs) - else: - value_expr = getattr(self.target_class, self.value_attr).any(criterion, **kwargs) - - # check _value_is_scalar here, otherwise - # we're scalar->scalar - call .any() so that - # the "can't call any() on a scalar" msg is raised. - if self.scalar and not self._value_is_scalar: - return self._comparator.has( - value_expr - ) - else: - return self._comparator.any( - value_expr - ) - - def has(self, criterion=None, **kwargs): - """Produce a proxied 'has' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - and/or :meth:`.RelationshipProperty.Comparator.has` - operators of the underlying proxied attributes. - - """ - - return self._comparator.has( - getattr(self.target_class, self.value_attr).\ - has(criterion, **kwargs) - ) - - def contains(self, obj): - """Produce a proxied 'contains' expression using EXISTS. - - This expression will be a composed product - using the :meth:`.RelationshipProperty.Comparator.any` - , :meth:`.RelationshipProperty.Comparator.has`, - and/or :meth:`.RelationshipProperty.Comparator.contains` - operators of the underlying proxied attributes. - """ - - if self.scalar and not self._value_is_scalar: - return self._comparator.has( - getattr(self.target_class, self.value_attr).contains(obj) - ) - else: - return self._comparator.any(**{self.value_attr: obj}) - - def __eq__(self, obj): - return self._comparator.has(**{self.value_attr: obj}) - - def __ne__(self, obj): - return not_(self.__eq__(obj)) - - -class _lazy_collection(object): - def __init__(self, obj, target): - self.ref = weakref.ref(obj) - self.target = target - - def __call__(self): - obj = self.ref() - if obj is None: - raise exceptions.InvalidRequestError( - "stale association proxy, parent object has gone out of " - "scope") - return getattr(obj, self.target) - - def __getstate__(self): - return {'obj':self.ref(), 'target':self.target} - - def __setstate__(self, state): - self.ref = weakref.ref(state['obj']) - self.target = state['target'] - -class _AssociationCollection(object): - def __init__(self, lazy_collection, creator, getter, setter, parent): - """Constructs an _AssociationCollection. - - This will always be a subclass of either _AssociationList, - _AssociationSet, or _AssociationDict. - - lazy_collection - A callable returning a list-based collection of entities (usually an - object attribute managed by a SQLAlchemy relationship()) - - creator - A function that creates new target entities. Given one parameter: - value. This assertion is assumed:: - - obj = creator(somevalue) - assert getter(obj) == somevalue - - getter - A function. Given an associated object, return the 'value'. - - setter - A function. Given an associated object and a value, store that - value on the object. 
- - """ - self.lazy_collection = lazy_collection - self.creator = creator - self.getter = getter - self.setter = setter - self.parent = parent - - col = property(lambda self: self.lazy_collection()) - - def __len__(self): - return len(self.col) - - def __nonzero__(self): - return bool(self.col) - - def __getstate__(self): - return {'parent':self.parent, 'lazy_collection':self.lazy_collection} - - def __setstate__(self, state): - self.parent = state['parent'] - self.lazy_collection = state['lazy_collection'] - self.parent._inflate(self) - -class _AssociationList(_AssociationCollection): - """Generic, converting, list-to-list proxy.""" - - def _create(self, value): - return self.creator(value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, value): - return self.setter(object, value) - - def __getitem__(self, index): - return self._get(self.col[index]) - - def __setitem__(self, index, value): - if not isinstance(index, slice): - self._set(self.col[index], value) - else: - if index.stop is None: - stop = len(self) - elif index.stop < 0: - stop = len(self) + index.stop - else: - stop = index.stop - step = index.step or 1 - - rng = range(index.start or 0, stop, step) - if step == 1: - for i in rng: - del self[index.start] - i = index.start - for item in value: - self.insert(i, item) - i += 1 - else: - if len(value) != len(rng): - raise ValueError( - "attempt to assign sequence of size %s to " - "extended slice of size %s" % (len(value), - len(rng))) - for i, item in zip(rng, value): - self._set(self.col[i], item) - - def __delitem__(self, index): - del self.col[index] - - def __contains__(self, value): - for member in self.col: - # testlib.pragma exempt:__eq__ - if self._get(member) == value: - return True - return False - - def __getslice__(self, start, end): - return [self._get(member) for member in self.col[start:end]] - - def __setslice__(self, start, end, values): - members = [self._create(v) for v in values] - self.col[start:end] = members - - def __delslice__(self, start, end): - del self.col[start:end] - - def __iter__(self): - """Iterate over proxied values. - - For the actual domain objects, iterate over .col instead or - just use the underlying collection directly from its property - on the parent. 
- """ - - for member in self.col: - yield self._get(member) - raise StopIteration - - def append(self, value): - item = self._create(value) - self.col.append(item) - - def count(self, value): - return sum([1 for _ in - itertools.ifilter(lambda v: v == value, iter(self))]) - - def extend(self, values): - for v in values: - self.append(v) - - def insert(self, index, value): - self.col[index:index] = [self._create(value)] - - def pop(self, index=-1): - return self.getter(self.col.pop(index)) - - def remove(self, value): - for i, val in enumerate(self): - if val == value: - del self.col[i] - return - raise ValueError("value not in list") - - def reverse(self): - """Not supported, use reversed(mylist)""" - - raise NotImplementedError - - def sort(self): - """Not supported, use sorted(mylist)""" - - raise NotImplementedError - - def clear(self): - del self.col[0:len(self.col)] - - def __eq__(self, other): - return list(self) == other - - def __ne__(self, other): - return list(self) != other - - def __lt__(self, other): - return list(self) < other - - def __le__(self, other): - return list(self) <= other - - def __gt__(self, other): - return list(self) > other - - def __ge__(self, other): - return list(self) >= other - - def __cmp__(self, other): - return cmp(list(self), other) - - def __add__(self, iterable): - try: - other = list(iterable) - except TypeError: - return NotImplemented - return list(self) + other - - def __radd__(self, iterable): - try: - other = list(iterable) - except TypeError: - return NotImplemented - return other + list(self) - - def __mul__(self, n): - if not isinstance(n, int): - return NotImplemented - return list(self) * n - __rmul__ = __mul__ - - def __iadd__(self, iterable): - self.extend(iterable) - return self - - def __imul__(self, n): - # unlike a regular list *=, proxied __imul__ will generate unique - # backing objects for each copy. *= on proxied lists is a bit of - # a stretch anyhow, and this interpretation of the __imul__ contract - # is more plausibly useful than copying the backing objects. 
- if not isinstance(n, int): - return NotImplemented - if n == 0: - self.clear() - elif n > 1: - self.extend(list(self) * (n - 1)) - return self - - def copy(self): - return list(self) - - def __repr__(self): - return repr(list(self)) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in locals().items(): - if (util.callable(func) and func.func_name == func_name and - not func.__doc__ and hasattr(list, func_name)): - func.__doc__ = getattr(list, func_name).__doc__ - del func_name, func - - -_NotProvided = util.symbol('_NotProvided') -class _AssociationDict(_AssociationCollection): - """Generic, converting, dict-to-dict proxy.""" - - def _create(self, key, value): - return self.creator(key, value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, key, value): - return self.setter(object, key, value) - - def __getitem__(self, key): - return self._get(self.col[key]) - - def __setitem__(self, key, value): - if key in self.col: - self._set(self.col[key], key, value) - else: - self.col[key] = self._create(key, value) - - def __delitem__(self, key): - del self.col[key] - - def __contains__(self, key): - # testlib.pragma exempt:__hash__ - return key in self.col - - def has_key(self, key): - # testlib.pragma exempt:__hash__ - return key in self.col - - def __iter__(self): - return self.col.iterkeys() - - def clear(self): - self.col.clear() - - def __eq__(self, other): - return dict(self) == other - - def __ne__(self, other): - return dict(self) != other - - def __lt__(self, other): - return dict(self) < other - - def __le__(self, other): - return dict(self) <= other - - def __gt__(self, other): - return dict(self) > other - - def __ge__(self, other): - return dict(self) >= other - - def __cmp__(self, other): - return cmp(dict(self), other) - - def __repr__(self): - return repr(dict(self.items())) - - def get(self, key, default=None): - try: - return self[key] - except KeyError: - return default - - def setdefault(self, key, default=None): - if key not in self.col: - self.col[key] = self._create(key, default) - return default - else: - return self[key] - - def keys(self): - return self.col.keys() - - def iterkeys(self): - return self.col.iterkeys() - - def values(self): - return [ self._get(member) for member in self.col.values() ] - - def itervalues(self): - for key in self.col: - yield self._get(self.col[key]) - raise StopIteration - - def items(self): - return [(k, self._get(self.col[k])) for k in self] - - def iteritems(self): - for key in self.col: - yield (key, self._get(self.col[key])) - raise StopIteration - - def pop(self, key, default=_NotProvided): - if default is _NotProvided: - member = self.col.pop(key) - else: - member = self.col.pop(key, default) - return self._get(member) - - def popitem(self): - item = self.col.popitem() - return (item[0], self._get(item[1])) - - def update(self, *a, **kw): - if len(a) > 1: - raise TypeError('update expected at most 1 arguments, got %i' % - len(a)) - elif len(a) == 1: - seq_or_map = a[0] - # discern dict from sequence - took the advice - # from http://www.voidspace.org.uk/python/articles/duck_typing.shtml - # still not perfect :( - if hasattr(seq_or_map, 'keys'): - for item in seq_or_map: - self[item] = seq_or_map[item] - else: - try: - for k, v in seq_or_map: - self[k] = v - except ValueError: - raise ValueError( - "dictionary update sequence " - "requires 2-element tuples") - - for key, value in kw: - self[key] = value - - def copy(self): - return 
dict(self.items()) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in locals().items(): - if (util.callable(func) and func.func_name == func_name and - not func.__doc__ and hasattr(dict, func_name)): - func.__doc__ = getattr(dict, func_name).__doc__ - del func_name, func - - -class _AssociationSet(_AssociationCollection): - """Generic, converting, set-to-set proxy.""" - - def _create(self, value): - return self.creator(value) - - def _get(self, object): - return self.getter(object) - - def _set(self, object, value): - return self.setter(object, value) - - def __len__(self): - return len(self.col) - - def __nonzero__(self): - if self.col: - return True - else: - return False - - def __contains__(self, value): - for member in self.col: - # testlib.pragma exempt:__eq__ - if self._get(member) == value: - return True - return False - - def __iter__(self): - """Iterate over proxied values. - - For the actual domain objects, iterate over .col instead or just use - the underlying collection directly from its property on the parent. - - """ - for member in self.col: - yield self._get(member) - raise StopIteration - - def add(self, value): - if value not in self: - self.col.add(self._create(value)) - - # for discard and remove, choosing a more expensive check strategy rather - # than call self.creator() - def discard(self, value): - for member in self.col: - if self._get(member) == value: - self.col.discard(member) - break - - def remove(self, value): - for member in self.col: - if self._get(member) == value: - self.col.discard(member) - return - raise KeyError(value) - - def pop(self): - if not self.col: - raise KeyError('pop from an empty set') - member = self.col.pop() - return self._get(member) - - def update(self, other): - for value in other: - self.add(value) - - def __ior__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - for value in other: - self.add(value) - return self - - def _set(self): - return set(iter(self)) - - def union(self, other): - return set(self).union(other) - - __or__ = union - - def difference(self, other): - return set(self).difference(other) - - __sub__ = difference - - def difference_update(self, other): - for value in other: - self.discard(value) - - def __isub__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - for value in other: - self.discard(value) - return self - - def intersection(self, other): - return set(self).intersection(other) - - __and__ = intersection - - def intersection_update(self, other): - want, have = self.intersection(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - - def __iand__(self, other): - if not collections._set_binops_check_strict(self, other): - return NotImplemented - want, have = self.intersection(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - return self - - def symmetric_difference(self, other): - return set(self).symmetric_difference(other) - - __xor__ = symmetric_difference - - def symmetric_difference_update(self, other): - want, have = self.symmetric_difference(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - - def __ixor__(self, other): - if not 
collections._set_binops_check_strict(self, other): - return NotImplemented - want, have = self.symmetric_difference(other), set(self) - - remove, add = have - want, want - have - - for value in remove: - self.remove(value) - for value in add: - self.add(value) - return self - - def issubset(self, other): - return set(self).issubset(other) - - def issuperset(self, other): - return set(self).issuperset(other) - - def clear(self): - self.col.clear() - - def copy(self): - return set(self) - - def __eq__(self, other): - return set(self) == other - - def __ne__(self, other): - return set(self) != other - - def __lt__(self, other): - return set(self) < other - - def __le__(self, other): - return set(self) <= other - - def __gt__(self, other): - return set(self) > other - - def __ge__(self, other): - return set(self) >= other - - def __repr__(self): - return repr(set(self)) - - def __hash__(self): - raise TypeError("%s objects are unhashable" % type(self).__name__) - - for func_name, func in locals().items(): - if (util.callable(func) and func.func_name == func_name and - not func.__doc__ and hasattr(set, func_name)): - func.__doc__ = getattr(set, func_name).__doc__ - del func_name, func diff --git a/libs/sqlalchemy/ext/compiler.py b/libs/sqlalchemy/ext/compiler.py deleted file mode 100644 index 9bd9b42e..00000000 --- a/libs/sqlalchemy/ext/compiler.py +++ /dev/null @@ -1,410 +0,0 @@ -# ext/compiler.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides an API for creation of custom ClauseElements and compilers. - -Synopsis -======== - -Usage involves the creation of one or more :class:`~sqlalchemy.sql.expression.ClauseElement` -subclasses and one or more callables defining its compilation:: - - from sqlalchemy.ext.compiler import compiles - from sqlalchemy.sql.expression import ColumnClause - - class MyColumn(ColumnClause): - pass - - @compiles(MyColumn) - def compile_mycolumn(element, compiler, **kw): - return "[%s]" % element.name - -Above, ``MyColumn`` extends :class:`~sqlalchemy.sql.expression.ColumnClause`, -the base expression element for named column objects. The ``compiles`` -decorator registers itself with the ``MyColumn`` class so that it is invoked -when the object is compiled to a string:: - - from sqlalchemy import select - - s = select([MyColumn('x'), MyColumn('y')]) - print str(s) - -Produces:: - - SELECT [x], [y] - -Dialect-specific compilation rules -================================== - -Compilers can also be made dialect-specific. The appropriate compiler will be -invoked for the dialect in use:: - - from sqlalchemy.schema import DDLElement - - class AlterColumn(DDLElement): - - def __init__(self, column, cmd): - self.column = column - self.cmd = cmd - - @compiles(AlterColumn) - def visit_alter_column(element, compiler, **kw): - return "ALTER COLUMN %s ..." % element.column.name - - @compiles(AlterColumn, 'postgresql') - def visit_alter_column(element, compiler, **kw): - return "ALTER TABLE %s ALTER COLUMN %s ..." % (element.table.name, element.column.name) - -The second ``visit_alter_column`` will be invoked when any ``postgresql`` dialect is used. - -Compiling sub-elements of a custom expression construct -======================================================= - -The ``compiler`` argument is the :class:`~sqlalchemy.engine.base.Compiled` -object in use.
This object can be inspected for any information about the -in-progress compilation, including ``compiler.dialect``, -``compiler.statement`` etc. The :class:`~sqlalchemy.sql.compiler.SQLCompiler` -and :class:`~sqlalchemy.sql.compiler.DDLCompiler` both include a ``process()`` -method which can be used for compilation of embedded attributes:: - - from sqlalchemy.sql.expression import Executable, ClauseElement - - class InsertFromSelect(Executable, ClauseElement): - def __init__(self, table, select): - self.table = table - self.select = select - - @compiles(InsertFromSelect) - def visit_insert_from_select(element, compiler, **kw): - return "INSERT INTO %s (%s)" % ( - compiler.process(element.table, asfrom=True), - compiler.process(element.select) - ) - - insert = InsertFromSelect(t1, select([t1]).where(t1.c.x>5)) - print insert - -Produces:: - - "INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)" - -.. note:: - - The above ``InsertFromSelect`` construct probably wants to have "autocommit" - enabled. See :ref:`enabling_compiled_autocommit` for this step. - -Cross Compiling between SQL and DDL compilers ---------------------------------------------- - -SQL and DDL constructs are each compiled using different base compilers - ``SQLCompiler`` -and ``DDLCompiler``. A common need is to access the compilation rules of SQL expressions -from within a DDL expression. The ``DDLCompiler`` includes an accessor ``sql_compiler`` for this reason, such as below where we generate a CHECK -constraint that embeds a SQL expression:: - - @compiles(MyConstraint) - def compile_my_constraint(constraint, ddlcompiler, **kw): - return "CONSTRAINT %s CHECK (%s)" % ( - constraint.name, - ddlcompiler.sql_compiler.process(constraint.expression) - ) - -.. _enabling_compiled_autocommit: - -Enabling Autocommit on a Construct -================================== - -Recall from the section :ref:`autocommit` that the :class:`.Engine`, when asked to execute -a construct in the absence of a user-defined transaction, detects if the given -construct represents DML or DDL, that is, a data modification or data definition statement, which -requires (or may require, in the case of DDL) that the transaction generated by the DBAPI be committed -(recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking -for this is actually accomplished -by checking for the "autocommit" execution option on the construct. When building a construct like -an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the "autocommit" -option needs to be set in order for the statement to function with "connectionless" execution -(as described in :ref:`dbengine_implicit`). 
- -Currently a quick way to do this is to subclass :class:`.Executable`, then add the "autocommit" flag -to the ``_execution_options`` dictionary (note this is a "frozen" dictionary which supplies a generative -``union()`` method):: - - from sqlalchemy.sql.expression import Executable, ClauseElement - - class MyInsertThing(Executable, ClauseElement): - _execution_options = \\ - Executable._execution_options.union({'autocommit': True}) - -More succinctly, if the construct is truly similar to an INSERT, UPDATE, or DELETE, :class:`.UpdateBase` -can be used, which already is a subclass of :class:`.Executable`, :class:`.ClauseElement` and includes the -``autocommit`` flag:: - - from sqlalchemy.sql.expression import UpdateBase - - class MyInsertThing(UpdateBase): - def __init__(self, ...): - ... - - - - -DDL elements that subclass :class:`.DDLElement` already have the "autocommit" flag turned on. - - - - -Changing the default compilation of existing constructs -======================================================= - -The compiler extension applies just as well to the existing constructs. When overriding -the compilation of a built in SQL construct, the @compiles decorator is invoked upon -the appropriate class (be sure to use the class, i.e. ``Insert`` or ``Select``, instead of the creation function such as ``insert()`` or ``select()``). - -Within the new compilation function, to get at the "original" compilation routine, -use the appropriate visit_XXX method - this is because compiler.process() will call upon the -overriding routine and cause an endless loop. For example, to add "prefix" to all insert statements:: - - from sqlalchemy.sql.expression import Insert - - @compiles(Insert) - def prefix_inserts(insert, compiler, **kw): - return compiler.visit_insert(insert.prefix_with("some prefix"), **kw) - -The above compiler will prefix all INSERT statements with "some prefix" when compiled. - -.. _type_compilation_extension: - -Changing Compilation of Types -============================= - -``compiler`` works for types, too, such as below where we implement the MS-SQL specific 'max' keyword for ``String``/``VARCHAR``:: - - @compiles(String, 'mssql') - @compiles(VARCHAR, 'mssql') - def compile_varchar(element, compiler, **kw): - if element.length == 'max': - return "VARCHAR('max')" - else: - return compiler.visit_VARCHAR(element, **kw) - - foo = Table('foo', metadata, - Column('data', VARCHAR('max')) - ) - -Subclassing Guidelines -====================== - -A big part of using the compiler extension is subclassing SQLAlchemy -expression constructs. To make this easier, the expression and -schema packages feature a set of "bases" intended for common tasks. -A synopsis is as follows: - -* :class:`~sqlalchemy.sql.expression.ClauseElement` - This is the root - expression class. Any SQL expression can be derived from this base, and is - probably the best choice for longer constructs such as specialized INSERT - statements. - -* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all - "column-like" elements. Anything that you'd place in the "columns" clause of - a SELECT statement (as well as order by and group by) can derive from this - - the object will automatically have Python "comparison" behavior. - - :class:`~sqlalchemy.sql.expression.ColumnElement` classes want to have a - ``type`` member which is the expression's return type.
This can be established
-  at the instance level in the constructor, or at the class level if it's
-  generally constant::
-
-      class timestamp(ColumnElement):
-          type = TIMESTAMP()
-
-* :class:`~sqlalchemy.sql.expression.FunctionElement` - This is a hybrid of a
-  ``ColumnElement`` and a "from clause" like object, and represents a SQL
-  function or stored procedure type of call. Since most databases support
-  statements along the line of "SELECT FROM <some function>",
-  ``FunctionElement`` adds in the ability to be used in the FROM clause of a
-  ``select()`` construct::
-
-      from sqlalchemy.sql.expression import FunctionElement
-
-      class coalesce(FunctionElement):
-          name = 'coalesce'
-
-      @compiles(coalesce)
-      def compile(element, compiler, **kw):
-          return "coalesce(%s)" % compiler.process(element.clauses)
-
-      @compiles(coalesce, 'oracle')
-      def compile(element, compiler, **kw):
-          if len(element.clauses) > 2:
-              raise TypeError("coalesce only supports two arguments on Oracle")
-          return "nvl(%s)" % compiler.process(element.clauses)
-
-* :class:`~sqlalchemy.schema.DDLElement` - The root of all DDL expressions,
-  like CREATE TABLE, ALTER TABLE, etc. Compilation of ``DDLElement``
-  subclasses is issued by a ``DDLCompiler`` instead of a ``SQLCompiler``.
-  ``DDLElement`` also features ``Table`` and ``MetaData`` event hooks via the
-  ``execute_at()`` method, allowing the construct to be invoked during CREATE
-  TABLE and DROP TABLE sequences.
-
-* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be
-  used with any expression class that represents a "standalone" SQL statement that
-  can be passed directly to an ``execute()`` method.  It is already implicit
-  within ``DDLElement`` and ``FunctionElement``.
-
-Further Examples
-================
-
-"UTC timestamp" function
--------------------------
-
-A function that works like "CURRENT_TIMESTAMP" except it applies the appropriate
-conversions so that the time is in UTC time. Timestamps are best stored in
-relational databases as UTC, without time zones. UTC so that your database
-doesn't think time has gone backwards in the hour when daylight saving time
-ends, and without timezones because timezones are like character encodings -
-they're best applied only at the endpoints of an application (i.e. convert to
-UTC upon user input, re-apply the desired timezone upon display).
-
-For Postgresql and Microsoft SQL Server::
-
-    from sqlalchemy.sql import expression
-    from sqlalchemy.ext.compiler import compiles
-    from sqlalchemy.types import DateTime
-
-    class utcnow(expression.FunctionElement):
-        type = DateTime()
-
-    @compiles(utcnow, 'postgresql')
-    def pg_utcnow(element, compiler, **kw):
-        return "TIMEZONE('utc', CURRENT_TIMESTAMP)"
-
-    @compiles(utcnow, 'mssql')
-    def ms_utcnow(element, compiler, **kw):
-        return "GETUTCDATE()"
-
-Example usage::
-
-    from sqlalchemy import (
-        Table, Column, Integer, String, DateTime, MetaData
-    )
-    metadata = MetaData()
-    event = Table("event", metadata,
-        Column("id", Integer, primary_key=True),
-        Column("description", String(50), nullable=False),
-        Column("timestamp", DateTime, server_default=utcnow())
-    )
-
-"GREATEST" function
--------------------
-
-The "GREATEST" function is given any number of arguments and returns the one that is
-of the highest value - it's equivalent to Python's ``max`` function.
A SQL -standard version versus a CASE based version which only accommodates two -arguments:: - - from sqlalchemy.sql import expression - from sqlalchemy.ext.compiler import compiles - from sqlalchemy.types import Numeric - - class greatest(expression.FunctionElement): - type = Numeric() - name = 'greatest' - - @compiles(greatest) - def default_greatest(element, compiler, **kw): - return compiler.visit_function(element) - - @compiles(greatest, 'sqlite') - @compiles(greatest, 'mssql') - @compiles(greatest, 'oracle') - def case_greatest(element, compiler, **kw): - arg1, arg2 = list(element.clauses) - return "CASE WHEN %s > %s THEN %s ELSE %s END" % ( - compiler.process(arg1), - compiler.process(arg2), - compiler.process(arg1), - compiler.process(arg2), - ) - -Example usage:: - - Session.query(Account).\\ - filter( - greatest( - Account.checking_balance, - Account.savings_balance) > 10000 - ) - -"false" expression ------------------- - -Render a "false" constant expression, rendering as "0" on platforms that don't have a "false" constant:: - - from sqlalchemy.sql import expression - from sqlalchemy.ext.compiler import compiles - - class sql_false(expression.ColumnElement): - pass - - @compiles(sql_false) - def default_false(element, compiler, **kw): - return "false" - - @compiles(sql_false, 'mssql') - @compiles(sql_false, 'mysql') - @compiles(sql_false, 'oracle') - def int_false(element, compiler, **kw): - return "0" - -Example usage:: - - from sqlalchemy import select, union_all - - exp = union_all( - select([users.c.name, sql_false().label("enrolled")]), - select([customers.c.name, customers.c.enrolled]) - ) - -""" -from sqlalchemy import exc - -def compiles(class_, *specs): - def decorate(fn): - existing = class_.__dict__.get('_compiler_dispatcher', None) - existing_dispatch = class_.__dict__.get('_compiler_dispatch') - if not existing: - existing = _dispatcher() - - if existing_dispatch: - existing.specs['default'] = existing_dispatch - - # TODO: why is the lambda needed ? - setattr(class_, '_compiler_dispatch', lambda *arg, **kw: existing(*arg, **kw)) - setattr(class_, '_compiler_dispatcher', existing) - - if specs: - for s in specs: - existing.specs[s] = fn - - else: - existing.specs['default'] = fn - return fn - return decorate - -class _dispatcher(object): - def __init__(self): - self.specs = {} - - def __call__(self, element, compiler, **kw): - # TODO: yes, this could also switch off of DBAPI in use. - fn = self.specs.get(compiler.dialect.name, None) - if not fn: - try: - fn = self.specs['default'] - except KeyError: - raise exc.CompileError( - "%s construct has no default " - "compilation handler." % type(element)) - return fn(element, compiler, **kw) - diff --git a/libs/sqlalchemy/ext/declarative.py b/libs/sqlalchemy/ext/declarative.py deleted file mode 100755 index b0876f0d..00000000 --- a/libs/sqlalchemy/ext/declarative.py +++ /dev/null @@ -1,1761 +0,0 @@ -# ext/declarative.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Synopsis -======== - -SQLAlchemy object-relational configuration involves the -combination of :class:`.Table`, :func:`.mapper`, and class -objects to define a mapped class. -:mod:`~sqlalchemy.ext.declarative` allows all three to be -expressed at once within the class declaration. 
As much as -possible, regular SQLAlchemy schema and ORM constructs are -used directly, so that configuration between "classical" ORM -usage and declarative remain highly similar. - -As a simple example:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class SomeClass(Base): - __tablename__ = 'some_table' - id = Column(Integer, primary_key=True) - name = Column(String(50)) - -Above, the :func:`declarative_base` callable returns a new base class from -which all mapped classes should inherit. When the class definition is -completed, a new :class:`.Table` and -:func:`.mapper` will have been generated. - -The resulting table and mapper are accessible via -``__table__`` and ``__mapper__`` attributes on the -``SomeClass`` class:: - - # access the mapped Table - SomeClass.__table__ - - # access the Mapper - SomeClass.__mapper__ - -Defining Attributes -=================== - -In the previous example, the :class:`.Column` objects are -automatically named with the name of the attribute to which they are -assigned. - -To name columns explicitly with a name distinct from their mapped attribute, -just give the column a name. Below, column "some_table_id" is mapped to the -"id" attribute of `SomeClass`, but in SQL will be represented as "some_table_id":: - - class SomeClass(Base): - __tablename__ = 'some_table' - id = Column("some_table_id", Integer, primary_key=True) - -Attributes may be added to the class after its construction, and they will be -added to the underlying :class:`.Table` and -:func:`.mapper()` definitions as appropriate:: - - SomeClass.data = Column('data', Unicode) - SomeClass.related = relationship(RelatedInfo) - -Classes which are constructed using declarative can interact freely -with classes that are mapped explicitly with :func:`mapper`. - -It is recommended, though not required, that all tables -share the same underlying :class:`~sqlalchemy.schema.MetaData` object, -so that string-configured :class:`~sqlalchemy.schema.ForeignKey` -references can be resolved without issue. - -Accessing the MetaData -======================= - -The :func:`declarative_base` base class contains a -:class:`.MetaData` object where newly defined -:class:`.Table` objects are collected. This object is -intended to be accessed directly for -:class:`.MetaData`-specific operations. Such as, to issue -CREATE statements for all tables:: - - engine = create_engine('sqlite://') - Base.metadata.create_all(engine) - -:func:`declarative_base` can also receive a pre-existing -:class:`.MetaData` object, which allows a -declarative setup to be associated with an already -existing traditional collection of :class:`~sqlalchemy.schema.Table` -objects:: - - mymetadata = MetaData() - Base = declarative_base(metadata=mymetadata) - -Configuring Relationships -========================= - -Relationships to other classes are done in the usual way, with the added -feature that the class specified to :func:`~sqlalchemy.orm.relationship` -may be a string name. 
The "class registry" associated with ``Base`` -is used at mapper compilation time to resolve the name into the actual -class object, which is expected to have been defined once the mapper -configuration is used:: - - class User(Base): - __tablename__ = 'users' - - id = Column(Integer, primary_key=True) - name = Column(String(50)) - addresses = relationship("Address", backref="user") - - class Address(Base): - __tablename__ = 'addresses' - - id = Column(Integer, primary_key=True) - email = Column(String(50)) - user_id = Column(Integer, ForeignKey('users.id')) - -Column constructs, since they are just that, are immediately usable, -as below where we define a primary join condition on the ``Address`` -class using them:: - - class Address(Base): - __tablename__ = 'addresses' - - id = Column(Integer, primary_key=True) - email = Column(String(50)) - user_id = Column(Integer, ForeignKey('users.id')) - user = relationship(User, primaryjoin=user_id == User.id) - -In addition to the main argument for :func:`~sqlalchemy.orm.relationship`, -other arguments which depend upon the columns present on an as-yet -undefined class may also be specified as strings. These strings are -evaluated as Python expressions. The full namespace available within -this evaluation includes all classes mapped for this declarative base, -as well as the contents of the ``sqlalchemy`` package, including -expression functions like :func:`~sqlalchemy.sql.expression.desc` and -:attr:`~sqlalchemy.sql.expression.func`:: - - class User(Base): - # .... - addresses = relationship("Address", - order_by="desc(Address.email)", - primaryjoin="Address.user_id==User.id") - -As an alternative to string-based attributes, attributes may also be -defined after all classes have been created. Just add them to the target -class after the fact:: - - User.addresses = relationship(Address, - primaryjoin=Address.user_id==User.id) - -Configuring Many-to-Many Relationships -====================================== - -Many-to-many relationships are also declared in the same way -with declarative as with traditional mappings. The -``secondary`` argument to -:func:`.relationship` is as usual passed a -:class:`.Table` object, which is typically declared in the -traditional way. The :class:`.Table` usually shares -the :class:`.MetaData` object used by the declarative base:: - - keywords = Table( - 'keywords', Base.metadata, - Column('author_id', Integer, ForeignKey('authors.id')), - Column('keyword_id', Integer, ForeignKey('keywords.id')) - ) - - class Author(Base): - __tablename__ = 'authors' - id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary=keywords) - -Like other :func:`.relationship` arguments, a string is accepted as well, -passing the string name of the table as defined in the ``Base.metadata.tables`` -collection:: - - class Author(Base): - __tablename__ = 'authors' - id = Column(Integer, primary_key=True) - keywords = relationship("Keyword", secondary="keywords") - -As with traditional mapping, its generally not a good idea to use -a :class:`.Table` as the "secondary" argument which is also mapped to -a class, unless the :class:`.relationship` is declared with ``viewonly=True``. -Otherwise, the unit-of-work system may attempt duplicate INSERT and -DELETE statements against the underlying table. - -.. _declarative_sql_expressions: - -Defining SQL Expressions -======================== - -See :ref:`mapper_sql_expressions` for examples on declaratively -mapping attributes to SQL expressions. - -.. 
_declarative_table_args: - -Table Configuration -=================== - -Table arguments other than the name, metadata, and mapped Column -arguments are specified using the ``__table_args__`` class attribute. -This attribute accommodates both positional as well as keyword -arguments that are normally sent to the -:class:`~sqlalchemy.schema.Table` constructor. -The attribute can be specified in one of two forms. One is as a -dictionary:: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = {'mysql_engine':'InnoDB'} - -The other, a tuple, where each argument is positional -(usually constraints):: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - ) - -Keyword arguments can be specified with the above form by -specifying the last argument as a dictionary:: - - class MyClass(Base): - __tablename__ = 'sometable' - __table_args__ = ( - ForeignKeyConstraint(['id'], ['remote_table.id']), - UniqueConstraint('foo'), - {'autoload':True} - ) - -Using a Hybrid Approach with __table__ -======================================= - -As an alternative to ``__tablename__``, a direct -:class:`~sqlalchemy.schema.Table` construct may be used. The -:class:`~sqlalchemy.schema.Column` objects, which in this case require -their names, will be added to the mapping just like a regular mapping -to a table:: - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - -``__table__`` provides a more focused point of control for establishing -table metadata, while still getting most of the benefits of using declarative. -An application that uses reflection might want to load table metadata elsewhere -and pass it to declarative classes:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - Base.metadata.reflect(some_engine) - - class User(Base): - __table__ = metadata.tables['user'] - - class Address(Base): - __table__ = metadata.tables['address'] - -Some configuration schemes may find it more appropriate to use ``__table__``, -such as those which already take advantage of the data-driven nature of -:class:`.Table` to customize and/or automate schema definition. - -Note that when the ``__table__`` approach is used, the object is immediately -usable as a plain :class:`.Table` within the class declaration body itself, -as a Python class is only another syntactical block. 
Below this is illustrated -by using the ``id`` column in the ``primaryjoin`` condition of a :func:`.relationship`:: - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - - widgets = relationship(Widget, - primaryjoin=Widget.myclass_id==__table__.c.id) - -Similarly, mapped attributes which refer to ``__table__`` can be placed inline, -as below where we assign the ``name`` column to the attribute ``_name``, generating -a synonym for ``name``:: - - from sqlalchemy.ext.declarative import synonym_for - - class MyClass(Base): - __table__ = Table('my_table', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)) - ) - - _name = __table__.c.name - - @synonym_for("_name") - def name(self): - return "Name: %s" % _name - -Using Reflection with Declarative -================================= - -It's easy to set up a :class:`.Table` that uses ``autoload=True`` -in conjunction with a mapped class:: - - class MyClass(Base): - __table__ = Table('mytable', Base.metadata, - autoload=True, autoload_with=some_engine) - -However, one improvement that can be made here is to not -require the :class:`.Engine` to be available when classes are -being first declared. To achieve this, use the example -described at :ref:`examples_declarative_reflection` to build a -declarative base that sets up mappings only after a special -``prepare(engine)`` step is called:: - - Base = declarative_base(cls=DeclarativeReflectedBase) - - class Foo(Base): - __tablename__ = 'foo' - bars = relationship("Bar") - - class Bar(Base): - __tablename__ = 'bar' - - # illustrate overriding of "bar.foo_id" to have - # a foreign key constraint otherwise not - # reflected, such as when using MySQL - foo_id = Column(Integer, ForeignKey('foo.id')) - - Base.prepare(e) - - -Mapper Configuration -==================== - -Declarative makes use of the :func:`~.orm.mapper` function internally -when it creates the mapping to the declared table. The options -for :func:`~.orm.mapper` are passed directly through via the ``__mapper_args__`` -class attribute. As always, arguments which reference locally -mapped columns can reference them directly from within the -class declaration:: - - from datetime import datetime - - class Widget(Base): - __tablename__ = 'widgets' - - id = Column(Integer, primary_key=True) - timestamp = Column(DateTime, nullable=False) - - __mapper_args__ = { - 'version_id_col': timestamp, - 'version_id_generator': lambda v:datetime.now() - } - -.. _declarative_inheritance: - -Inheritance Configuration -========================= - -Declarative supports all three forms of inheritance as intuitively -as possible. The ``inherits`` mapper keyword argument is not needed -as declarative will determine this from the class itself. The various -"polymorphic" keyword arguments are specified using ``__mapper_args__``. 
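
For comparison, a classical (non-declarative) setup spells out what
declarative infers; a rough sketch, assuming ``Person``/``Engineer`` classes
and ``people``/``engineers`` :class:`.Table` objects are already defined::

    from sqlalchemy.orm import mapper

    # declarative derives inherits= (and the join condition) from the
    # class hierarchy; classically both are stated explicitly
    mapper(Person, people, polymorphic_on=people.c.type)
    mapper(Engineer, engineers, inherits=Person,
           polymorphic_identity='engineer')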
- -Joined Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~ - -Joined table inheritance is defined as a subclass that defines its own -table:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'polymorphic_identity': 'engineer'} - id = Column(Integer, ForeignKey('people.id'), primary_key=True) - primary_language = Column(String(50)) - -Note that above, the ``Engineer.id`` attribute, since it shares the -same attribute name as the ``Person.id`` attribute, will in fact -represent the ``people.id`` and ``engineers.id`` columns together, -with the "Engineer.id" column taking precedence if queried directly. -To provide the ``Engineer`` class with an attribute that represents -only the ``engineers.id`` column, give it a different attribute name:: - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'polymorphic_identity': 'engineer'} - engineer_id = Column('id', Integer, ForeignKey('people.id'), - primary_key=True) - primary_language = Column(String(50)) - - -.. versionchanged:: 0.7 joined table inheritance favors the subclass - column over that of the superclass, such as querying above - for ``Engineer.id``. Prior to 0.7 this was the reverse. - -Single Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~ - -Single table inheritance is defined as a subclass that does not have -its own table; you just leave out the ``__table__`` and ``__tablename__`` -attributes:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __mapper_args__ = {'polymorphic_identity': 'engineer'} - primary_language = Column(String(50)) - -When the above mappers are configured, the ``Person`` class is mapped -to the ``people`` table *before* the ``primary_language`` column is -defined, and this column will not be included in its own mapping. -When ``Engineer`` then defines the ``primary_language`` column, the -column is added to the ``people`` table so that it is included in the -mapping for ``Engineer`` and is also part of the table's full set of -columns. Columns which are not mapped to ``Person`` are also excluded -from any other single or joined inheriting classes using the -``exclude_properties`` mapper argument. Below, ``Manager`` will have -all the attributes of ``Person`` and ``Manager`` but *not* the -``primary_language`` attribute of ``Engineer``:: - - class Manager(Person): - __mapper_args__ = {'polymorphic_identity': 'manager'} - golf_swing = Column(String(50)) - -The attribute exclusion logic is provided by the -``exclude_properties`` mapper argument, and declarative's default -behavior can be disabled by passing an explicit ``exclude_properties`` -collection (empty or otherwise) to the ``__mapper_args__``. 
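
To illustrate disabling that default, a minimal sketch as a variant of the
``Manager`` mapping above, where the explicit collection happens to be empty::

    class Manager(Person):
        golf_swing = Column(String(50))
        __mapper_args__ = {
            'polymorphic_identity': 'manager',
            # an explicit (here empty) exclude_properties collection turns
            # off declarative's automatic exclusion, so Manager would also
            # map columns contributed by sibling classes, such as
            # primary_language from Engineer
            'exclude_properties': set(),
        }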
- -Concrete Table Inheritance -~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Concrete is defined as a subclass which has its own table and sets the -``concrete`` keyword argument to ``True``:: - - class Person(Base): - __tablename__ = 'people' - id = Column(Integer, primary_key=True) - name = Column(String(50)) - - class Engineer(Person): - __tablename__ = 'engineers' - __mapper_args__ = {'concrete':True} - id = Column(Integer, primary_key=True) - primary_language = Column(String(50)) - name = Column(String(50)) - -Usage of an abstract base class is a little less straightforward as it -requires usage of :func:`~sqlalchemy.orm.util.polymorphic_union`, -which needs to be created with the :class:`.Table` objects -before the class is built:: - - engineers = Table('engineers', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('primary_language', String(50)) - ) - managers = Table('managers', Base.metadata, - Column('id', Integer, primary_key=True), - Column('name', String(50)), - Column('golf_swing', String(50)) - ) - - punion = polymorphic_union({ - 'engineer':engineers, - 'manager':managers - }, 'type', 'punion') - - class Person(Base): - __table__ = punion - __mapper_args__ = {'polymorphic_on':punion.c.type} - - class Engineer(Person): - __table__ = engineers - __mapper_args__ = {'polymorphic_identity':'engineer', 'concrete':True} - - class Manager(Person): - __table__ = managers - __mapper_args__ = {'polymorphic_identity':'manager', 'concrete':True} - -.. _declarative_concrete_helpers: - -Using the Concrete Helpers -^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Helper classes provides a simpler pattern for concrete inheritance. -With these objects, the ``__declare_last__`` helper is used to configure the "polymorphic" -loader for the mapper after all subclasses have been declared. - -.. versionadded:: 0.7.3 - -An abstract base can be declared using the :class:`.AbstractConcreteBase` class:: - - from sqlalchemy.ext.declarative import AbstractConcreteBase - - class Employee(AbstractConcreteBase, Base): - pass - -To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead:: - - from sqlalchemy.ext.declarative import ConcreteBase - - class Employee(ConcreteBase, Base): - __tablename__ = 'employee' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - __mapper_args__ = { - 'polymorphic_identity':'employee', - 'concrete':True} - - -Either ``Employee`` base can be used in the normal fashion:: - - class Manager(Employee): - __tablename__ = 'manager' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - manager_data = Column(String(40)) - __mapper_args__ = { - 'polymorphic_identity':'manager', - 'concrete':True} - - class Engineer(Employee): - __tablename__ = 'engineer' - employee_id = Column(Integer, primary_key=True) - name = Column(String(50)) - engineer_info = Column(String(40)) - __mapper_args__ = {'polymorphic_identity':'engineer', - 'concrete':True} - - -.. _declarative_mixins: - -Mixin and Custom Base Classes -============================== - -A common need when using :mod:`~sqlalchemy.ext.declarative` is to -share some functionality, such as a set of common columns, some common -table options, or other mapped properties, across many -classes. The standard Python idioms for this is to have the classes -inherit from a base which includes these common features. 
- -When using :mod:`~sqlalchemy.ext.declarative`, this idiom is allowed -via the usage of a custom declarative base class, as well as a "mixin" class -which is inherited from in addition to the primary base. Declarative -includes several helper features to make this work in terms of how -mappings are declared. An example of some commonly mixed-in -idioms is below:: - - from sqlalchemy.ext.declarative import declared_attr - - class MyMixin(object): - - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - __table_args__ = {'mysql_engine': 'InnoDB'} - __mapper_args__= {'always_refresh': True} - - id = Column(Integer, primary_key=True) - - class MyModel(MyMixin, Base): - name = Column(String(1000)) - -Where above, the class ``MyModel`` will contain an "id" column -as the primary key, a ``__tablename__`` attribute that derives -from the name of the class itself, as well as ``__table_args__`` -and ``__mapper_args__`` defined by the ``MyMixin`` mixin class. - -There's no fixed convention over whether ``MyMixin`` precedes -``Base`` or not. Normal Python method resolution rules apply, and -the above example would work just as well with:: - - class MyModel(Base, MyMixin): - name = Column(String(1000)) - -This works because ``Base`` here doesn't define any of the -variables that ``MyMixin`` defines, i.e. ``__tablename__``, -``__table_args__``, ``id``, etc. If the ``Base`` did define -an attribute of the same name, the class placed first in the -inherits list would determine which attribute is used on the -newly defined class. - -Augmenting the Base -~~~~~~~~~~~~~~~~~~~ - -In addition to using a pure mixin, most of the techniques in this -section can also be applied to the base class itself, for patterns that -should apply to all classes derived from a particular base. This -is achieved using the ``cls`` argument of the :func:`.declarative_base` function:: - - from sqlalchemy.ext.declarative import declared_attr - - class Base(object): - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - __table_args__ = {'mysql_engine': 'InnoDB'} - - id = Column(Integer, primary_key=True) - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base(cls=Base) - - class MyModel(Base): - name = Column(String(1000)) - -Where above, ``MyModel`` and all other classes that derive from ``Base`` will have -a table name derived from the class name, an ``id`` primary key column, as well as -the "InnoDB" engine for MySQL. - -Mixing in Columns -~~~~~~~~~~~~~~~~~ - -The most basic way to specify a column on a mixin is by simple -declaration:: - - class TimestampMixin(object): - created_at = Column(DateTime, default=func.now()) - - class MyModel(TimestampMixin, Base): - __tablename__ = 'test' - - id = Column(Integer, primary_key=True) - name = Column(String(1000)) - -Where above, all declarative classes that include ``TimestampMixin`` -will also have a column ``created_at`` that applies a timestamp to -all row insertions. - -Those familiar with the SQLAlchemy expression language know that -the object identity of clause elements defines their role in a schema. -Two ``Table`` objects ``a`` and ``b`` may both have a column called -``id``, but the way these are differentiated is that ``a.c.id`` -and ``b.c.id`` are two distinct Python objects, referencing their -parent tables ``a`` and ``b`` respectively. 
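
A small illustration of that identity distinction (the table and column names
here are arbitrary)::

    from sqlalchemy import Table, Column, Integer, MetaData

    metadata = MetaData()
    a = Table('a', metadata, Column('id', Integer, primary_key=True))
    b = Table('b', metadata, Column('id', Integer, primary_key=True))

    # same column name, but two distinct Python objects, each bound
    # to its own parent table
    assert a.c.id is not b.c.id
    assert a.c.id.table is a
    assert b.c.id.table is b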
- -In the case of the mixin column, it seems that only one -:class:`.Column` object is explicitly created, yet the ultimate -``created_at`` column above must exist as a distinct Python object -for each separate destination class. To accomplish this, the declarative -extension creates a **copy** of each :class:`.Column` object encountered on -a class that is detected as a mixin. - -This copy mechanism is limited to simple columns that have no foreign -keys, as a :class:`.ForeignKey` itself contains references to columns -which can't be properly recreated at this level. For columns that -have foreign keys, as well as for the variety of mapper-level constructs -that require destination-explicit context, the -:func:`~.declared_attr` decorator is provided so that -patterns common to many classes can be defined as callables:: - - from sqlalchemy.ext.declarative import declared_attr - - class ReferenceAddressMixin(object): - @declared_attr - def address_id(cls): - return Column(Integer, ForeignKey('address.id')) - - class User(ReferenceAddressMixin, Base): - __tablename__ = 'user' - id = Column(Integer, primary_key=True) - -Where above, the ``address_id`` class-level callable is executed at the -point at which the ``User`` class is constructed, and the declarative -extension can use the resulting :class:`.Column` object as returned by -the method without the need to copy it. - -.. versionchanged:: > 0.6.5 - Rename 0.6.5 ``sqlalchemy.util.classproperty`` into :func:`~.declared_attr`. - -Columns generated by :func:`~.declared_attr` can also be -referenced by ``__mapper_args__`` to a limited degree, currently -by ``polymorphic_on`` and ``version_id_col``, by specifying the -classdecorator itself into the dictionary - the declarative extension -will resolve them at class construction time:: - - class MyMixin: - @declared_attr - def type_(cls): - return Column(String(50)) - - __mapper_args__= {'polymorphic_on':type_} - - class MyModel(MyMixin, Base): - __tablename__='test' - id = Column(Integer, primary_key=True) - -Mixing in Relationships -~~~~~~~~~~~~~~~~~~~~~~~ - -Relationships created by :func:`~sqlalchemy.orm.relationship` are provided -with declarative mixin classes exclusively using the -:class:`.declared_attr` approach, eliminating any ambiguity -which could arise when copying a relationship and its possibly column-bound -contents. Below is an example which combines a foreign key column and a -relationship so that two classes ``Foo`` and ``Bar`` can both be configured to -reference a common target class via many-to-one:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target") - - class Foo(RefTargetMixin, Base): - __tablename__ = 'foo' - id = Column(Integer, primary_key=True) - - class Bar(RefTargetMixin, Base): - __tablename__ = 'bar' - id = Column(Integer, primary_key=True) - - class Target(Base): - __tablename__ = 'target' - id = Column(Integer, primary_key=True) - -:func:`~sqlalchemy.orm.relationship` definitions which require explicit -primaryjoin, order_by etc. expressions should use the string forms -for these arguments, so that they are evaluated as late as possible. 
-To reference the mixin class in these expressions, use the given ``cls`` -to get its name:: - - class RefTargetMixin(object): - @declared_attr - def target_id(cls): - return Column('target_id', ForeignKey('target.id')) - - @declared_attr - def target(cls): - return relationship("Target", - primaryjoin="Target.id==%s.target_id" % cls.__name__ - ) - -Mixing in deferred(), column_property(), etc. -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -Like :func:`~sqlalchemy.orm.relationship`, all -:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as -:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`, -etc. ultimately involve references to columns, and therefore, when -used with declarative mixins, have the :class:`.declared_attr` -requirement so that no reliance on copying is needed:: - - class SomethingMixin(object): - - @declared_attr - def dprop(cls): - return deferred(Column(Integer)) - - class Something(SomethingMixin, Base): - __tablename__ = "something" - - -Controlling table inheritance with mixins -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -The ``__tablename__`` attribute in conjunction with the hierarchy of -classes involved in a declarative mixin scenario controls what type of -table inheritance, if any, -is configured by the declarative extension. - -If the ``__tablename__`` is computed by a mixin, you may need to -control which classes get the computed attribute in order to get the -type of table inheritance you require. - -For example, if you had a mixin that computes ``__tablename__`` but -where you wanted to use that mixin in a single table inheritance -hierarchy, you can explicitly specify ``__tablename__`` as ``None`` to -indicate that the class should not have a table mapped:: - - from sqlalchemy.ext.declarative import declared_attr - - class Tablename: - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - class Person(Tablename, Base): - id = Column(Integer, primary_key=True) - discriminator = Column('type', String(50)) - __mapper_args__ = {'polymorphic_on': discriminator} - - class Engineer(Person): - __tablename__ = None - __mapper_args__ = {'polymorphic_identity': 'engineer'} - primary_language = Column(String(50)) - -Alternatively, you can make the mixin intelligent enough to only -return a ``__tablename__`` in the event that no table is already -mapped in the inheritance hierarchy. To help with this, a -:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper -function is provided that returns ``True`` if a parent class already -has a mapped table. 
-
-As an example, here's a mixin that will only allow single table
-inheritance::
-
-    from sqlalchemy.ext.declarative import declared_attr
-    from sqlalchemy.ext.declarative import has_inherited_table
-
-    class Tablename(object):
-        @declared_attr
-        def __tablename__(cls):
-            if has_inherited_table(cls):
-                return None
-            return cls.__name__.lower()
-
-    class Person(Tablename, Base):
-        id = Column(Integer, primary_key=True)
-        discriminator = Column('type', String(50))
-        __mapper_args__ = {'polymorphic_on': discriminator}
-
-    class Engineer(Person):
-        primary_language = Column(String(50))
-        __mapper_args__ = {'polymorphic_identity': 'engineer'}
-
-If you want to use a similar pattern with a mix of single and joined
-table inheritance, you would need a slightly different mixin and use
-it on any joined table child classes in addition to their parent
-classes::
-
-    from sqlalchemy.ext.declarative import declared_attr
-    from sqlalchemy.ext.declarative import has_inherited_table
-
-    class Tablename(object):
-        @declared_attr
-        def __tablename__(cls):
-            if (has_inherited_table(cls) and
-                Tablename not in cls.__bases__):
-                return None
-            return cls.__name__.lower()
-
-    class Person(Tablename, Base):
-        id = Column(Integer, primary_key=True)
-        discriminator = Column('type', String(50))
-        __mapper_args__ = {'polymorphic_on': discriminator}
-
-    # This is single table inheritance
-    class Engineer(Person):
-        primary_language = Column(String(50))
-        __mapper_args__ = {'polymorphic_identity': 'engineer'}
-
-    # This is joined table inheritance
-    class Manager(Tablename, Person):
-        id = Column(Integer, ForeignKey('person.id'), primary_key=True)
-        preferred_recreation = Column(String(50))
-        __mapper_args__ = {'polymorphic_identity': 'manager'}
-
-Combining Table/Mapper Arguments from Multiple Mixins
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-In the case of ``__table_args__`` or ``__mapper_args__``
-specified with declarative mixins, you may want to combine
-some parameters from several mixins with those you wish to
-define on the class itself.
-The :class:`.declared_attr` decorator can be used
-here to create user-defined collation routines that pull
-from multiple collections::
-
-    from sqlalchemy.ext.declarative import declared_attr
-
-    class MySQLSettings(object):
-        __table_args__ = {'mysql_engine':'InnoDB'}
-
-    class MyOtherMixin(object):
-        __table_args__ = {'info':'foo'}
-
-    class MyModel(MySQLSettings, MyOtherMixin, Base):
-        __tablename__ = 'my_model'
-
-        @declared_attr
-        def __table_args__(cls):
-            args = dict()
-            args.update(MySQLSettings.__table_args__)
-            args.update(MyOtherMixin.__table_args__)
-            return args
-
-        id = Column(Integer, primary_key=True)
-
-Creating Indexes with Mixins
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-To define a named, potentially multicolumn :class:`.Index` that applies to all
-tables derived from a mixin, use the "inline" form of :class:`.Index` and establish
-it as part of ``__table_args__``::
-
-    class MyMixin(object):
-        a = Column(Integer)
-        b = Column(Integer)
-
-        @declared_attr
-        def __table_args__(cls):
-            return (Index('test_idx_%s' % cls.__tablename__, 'a', 'b'),)
-
-    class MyModel(MyMixin, Base):
-        __tablename__ = 'atable'
-        c = Column(Integer, primary_key=True)
-
-Special Directives
-==================
-
-``__declare_last__()``
-~~~~~~~~~~~~~~~~~~~~~~
-
-The ``__declare_last__()`` hook allows definition of
-a class level function that is automatically called by the :meth:`.MapperEvents.after_configured`
-event, which occurs after mappings are assumed to be completed and the 'configure' step
-has finished::
-
-    class MyClass(Base):
-        @classmethod
-        def __declare_last__(cls):
-            ""
-            # do something with mappings
-
-.. versionadded:: 0.7.3
-
-.. _declarative_abstract:
-
-``__abstract__``
-~~~~~~~~~~~~~~~~~~~
-
-``__abstract__`` causes declarative to skip the production
-of a table or mapper for the class entirely. A class can be added within a hierarchy
-in the same way as a mixin (see :ref:`declarative_mixins`), allowing subclasses to extend
-just from the special class::
-
-    class SomeAbstractBase(Base):
-        __abstract__ = True
-
-        def some_helpful_method(self):
-            ""
-
-        @declared_attr
-        def __mapper_args__(cls):
-            return {"helpful mapper arguments":True}
-
-    class MyMappedClass(SomeAbstractBase):
-        ""
-
-One possible use of ``__abstract__`` is to use a distinct :class:`.MetaData` for different
-bases::
-
-    Base = declarative_base()
-
-    class DefaultBase(Base):
-        __abstract__ = True
-        metadata = MetaData()
-
-    class OtherBase(Base):
-        __abstract__ = True
-        metadata = MetaData()
-
-Above, classes which inherit from ``DefaultBase`` will use one :class:`.MetaData` as the
-registry of tables, and those which inherit from ``OtherBase`` will use a different one.
-The tables themselves can then be created, perhaps within distinct databases::
-
-    DefaultBase.metadata.create_all(some_engine)
-    OtherBase.metadata.create_all(some_other_engine)
-
-.. versionadded:: 0.7.3
-
-Class Constructor
-=================
-
-As a convenience feature, the :func:`declarative_base` sets a default
-constructor on classes which takes keyword arguments, and assigns them
-to the named attributes::
-
-    e = Engineer(primary_language='python')
-
-Sessions
-========
-
-Note that ``declarative`` does nothing special with sessions, and is
-only intended as an easier way to configure mappers and
-:class:`~sqlalchemy.schema.Table` objects.
A typical application -setup using :func:`~sqlalchemy.orm.scoped_session` might look like:: - - engine = create_engine('postgresql://scott:tiger@localhost/test') - Session = scoped_session(sessionmaker(autocommit=False, - autoflush=False, - bind=engine)) - Base = declarative_base() - -Mapped instances then make usage of -:class:`~sqlalchemy.orm.session.Session` in the usual way. - -""" - -from sqlalchemy.schema import Table, Column, MetaData, _get_table_key -from sqlalchemy.orm import synonym as _orm_synonym, mapper,\ - comparable_property, class_mapper -from sqlalchemy.orm.interfaces import MapperProperty -from sqlalchemy.orm.properties import RelationshipProperty, ColumnProperty, CompositeProperty -from sqlalchemy.orm.util import _is_mapped_class -from sqlalchemy import util, exc -from sqlalchemy.sql import util as sql_util, expression -from sqlalchemy import event -from sqlalchemy.orm.util import polymorphic_union, _mapper_or_none - - -__all__ = 'declarative_base', 'synonym_for', \ - 'comparable_using', 'instrument_declarative' - -def instrument_declarative(cls, registry, metadata): - """Given a class, configure the class declaratively, - using the given registry, which can be any dictionary, and - MetaData object. - - """ - if '_decl_class_registry' in cls.__dict__: - raise exc.InvalidRequestError( - "Class %r already has been " - "instrumented declaratively" % cls) - cls._decl_class_registry = registry - cls.metadata = metadata - _as_declarative(cls, cls.__name__, cls.__dict__) - -def has_inherited_table(cls): - """Given a class, return True if any of the classes it inherits from has a - mapped table, otherwise return False. - """ - for class_ in cls.__mro__: - if getattr(class_,'__table__',None) is not None: - return True - return False - -def _as_declarative(cls, classname, dict_): - - # dict_ will be a dictproxy, which we can't write to, and we need to! - dict_ = dict(dict_) - - column_copies = {} - potential_columns = {} - - mapper_args = {} - table_args = inherited_table_args = None - tablename = None - parent_columns = () - - declarative_props = (declared_attr, util.classproperty) - - for base in cls.__mro__: - _is_declarative_inherits = hasattr(base, '_decl_class_registry') - - if '__declare_last__' in base.__dict__: - @event.listens_for(mapper, "after_configured") - def go(): - cls.__declare_last__() - if '__abstract__' in base.__dict__: - if (base is cls or - (base in cls.__bases__ and not _is_declarative_inherits) - ): - return - - class_mapped = _is_mapped_class(base) - if class_mapped: - parent_columns = base.__table__.c.keys() - - for name,obj in vars(base).items(): - if name == '__mapper_args__': - if not mapper_args and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - mapper_args = cls.__mapper_args__ - elif name == '__tablename__': - if not tablename and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - tablename = cls.__tablename__ - elif name == '__table_args__': - if not table_args and ( - not class_mapped or - isinstance(obj, declarative_props) - ): - table_args = cls.__table_args__ - if not isinstance(table_args, (tuple, dict, type(None))): - raise exc.ArgumentError( - "__table_args__ value must be a tuple, " - "dict, or None") - if base is not cls: - inherited_table_args = True - elif class_mapped: - if isinstance(obj, declarative_props): - util.warn("Regular (i.e. not __special__) " - "attribute '%s.%s' uses @declared_attr, " - "but owning class %s is mapped - " - "not applying to subclass %s." 
- % (base.__name__, name, base, cls)) - continue - elif base is not cls: - # we're a mixin. - if isinstance(obj, Column): - if obj.foreign_keys: - raise exc.InvalidRequestError( - "Columns with foreign keys to other columns " - "must be declared as @declared_attr callables " - "on declarative mixin classes. ") - if name not in dict_ and not ( - '__table__' in dict_ and - (obj.name or name) in dict_['__table__'].c - ) and name not in potential_columns: - potential_columns[name] = \ - column_copies[obj] = \ - obj.copy() - column_copies[obj]._creation_order = \ - obj._creation_order - elif isinstance(obj, MapperProperty): - raise exc.InvalidRequestError( - "Mapper properties (i.e. deferred," - "column_property(), relationship(), etc.) must " - "be declared as @declared_attr callables " - "on declarative mixin classes.") - elif isinstance(obj, declarative_props): - dict_[name] = ret = \ - column_copies[obj] = getattr(cls, name) - if isinstance(ret, (Column, MapperProperty)) and \ - ret.doc is None: - ret.doc = obj.__doc__ - - # apply inherited columns as we should - for k, v in potential_columns.items(): - if tablename or (v.name or k) not in parent_columns: - dict_[k] = v - - if inherited_table_args and not tablename: - table_args = None - - # make sure that column copies are used rather - # than the original columns from any mixins - for k in ('version_id_col', 'polymorphic_on',): - if k in mapper_args: - v = mapper_args[k] - mapper_args[k] = column_copies.get(v,v) - - if classname in cls._decl_class_registry: - util.warn("The classname %r is already in the registry of this" - " declarative base, mapped to %r" % ( - classname, - cls._decl_class_registry[classname] - )) - cls._decl_class_registry[classname] = cls - our_stuff = util.OrderedDict() - - for k in dict_: - value = dict_[k] - if isinstance(value, declarative_props): - value = getattr(cls, k) - - if (isinstance(value, tuple) and len(value) == 1 and - isinstance(value[0], (Column, MapperProperty))): - util.warn("Ignoring declarative-like tuple value of attribute " - "%s: possibly a copy-and-paste error with a comma " - "left at the end of the line?" % k) - continue - if not isinstance(value, (Column, MapperProperty)): - continue - if k == 'metadata': - raise exc.InvalidRequestError( - "Attribute name 'metadata' is reserved " - "for the MetaData instance when using a " - "declarative base class." - ) - prop = _deferred_relationship(cls, value) - our_stuff[k] = prop - - # set up attributes in the order they were created - our_stuff.sort(key=lambda key: our_stuff[key]._creation_order) - - # extract columns from the class dict - cols = set() - for key, c in our_stuff.iteritems(): - if isinstance(c, (ColumnProperty, CompositeProperty)): - for col in c.columns: - if isinstance(col, Column) and \ - col.table is None: - _undefer_column_name(key, col) - cols.add(col) - elif isinstance(c, Column): - _undefer_column_name(key, c) - cols.add(c) - # if the column is the same name as the key, - # remove it from the explicit properties dict. - # the normal rules for assigning column-based properties - # will take over, including precedence of columns - # in multi-column ColumnProperties. 
- if key == c.key: - del our_stuff[key] - cols = sorted(cols, key=lambda c:c._creation_order) - table = None - - if hasattr(cls, '__table_cls__'): - table_cls = util.unbound_method_to_callable(cls.__table_cls__) - else: - table_cls = Table - - if '__table__' not in dict_: - if tablename is not None: - - args, table_kw = (), {} - if table_args: - if isinstance(table_args, dict): - table_kw = table_args - elif isinstance(table_args, tuple): - if isinstance(table_args[-1], dict): - args, table_kw = table_args[0:-1], table_args[-1] - else: - args = table_args - - autoload = dict_.get('__autoload__') - if autoload: - table_kw['autoload'] = True - - cls.__table__ = table = table_cls(tablename, cls.metadata, - *(tuple(cols) + tuple(args)), - **table_kw) - else: - table = cls.__table__ - if cols: - for c in cols: - if not table.c.contains_column(c): - raise exc.ArgumentError( - "Can't add additional column %r when " - "specifying __table__" % c.key - ) - - if 'inherits' not in mapper_args: - for c in cls.__bases__: - if _is_mapped_class(c): - mapper_args['inherits'] = c - break - - if hasattr(cls, '__mapper_cls__'): - mapper_cls = util.unbound_method_to_callable(cls.__mapper_cls__) - else: - mapper_cls = mapper - - if table is None and 'inherits' not in mapper_args: - raise exc.InvalidRequestError( - "Class %r does not have a __table__ or __tablename__ " - "specified and does not inherit from an existing " - "table-mapped class." % cls - ) - - elif 'inherits' in mapper_args and not mapper_args.get('concrete', False): - inherited_mapper = class_mapper(mapper_args['inherits'], - compile=False) - inherited_table = inherited_mapper.local_table - - if table is None: - # single table inheritance. - # ensure no table args - if table_args: - raise exc.ArgumentError( - "Can't place __table_args__ on an inherited class " - "with no table." - ) - - # add any columns declared here to the inherited table. - for c in cols: - if c.primary_key: - raise exc.ArgumentError( - "Can't place primary key columns on an inherited " - "class with no table." - ) - if c.name in inherited_table.c: - raise exc.ArgumentError( - "Column '%s' on class %s conflicts with " - "existing column '%s'" % - (c, cls, inherited_table.c[c.name]) - ) - inherited_table.append_column(c) - - # single or joined inheritance - # exclude any cols on the inherited table which are not mapped on the - # parent class, to avoid - # mapping columns specific to sibling/nephew classes - inherited_mapper = class_mapper(mapper_args['inherits'], - compile=False) - inherited_table = inherited_mapper.local_table - - if 'exclude_properties' not in mapper_args: - mapper_args['exclude_properties'] = exclude_properties = \ - set([c.key for c in inherited_table.c - if c not in inherited_mapper._columntoproperty]) - exclude_properties.difference_update([c.key for c in cols]) - - # look through columns in the current mapper that - # are keyed to a propname different than the colname - # (if names were the same, we'd have popped it out above, - # in which case the mapper makes this combination). - # See if the superclass has a similar column property. - # If so, join them together. - for k, col in our_stuff.items(): - if not isinstance(col, expression.ColumnElement): - continue - if k in inherited_mapper._props: - p = inherited_mapper._props[k] - if isinstance(p, ColumnProperty): - # note here we place the subclass column - # first. See [ticket:1892] for background. 
- our_stuff[k] = [col] + p.columns - - - cls.__mapper__ = mapper_cls(cls, - table, - properties=our_stuff, - **mapper_args) - -class DeclarativeMeta(type): - def __init__(cls, classname, bases, dict_): - if '_decl_class_registry' not in cls.__dict__: - _as_declarative(cls, classname, cls.__dict__) - type.__init__(cls, classname, bases, dict_) - - def __setattr__(cls, key, value): - if '__mapper__' in cls.__dict__: - if isinstance(value, Column): - _undefer_column_name(key, value) - cls.__table__.append_column(value) - cls.__mapper__.add_property(key, value) - elif isinstance(value, ColumnProperty): - for col in value.columns: - if isinstance(col, Column) and col.table is None: - _undefer_column_name(key, col) - cls.__table__.append_column(col) - cls.__mapper__.add_property(key, value) - elif isinstance(value, MapperProperty): - cls.__mapper__.add_property( - key, - _deferred_relationship(cls, value) - ) - else: - type.__setattr__(cls, key, value) - else: - type.__setattr__(cls, key, value) - - -class _GetColumns(object): - def __init__(self, cls): - self.cls = cls - - def __getattr__(self, key): - mapper = class_mapper(self.cls, compile=False) - if mapper: - if not mapper.has_property(key): - raise exc.InvalidRequestError( - "Class %r does not have a mapped column named %r" - % (self.cls, key)) - - prop = mapper.get_property(key) - if not isinstance(prop, ColumnProperty): - raise exc.InvalidRequestError( - "Property %r is not an instance of" - " ColumnProperty (i.e. does not correspond" - " directly to a Column)." % key) - return getattr(self.cls, key) - -class _GetTable(object): - def __init__(self, key, metadata): - self.key = key - self.metadata = metadata - - def __getattr__(self, key): - return self.metadata.tables[ - _get_table_key(key, self.key) - ] - -def _deferred_relationship(cls, prop): - def resolve_arg(arg): - import sqlalchemy - - def access_cls(key): - if key in cls._decl_class_registry: - return _GetColumns(cls._decl_class_registry[key]) - elif key in cls.metadata.tables: - return cls.metadata.tables[key] - elif key in cls.metadata._schemas: - return _GetTable(key, cls.metadata) - else: - return sqlalchemy.__dict__[key] - - d = util.PopulateDict(access_cls) - def return_cls(): - try: - x = eval(arg, globals(), d) - - if isinstance(x, _GetColumns): - return x.cls - else: - return x - except NameError, n: - raise exc.InvalidRequestError( - "When initializing mapper %s, expression %r failed to " - "locate a name (%r). If this is a class name, consider " - "adding this relationship() to the %r class after " - "both dependent classes have been defined." % - (prop.parent, arg, n.args[0], cls) - ) - return return_cls - - if isinstance(prop, RelationshipProperty): - for attr in ('argument', 'order_by', 'primaryjoin', 'secondaryjoin', - 'secondary', '_user_defined_foreign_keys', 'remote_side'): - v = getattr(prop, attr) - if isinstance(v, basestring): - setattr(prop, attr, resolve_arg(v)) - - if prop.backref and isinstance(prop.backref, tuple): - key, kwargs = prop.backref - for attr in ('primaryjoin', 'secondaryjoin', 'secondary', - 'foreign_keys', 'remote_side', 'order_by'): - if attr in kwargs and isinstance(kwargs[attr], basestring): - kwargs[attr] = resolve_arg(kwargs[attr]) - - - return prop - -def synonym_for(name, map_column=False): - """Decorator, make a Python @property a query synonym for a column. - - A decorator version of :func:`~sqlalchemy.orm.synonym`. 
The function being - decorated is the 'descriptor', otherwise passes its arguments through to - synonym():: - - @synonym_for('col') - @property - def prop(self): - return 'special sauce' - - The regular ``synonym()`` is also usable directly in a declarative setting - and may be convenient for read/write properties:: - - prop = synonym('col', descriptor=property(_read_prop, _write_prop)) - - """ - def decorate(fn): - return _orm_synonym(name, map_column=map_column, descriptor=fn) - return decorate - -def comparable_using(comparator_factory): - """Decorator, allow a Python @property to be used in query criteria. - - This is a decorator front end to - :func:`~sqlalchemy.orm.comparable_property` that passes - through the comparator_factory and the function being decorated:: - - @comparable_using(MyComparatorType) - @property - def prop(self): - return 'special sauce' - - The regular ``comparable_property()`` is also usable directly in a - declarative setting and may be convenient for read/write properties:: - - prop = comparable_property(MyComparatorType) - - """ - def decorate(fn): - return comparable_property(comparator_factory, fn) - return decorate - -class declared_attr(property): - """Mark a class-level method as representing the definition of - a mapped property or special declarative member name. - - .. versionchanged:: 0.6.{2,3,4} - ``@declared_attr`` is available as - ``sqlalchemy.util.classproperty`` for SQLAlchemy versions - 0.6.2, 0.6.3, 0.6.4. - - @declared_attr turns the attribute into a scalar-like - property that can be invoked from the uninstantiated class. - Declarative treats attributes specifically marked with - @declared_attr as returning a construct that is specific - to mapping or declarative table configuration. The name - of the attribute is that of what the non-dynamic version - of the attribute would be. - - @declared_attr is more often than not applicable to mixins, - to define relationships that are to be applied to different - implementors of the class:: - - class ProvidesUser(object): - "A mixin that adds a 'user' relationship to classes." - - @declared_attr - def user(self): - return relationship("User") - - It also can be applied to mapped classes, such as to provide - a "polymorphic" scheme for inheritance:: - - class Employee(Base): - id = Column(Integer, primary_key=True) - type = Column(String(50), nullable=False) - - @declared_attr - def __tablename__(cls): - return cls.__name__.lower() - - @declared_attr - def __mapper_args__(cls): - if cls.__name__ == 'Employee': - return { - "polymorphic_on":cls.type, - "polymorphic_identity":"Employee" - } - else: - return {"polymorphic_identity":cls.__name__} - - """ - - def __init__(self, fget, *arg, **kw): - super(declared_attr, self).__init__(fget, *arg, **kw) - self.__doc__ = fget.__doc__ - - def __get__(desc, self, cls): - return desc.fget(cls) - -def _declarative_constructor(self, **kwargs): - """A simple constructor that allows initialization from kwargs. - - Sets attributes on the constructed instance using the names and - values in ``kwargs``. - - Only keys that are present as - attributes of the instance's class are allowed. These could be, - for example, any mapped columns or relationships. 
- """ - cls_ = type(self) - for k in kwargs: - if not hasattr(cls_, k): - raise TypeError( - "%r is an invalid keyword argument for %s" % - (k, cls_.__name__)) - setattr(self, k, kwargs[k]) -_declarative_constructor.__name__ = '__init__' - -def declarative_base(bind=None, metadata=None, mapper=None, cls=object, - name='Base', constructor=_declarative_constructor, - class_registry=None, - metaclass=DeclarativeMeta): - """Construct a base class for declarative class definitions. - - The new base class will be given a metaclass that produces - appropriate :class:`~sqlalchemy.schema.Table` objects and makes - the appropriate :func:`~sqlalchemy.orm.mapper` calls based on the - information provided declaratively in the class and any subclasses - of the class. - - :param bind: An optional - :class:`~sqlalchemy.engine.base.Connectable`, will be assigned - the ``bind`` attribute on the :class:`~sqlalchemy.MetaData` - instance. - - :param metadata: - An optional :class:`~sqlalchemy.MetaData` instance. All - :class:`~sqlalchemy.schema.Table` objects implicitly declared by - subclasses of the base will share this MetaData. A MetaData instance - will be created if none is provided. The - :class:`~sqlalchemy.MetaData` instance will be available via the - `metadata` attribute of the generated declarative base class. - - :param mapper: - An optional callable, defaults to :func:`~sqlalchemy.orm.mapper`. Will - be used to map subclasses to their Tables. - - :param cls: - Defaults to :class:`object`. A type to use as the base for the generated - declarative base class. May be a class or tuple of classes. - - :param name: - Defaults to ``Base``. The display name for the generated - class. Customizing this is not required, but can improve clarity in - tracebacks and debugging. - - :param constructor: - Defaults to - :func:`~sqlalchemy.ext.declarative._declarative_constructor`, an - __init__ implementation that assigns \**kwargs for declared - fields and relationships to an instance. If ``None`` is supplied, - no __init__ will be provided and construction will fall back to - cls.__init__ by way of the normal Python semantics. - - :param class_registry: optional dictionary that will serve as the - registry of class names-> mapped classes when string names - are used to identify classes inside of :func:`.relationship` - and others. Allows two or more declarative base classes - to share the same registry of class names for simplified - inter-base relationships. - - :param metaclass: - Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__ - compatible callable to use as the meta type of the generated - declarative base class. - - """ - lcl_metadata = metadata or MetaData() - if bind: - lcl_metadata.bind = bind - - if class_registry is None: - class_registry = {} - - bases = not isinstance(cls, tuple) and (cls,) or cls - class_dict = dict(_decl_class_registry=class_registry, - metadata=lcl_metadata) - - if constructor: - class_dict['__init__'] = constructor - if mapper: - class_dict['__mapper_cls__'] = mapper - - return metaclass(name, bases, class_dict) - -def _undefer_column_name(key, column): - if column.key is None: - column.key = key - if column.name is None: - column.name = key - -class ConcreteBase(object): - """A helper class for 'concrete' declarative mappings. - - :class:`.ConcreteBase` will use the :func:`.polymorphic_union` - function automatically, against all tables mapped as a subclass - to this class. 
The function is called via the
-    ``__declare_last__()`` function, which is essentially
-    a hook for the :meth:`.MapperEvents.after_configured` event.
-
-    :class:`.ConcreteBase` produces a mapped
-    table for the class itself.  Compare to :class:`.AbstractConcreteBase`,
-    which does not.
-
-    Example::
-
-        from sqlalchemy.ext.declarative import ConcreteBase
-
-        class Employee(ConcreteBase, Base):
-            __tablename__ = 'employee'
-            employee_id = Column(Integer, primary_key=True)
-            name = Column(String(50))
-            __mapper_args__ = {
-                'polymorphic_identity':'employee',
-                'concrete':True}
-
-        class Manager(Employee):
-            __tablename__ = 'manager'
-            employee_id = Column(Integer, primary_key=True)
-            name = Column(String(50))
-            manager_data = Column(String(40))
-            __mapper_args__ = {
-                'polymorphic_identity':'manager',
-                'concrete':True}
-
-    """
-
-    @classmethod
-    def _create_polymorphic_union(cls, mappers):
-        return polymorphic_union(dict(
-            (mapper.polymorphic_identity, mapper.local_table)
-            for mapper in mappers
-        ), 'type', 'pjoin')
-
-    @classmethod
-    def __declare_last__(cls):
-        m = cls.__mapper__
-        if m.with_polymorphic:
-            return
-
-        mappers = list(m.self_and_descendants)
-        pjoin = cls._create_polymorphic_union(mappers)
-        m._set_with_polymorphic(("*", pjoin))
-        m._set_polymorphic_on(pjoin.c.type)
-
-class AbstractConcreteBase(ConcreteBase):
-    """A helper class for 'concrete' declarative mappings.
-
-    :class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
-    function automatically, against all tables mapped as a subclass
-    to this class.  The function is called via the
-    ``__declare_last__()`` function, which is essentially
-    a hook for the :meth:`.MapperEvents.after_configured` event.
-
-    :class:`.AbstractConcreteBase` does not produce a mapped
-    table for the class itself.  Compare to :class:`.ConcreteBase`,
-    which does.
-
-    Example::
-
-        from sqlalchemy.ext.declarative import AbstractConcreteBase
-
-        class Employee(AbstractConcreteBase, Base):
-            pass
-
-        class Manager(Employee):
-            __tablename__ = 'manager'
-            employee_id = Column(Integer, primary_key=True)
-            name = Column(String(50))
-            manager_data = Column(String(40))
-            __mapper_args__ = {
-                'polymorphic_identity':'manager',
-                'concrete':True}
-
-    """
-
-    __abstract__ = True
-
-    @classmethod
-    def __declare_last__(cls):
-        if hasattr(cls, '__mapper__'):
-            return
-
-        # can't rely on 'self_and_descendants' here
-        # since technically an immediate subclass
-        # might not be mapped, but a subclass
-        # may be.
-        mappers = []
-        stack = list(cls.__subclasses__())
-        while stack:
-            klass = stack.pop()
-            stack.extend(klass.__subclasses__())
-            mn = _mapper_or_none(klass)
-            if mn is not None:
-                mappers.append(mn)
-        pjoin = cls._create_polymorphic_union(mappers)
-        cls.__mapper__ = m = mapper(cls, pjoin, polymorphic_on=pjoin.c.type)
-
-        for scls in cls.__subclasses__():
-            sm = _mapper_or_none(scls)
-            if sm.concrete and cls in scls.__bases__:
-                sm._set_concrete_base(m)
diff --git a/libs/sqlalchemy/ext/horizontal_shard.py b/libs/sqlalchemy/ext/horizontal_shard.py
deleted file mode 100644
index 05b45e03..00000000
--- a/libs/sqlalchemy/ext/horizontal_shard.py
+++ /dev/null
@@ -1,128 +0,0 @@
-# ext/horizontal_shard.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Horizontal sharding support.
- -Defines a rudimental 'horizontal sharding' system which allows a Session to -distribute queries and persistence operations across multiple databases. - -For a usage example, see the :ref:`examples_sharding` example included in -the source distribution. - -""" - -from sqlalchemy import exc as sa_exc -from sqlalchemy import util -from sqlalchemy.orm.session import Session -from sqlalchemy.orm.query import Query - -__all__ = ['ShardedSession', 'ShardedQuery'] - -class ShardedQuery(Query): - def __init__(self, *args, **kwargs): - super(ShardedQuery, self).__init__(*args, **kwargs) - self.id_chooser = self.session.id_chooser - self.query_chooser = self.session.query_chooser - self._shard_id = None - - def set_shard(self, shard_id): - """return a new query, limited to a single shard ID. - - all subsequent operations with the returned query will - be against the single shard regardless of other state. - """ - - q = self._clone() - q._shard_id = shard_id - return q - - def _execute_and_instances(self, context): - def iter_for_shard(shard_id): - context.attributes['shard_id'] = shard_id - result = self._connection_from_session( - mapper=self._mapper_zero(), - shard_id=shard_id).execute( - context.statement, - self._params) - return self.instances(result, context) - - if self._shard_id is not None: - return iter_for_shard(self._shard_id) - else: - partial = [] - for shard_id in self.query_chooser(self): - partial.extend(iter_for_shard(shard_id)) - - # if some kind of in memory 'sorting' - # were done, this is where it would happen - return iter(partial) - - def get(self, ident, **kwargs): - if self._shard_id is not None: - return super(ShardedQuery, self).get(ident) - else: - ident = util.to_list(ident) - for shard_id in self.id_chooser(self, ident): - o = self.set_shard(shard_id).get(ident, **kwargs) - if o is not None: - return o - else: - return None - -class ShardedSession(Session): - def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None, - query_cls=ShardedQuery, **kwargs): - """Construct a ShardedSession. - - :param shard_chooser: A callable which, passed a Mapper, a mapped instance, and possibly a - SQL clause, returns a shard ID. This id may be based off of the - attributes present within the object, or on some round-robin - scheme. If the scheme is based on a selection, it should set - whatever state on the instance to mark it in the future as - participating in that shard. - - :param id_chooser: A callable, passed a query and a tuple of identity values, which - should return a list of shard ids where the ID might reside. The - databases will be queried in the order of this listing. - - :param query_chooser: For a given Query, returns the list of shard_ids where the query - should be issued. Results from all shards returned will be combined - together into a single listing. - - :param shards: A dictionary of string shard names to :class:`~sqlalchemy.engine.base.Engine` - objects. 
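-
-        Putting those parameters together, a minimal two-shard sketch
-        (illustrative only; the engine URLs, the ``region`` attribute and
-        the chooser logic are hypothetical)::
-
-            from sqlalchemy import create_engine
-
-            shards = {
-                'us': create_engine('sqlite:///us.db'),
-                'eu': create_engine('sqlite:///eu.db'),
-            }
-
-            def shard_chooser(mapper, instance, clause=None):
-                # route by instance state, falling back to a default shard
-                return getattr(instance, 'region', None) or 'us'
-
-            def id_chooser(query, ident):
-                # a bare primary key could live in either shard
-                return ['us', 'eu']
-
-            def query_chooser(query):
-                # nothing to narrow the search; query every shard
-                return ['us', 'eu']
-
-            sess = ShardedSession(shard_chooser, id_chooser,
-                                  query_chooser, shards=shards)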
- - """ - super(ShardedSession, self).__init__(query_cls=query_cls, **kwargs) - self.shard_chooser = shard_chooser - self.id_chooser = id_chooser - self.query_chooser = query_chooser - self.__binds = {} - self.connection_callable = self.connection - if shards is not None: - for k in shards: - self.bind_shard(k, shards[k]) - - def connection(self, mapper=None, instance=None, shard_id=None, **kwargs): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance) - - if self.transaction is not None: - return self.transaction.connection(mapper, shard_id=shard_id) - else: - return self.get_bind(mapper, - shard_id=shard_id, - instance=instance).contextual_connect(**kwargs) - - def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw): - if shard_id is None: - shard_id = self.shard_chooser(mapper, instance, clause=clause) - return self.__binds[shard_id] - - def bind_shard(self, shard_id, bind): - self.__binds[shard_id] = bind - - diff --git a/libs/sqlalchemy/ext/hybrid.py b/libs/sqlalchemy/ext/hybrid.py deleted file mode 100644 index 038898e4..00000000 --- a/libs/sqlalchemy/ext/hybrid.py +++ /dev/null @@ -1,747 +0,0 @@ -# ext/hybrid.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Define attributes on ORM-mapped classes that have "hybrid" behavior. - -"hybrid" means the attribute has distinct behaviors defined at the -class level and at the instance level. - -The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of method -decorator, is around 50 lines of code and has almost no dependencies on the rest -of SQLAlchemy. It can, in theory, work with any descriptor-based expression -system. - -Consider a mapping ``Interval``, representing integer ``start`` and ``end`` -values. We can define higher level functions on mapped classes that produce -SQL expressions at the class level, and Python expression evaluation at the -instance level. Below, each function decorated with :class:`.hybrid_method` or -:class:`.hybrid_property` may receive ``self`` as an instance of the class, or -as the class itself:: - - from sqlalchemy import Column, Integer - from sqlalchemy.ext.declarative import declarative_base - from sqlalchemy.orm import Session, aliased - from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method - - Base = declarative_base() - - class Interval(Base): - __tablename__ = 'interval' - - id = Column(Integer, primary_key=True) - start = Column(Integer, nullable=False) - end = Column(Integer, nullable=False) - - def __init__(self, start, end): - self.start = start - self.end = end - - @hybrid_property - def length(self): - return self.end - self.start - - @hybrid_method - def contains(self,point): - return (self.start <= point) & (point < self.end) - - @hybrid_method - def intersects(self, other): - return self.contains(other.start) | self.contains(other.end) - -Above, the ``length`` property returns the difference between the ``end`` and -``start`` attributes. 
With an instance of ``Interval``, this subtraction occurs -in Python, using normal Python descriptor mechanics:: - - >>> i1 = Interval(5, 10) - >>> i1.length - 5 - -When dealing with the ``Interval`` class itself, the :class:`.hybrid_property` -descriptor evaluates the function body given the ``Interval`` class as -the argument, which when evaluated with SQLAlchemy expression mechanics -returns a new SQL expression:: - - >>> print Interval.length - interval."end" - interval.start - - >>> print Session().query(Interval).filter(Interval.length > 10) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval."end" - interval.start > :param_1 - -ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to -locate attributes, so can also be used with hybrid attributes:: - - >>> print Session().query(Interval).filter_by(length=5) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval."end" - interval.start = :param_1 - -The ``Interval`` class example also illustrates two methods, ``contains()`` and ``intersects()``, -decorated with :class:`.hybrid_method`. -This decorator applies the same idea to methods that :class:`.hybrid_property` applies -to attributes. The methods return boolean values, and take advantage -of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and -SQL expression-level boolean behavior:: - - >>> i1.contains(6) - True - >>> i1.contains(15) - False - >>> i1.intersects(Interval(7, 18)) - True - >>> i1.intersects(Interval(25, 29)) - False - - >>> print Session().query(Interval).filter(Interval.contains(15)) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end - FROM interval - WHERE interval.start <= :start_1 AND interval."end" > :end_1 - - >>> ia = aliased(Interval) - >>> print Session().query(Interval, ia).filter(Interval.intersects(ia)) - SELECT interval.id AS interval_id, interval.start AS interval_start, - interval."end" AS interval_end, interval_1.id AS interval_1_id, - interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end - FROM interval, interval AS interval_1 - WHERE interval.start <= interval_1.start - AND interval."end" > interval_1.start - OR interval.start <= interval_1."end" - AND interval."end" > interval_1."end" - -Defining Expression Behavior Distinct from Attribute Behavior --------------------------------------------------------------- - -Our usage of the ``&`` and ``|`` bitwise operators above was fortunate, considering -our functions operated on two boolean values to return a new one. In many cases, the construction -of an in-Python function and a SQLAlchemy SQL expression have enough differences that two -separate Python expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorators -define the :meth:`.hybrid_property.expression` modifier for this purpose. As an example we'll -define the radius of the interval, which requires the usage of the absolute value function:: - - from sqlalchemy import func - - class Interval(object): - # ... 
-
-        @hybrid_property
-        def radius(self):
-            return abs(self.length) / 2
-
-        @radius.expression
-        def radius(cls):
-            return func.abs(cls.length) / 2
-
-Above, the Python function ``abs()`` is used for instance-level operations, while the SQL
-function ``ABS()`` is used via the :attr:`.func` object for class-level expressions::
-
-    >>> i1.radius
-    2
-
-    >>> print Session().query(Interval).filter(Interval.radius > 5)
-    SELECT interval.id AS interval_id, interval.start AS interval_start,
-    interval."end" AS interval_end
-    FROM interval
-    WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1
-
-Defining Setters
-----------------
-
-Hybrid properties can also define setter methods.  If we wanted ``length`` above, when
-set, to modify the endpoint value::
-
-    class Interval(object):
-        # ...
-
-        @hybrid_property
-        def length(self):
-            return self.end - self.start
-
-        @length.setter
-        def length(self, value):
-            self.end = self.start + value
-
-The ``length(self, value)`` method is now called upon set::
-
-    >>> i1 = Interval(5, 10)
-    >>> i1.length
-    5
-    >>> i1.length = 12
-    >>> i1.end
-    17
-
-Working with Relationships
---------------------------
-
-There's no essential difference when creating hybrids that work with
-related objects as opposed to column-based data.  The need for distinct
-expressions tends to be greater.  The two variants we'll illustrate
-are the "join-dependent" hybrid, and the "correlated subquery" hybrid.
-
-Join-Dependent Relationship Hybrid
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Consider the following declarative
-mapping which relates a ``User`` to a ``SavingsAccount``::
-
-    from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
-    from sqlalchemy.orm import relationship
-    from sqlalchemy.ext.declarative import declarative_base
-    from sqlalchemy.ext.hybrid import hybrid_property
-
-    Base = declarative_base()
-
-    class SavingsAccount(Base):
-        __tablename__ = 'account'
-        id = Column(Integer, primary_key=True)
-        user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
-        balance = Column(Numeric(15, 5))
-
-    class User(Base):
-        __tablename__ = 'user'
-        id = Column(Integer, primary_key=True)
-        name = Column(String(100), nullable=False)
-
-        accounts = relationship("SavingsAccount", backref="owner")
-
-        @hybrid_property
-        def balance(self):
-            if self.accounts:
-                return self.accounts[0].balance
-            else:
-                return None
-
-        @balance.setter
-        def balance(self, value):
-            if not self.accounts:
-                account = SavingsAccount(owner=self)
-            else:
-                account = self.accounts[0]
-            account.balance = value
-
-        @balance.expression
-        def balance(cls):
-            return SavingsAccount.balance
-
-The above hybrid property ``balance`` works with the first
-``SavingsAccount`` entry in the list of accounts for this user.  The
-in-Python getter/setter methods can treat ``accounts`` as a Python
-list available on ``self``.
-
-However, at the expression level, it's expected that the ``User`` class will be used
-in an appropriate context such that an appropriate join to
-``SavingsAccount`` will be present::
-
-    >>> print Session().query(User, User.balance).\\
-    ...         join(User.accounts).filter(User.balance > 5000)
-    SELECT "user".id AS user_id, "user".name AS user_name,
-    account.balance AS account_balance
-    FROM "user" JOIN account ON "user".id = account.user_id
-    WHERE account.balance > :balance_1
-
-Note, however, that while the instance-level accessors need to worry
-about whether ``self.accounts`` is even present, this issue expresses
-itself differently at the SQL expression level, where we basically
-would use an outer join::
-
-    >>> from sqlalchemy import or_
-    >>> print (Session().query(User, User.balance).outerjoin(User.accounts).
-    ...         filter(or_(User.balance < 5000, User.balance == None)))
-    SELECT "user".id AS user_id, "user".name AS user_name,
-    account.balance AS account_balance
-    FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
-    WHERE account.balance < :balance_1 OR account.balance IS NULL
-
-Correlated Subquery Relationship Hybrid
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-We can, of course, forego being dependent on the enclosing query's usage
-of joins in favor of the correlated
-subquery, which can portably be packed into a single column expression.
-A correlated subquery is more portable, but often performs more poorly
-at the SQL level.
-Using the same technique illustrated at :ref:`mapper_column_property_sql_expressions`,
-we can adjust our ``SavingsAccount`` example to aggregate the balances for
-*all* accounts, and use a correlated subquery for the column expression::
-
-    from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
-    from sqlalchemy.orm import relationship
-    from sqlalchemy.ext.declarative import declarative_base
-    from sqlalchemy.ext.hybrid import hybrid_property
-    from sqlalchemy import select, func
-
-    Base = declarative_base()
-
-    class SavingsAccount(Base):
-        __tablename__ = 'account'
-        id = Column(Integer, primary_key=True)
-        user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
-        balance = Column(Numeric(15, 5))
-
-    class User(Base):
-        __tablename__ = 'user'
-        id = Column(Integer, primary_key=True)
-        name = Column(String(100), nullable=False)
-
-        accounts = relationship("SavingsAccount", backref="owner")
-
-        @hybrid_property
-        def balance(self):
-            return sum(acc.balance for acc in self.accounts)
-
-        @balance.expression
-        def balance(cls):
-            return select([func.sum(SavingsAccount.balance)]).\\
-                    where(SavingsAccount.user_id==cls.id).\\
-                    label('total_balance')
-
-The above recipe will give us the ``balance`` column which renders
-a correlated SELECT::
-
-    >>> print Session().query(User).filter(User.balance > 400)
-    SELECT "user".id AS user_id, "user".name AS user_name
-    FROM "user"
-    WHERE (SELECT sum(account.balance) AS sum_1
-    FROM account
-    WHERE account.user_id = "user".id) > :param_1
-
-.. _hybrid_custom_comparators:
-
-Building Custom Comparators
----------------------------
-
-The hybrid property also includes a helper that allows construction of custom comparators.
-A comparator object allows one to customize the behavior of each SQLAlchemy expression
-operator individually.  They are useful when creating custom types that have
-some highly idiosyncratic behavior on the SQL side.
- -The example class below allows case-insensitive comparisons on the attribute -named ``word_insensitive``:: - - from sqlalchemy.ext.hybrid import Comparator, hybrid_property - from sqlalchemy import func, Column, Integer, String - from sqlalchemy.orm import Session - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class CaseInsensitiveComparator(Comparator): - def __eq__(self, other): - return func.lower(self.__clause_element__()) == func.lower(other) - - class SearchWord(Base): - __tablename__ = 'searchword' - id = Column(Integer, primary_key=True) - word = Column(String(255), nullable=False) - - @hybrid_property - def word_insensitive(self): - return self.word.lower() - - @word_insensitive.comparator - def word_insensitive(cls): - return CaseInsensitiveComparator(cls.word) - -Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()`` -SQL function to both sides:: - - >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") - SELECT searchword.id AS searchword_id, searchword.word AS searchword_word - FROM searchword - WHERE lower(searchword.word) = lower(:lower_1) - -The ``CaseInsensitiveComparator`` above implements part of the :class:`.ColumnOperators` -interface. A "coercion" operation like lowercasing can be applied to all comparison operations -(i.e. ``eq``, ``lt``, ``gt``, etc.) using :meth:`.Operators.operate`:: - - class CaseInsensitiveComparator(Comparator): - def operate(self, op, other): - return op(func.lower(self.__clause_element__()), func.lower(other)) - -Hybrid Value Objects --------------------- - -Note in our previous example, if we were to compare the ``word_insensitive`` attribute of -a ``SearchWord`` instance to a plain Python string, the plain Python string would not -be coerced to lower case - the ``CaseInsensitiveComparator`` we built, being returned -by ``@word_insensitive.comparator``, only applies to the SQL side. - -A more comprehensive form of the custom comparator is to construct a *Hybrid Value Object*. -This technique applies the target value or expression to a value object which is then -returned by the accessor in all cases. The value object allows control -of all operations upon the value as well as how compared values are treated, both -on the SQL expression side as well as the Python value side. Replacing the -previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord`` class:: - - class CaseInsensitiveWord(Comparator): - "Hybrid value representing a lower case representation of a word." - - def __init__(self, word): - if isinstance(word, basestring): - self.word = word.lower() - elif isinstance(word, CaseInsensitiveWord): - self.word = word.word - else: - self.word = func.lower(word) - - def operate(self, op, other): - if not isinstance(other, CaseInsensitiveWord): - other = CaseInsensitiveWord(other) - return op(self.word, other.word) - - def __clause_element__(self): - return self.word - - def __str__(self): - return self.word - - key = 'word' - "Label to apply to Query tuple results" - -Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may be a SQL function, -or may be a Python native. By overriding ``operate()`` and ``__clause_element__()`` -to work in terms of ``self.word``, all comparison operations will work against the -"converted" form of ``word``, whether it be SQL side or Python side. 
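-As a quick illustration of the Python side alone (an editor's sketch, not
-part of the original examples)::
-
-    >>> CaseInsensitiveWord("Truck") == "tRUCK"
-    True
-    >>> print CaseInsensitiveWord("Truck")
-    truck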
-Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally -from a single hybrid call:: - - class SearchWord(Base): - __tablename__ = 'searchword' - id = Column(Integer, primary_key=True) - word = Column(String(255), nullable=False) - - @hybrid_property - def word_insensitive(self): - return CaseInsensitiveWord(self.word) - -The ``word_insensitive`` attribute now has case-insensitive comparison behavior -universally, including SQL expression vs. Python expression (note the Python value is -converted to lower case on the Python side here):: - - >>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks") - SELECT searchword.id AS searchword_id, searchword.word AS searchword_word - FROM searchword - WHERE lower(searchword.word) = :lower_1 - -SQL expression versus SQL expression:: - - >>> sw1 = aliased(SearchWord) - >>> sw2 = aliased(SearchWord) - >>> print Session().query( - ... sw1.word_insensitive, - ... sw2.word_insensitive).\\ - ... filter( - ... sw1.word_insensitive > sw2.word_insensitive - ... ) - SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2 - FROM searchword AS searchword_1, searchword AS searchword_2 - WHERE lower(searchword_1.word) > lower(searchword_2.word) - -Python only expression:: - - >>> ws1 = SearchWord(word="SomeWord") - >>> ws1.word_insensitive == "sOmEwOrD" - True - >>> ws1.word_insensitive == "XOmEwOrX" - False - >>> print ws1.word_insensitive - someword - -The Hybrid Value pattern is very useful for any kind of value that may have multiple representations, -such as timestamps, time deltas, units of measurement, currencies and encrypted passwords. - -See Also: - -`Hybrids and Value Agnostic Types `_ - on the techspot.zzzeek.org blog - -`Value Agnostic Types, Part II `_ - on the techspot.zzzeek.org blog - -.. _hybrid_transformers: - -Building Transformers ----------------------- - -A *transformer* is an object which can receive a :class:`.Query` object and return a -new one. The :class:`.Query` object includes a method :meth:`.with_transformation` -that simply returns a new :class:`.Query` transformed by the given function. - -We can combine this with the :class:`.Comparator` class to produce one type -of recipe which can both set up the FROM clause of a query as well as assign -filtering criterion. - -Consider a mapped class ``Node``, which assembles using adjacency list into a hierarchical -tree pattern:: - - from sqlalchemy import Column, Integer, ForeignKey - from sqlalchemy.orm import relationship - from sqlalchemy.ext.declarative import declarative_base - Base = declarative_base() - - class Node(Base): - __tablename__ = 'node' - id =Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) - parent = relationship("Node", remote_side=id) - -Suppose we wanted to add an accessor ``grandparent``. This would return the ``parent`` of -``Node.parent``. When we have an instance of ``Node``, this is simple:: - - from sqlalchemy.ext.hybrid import hybrid_property - - class Node(Base): - # ... - - @hybrid_property - def grandparent(self): - return self.parent.parent - -For the expression, things are not so clear. We'd need to construct a :class:`.Query` where we -:meth:`~.Query.join` twice along ``Node.parent`` to get to the ``grandparent``. 
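-Spelled out by hand, given a ``session``, such a query might look like the
-following (a sketch added for contrast; ``pa`` and ``ga`` are ad-hoc aliases)::
-
-    from sqlalchemy.orm import aliased
-
-    pa = aliased(Node)  # the parent
-    ga = aliased(Node)  # the grandparent
-
-    session.query(Node).\\
-            join(pa, Node.parent).\\
-            join(ga, pa.parent).\\
-            filter(ga.id == 5)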
We can instead -return a transforming callable that we'll combine with the :class:`.Comparator` class -to receive any :class:`.Query` object, and return a new one that's joined to the ``Node.parent`` -attribute and filtered based on the given criterion:: - - from sqlalchemy.ext.hybrid import Comparator - - class GrandparentTransformer(Comparator): - def operate(self, op, other): - def transform(q): - cls = self.__clause_element__() - parent_alias = aliased(cls) - return q.join(parent_alias, cls.parent).\\ - filter(op(parent_alias.parent, other)) - return transform - - Base = declarative_base() - - class Node(Base): - __tablename__ = 'node' - id =Column(Integer, primary_key=True) - parent_id = Column(Integer, ForeignKey('node.id')) - parent = relationship("Node", remote_side=id) - - @hybrid_property - def grandparent(self): - return self.parent.parent - - @grandparent.comparator - def grandparent(cls): - return GrandparentTransformer(cls) - -The ``GrandparentTransformer`` overrides the core :meth:`.Operators.operate` method -at the base of the :class:`.Comparator` hierarchy to return a query-transforming -callable, which then runs the given comparison operation in a particular context. -Such as, in the example above, the ``operate`` method is called, given the -:attr:`.Operators.eq` callable as well as the right side of the comparison -``Node(id=5)``. A function ``transform`` is then returned which will transform -a :class:`.Query` first to join to ``Node.parent``, then to compare ``parent_alias`` -using :attr:`.Operators.eq` against the left and right sides, passing into -:class:`.Query.filter`: - -.. sourcecode:: pycon+sql - - >>> from sqlalchemy.orm import Session - >>> session = Session() - {sql}>>> session.query(Node).\\ - ... with_transformation(Node.grandparent==Node(id=5)).\\ - ... all() - SELECT node.id AS node_id, node.parent_id AS node_parent_id - FROM node JOIN node AS node_1 ON node_1.id = node.parent_id - WHERE :param_1 = node_1.parent_id - {stop} - -We can modify the pattern to be more verbose but flexible by separating -the "join" step from the "filter" step. The tricky part here is ensuring -that successive instances of ``GrandparentTransformer`` use the same -:class:`.AliasedClass` object against ``Node``. Below we use a simple -memoizing approach that associates a ``GrandparentTransformer`` -with each class:: - - class Node(Base): - - # ... - - @grandparent.comparator - def grandparent(cls): - # memoize a GrandparentTransformer - # per class - if '_gp' not in cls.__dict__: - cls._gp = GrandparentTransformer(cls) - return cls._gp - - class GrandparentTransformer(Comparator): - - def __init__(self, cls): - self.parent_alias = aliased(cls) - - @property - def join(self): - def go(q): - return q.join(self.parent_alias, Node.parent) - return go - - def operate(self, op, other): - return op(self.parent_alias.parent, other) - -.. sourcecode:: pycon+sql - - {sql}>>> session.query(Node).\\ - ... with_transformation(Node.grandparent.join).\\ - ... filter(Node.grandparent==Node(id=5)) - SELECT node.id AS node_id, node.parent_id AS node_parent_id - FROM node JOIN node AS node_1 ON node_1.id = node.parent_id - WHERE :param_1 = node_1.parent_id - {stop} - -The "transformer" pattern is an experimental pattern that starts -to make usage of some functional programming paradigms. -While it's only recommended for advanced and/or patient developers, -there's probably a whole lot of amazing things it can be used for. 
- -""" -from sqlalchemy import util -from sqlalchemy.orm import attributes, interfaces - -class hybrid_method(object): - """A decorator which allows definition of a Python object method with both - instance-level and class-level behavior. - - """ - - - def __init__(self, func, expr=None): - """Create a new :class:`.hybrid_method`. - - Usage is typically via decorator:: - - from sqlalchemy.ext.hybrid import hybrid_method - - class SomeClass(object): - @hybrid_method - def value(self, x, y): - return self._value + x + y - - @value.expression - def value(self, x, y): - return func.some_function(self._value, x, y) - - """ - self.func = func - self.expr = expr or func - - def __get__(self, instance, owner): - if instance is None: - return self.expr.__get__(owner, owner.__class__) - else: - return self.func.__get__(instance, owner) - - def expression(self, expr): - """Provide a modifying decorator that defines a SQL-expression producing method.""" - - self.expr = expr - return self - -class hybrid_property(object): - """A decorator which allows definition of a Python descriptor with both - instance-level and class-level behavior. - - """ - - def __init__(self, fget, fset=None, fdel=None, expr=None): - """Create a new :class:`.hybrid_property`. - - Usage is typically via decorator:: - - from sqlalchemy.ext.hybrid import hybrid_property - - class SomeClass(object): - @hybrid_property - def value(self): - return self._value - - @value.setter - def value(self, value): - self._value = value - - """ - self.fget = fget - self.fset = fset - self.fdel = fdel - self.expr = expr or fget - util.update_wrapper(self, fget) - - def __get__(self, instance, owner): - if instance is None: - return self.expr(owner) - else: - return self.fget(instance) - - def __set__(self, instance, value): - if self.fset is None: - raise AttributeError("can't set attribute") - self.fset(instance, value) - - def __delete__(self, instance): - if self.fdel is None: - raise AttributeError("can't delete attribute") - self.fdel(instance) - - def setter(self, fset): - """Provide a modifying decorator that defines a value-setter method.""" - - self.fset = fset - return self - - def deleter(self, fdel): - """Provide a modifying decorator that defines a value-deletion method.""" - - self.fdel = fdel - return self - - def expression(self, expr): - """Provide a modifying decorator that defines a SQL-expression producing method.""" - - self.expr = expr - return self - - def comparator(self, comparator): - """Provide a modifying decorator that defines a custom comparator producing method. - - The return value of the decorated method should be an instance of - :class:`~.hybrid.Comparator`. - - """ - - proxy_attr = attributes.\ - create_proxied_attribute(self) - def expr(owner): - return proxy_attr(owner, self.__name__, self, comparator(owner)) - self.expr = expr - return self - - -class Comparator(interfaces.PropComparator): - """A helper class that allows easy construction of custom :class:`~.orm.interfaces.PropComparator` - classes for usage with hybrids.""" - - - def __init__(self, expression): - self.expression = expression - - def __clause_element__(self): - expr = self.expression - while hasattr(expr, '__clause_element__'): - expr = expr.__clause_element__() - return expr - - def adapted(self, adapter): - # interesting.... 
- return self - - diff --git a/libs/sqlalchemy/ext/mutable.py b/libs/sqlalchemy/ext/mutable.py deleted file mode 100644 index 36e8fcaf..00000000 --- a/libs/sqlalchemy/ext/mutable.py +++ /dev/null @@ -1,596 +0,0 @@ -# ext/mutable.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provide support for tracking of in-place changes to scalar values, -which are propagated into ORM change events on owning parent objects. - -The :mod:`sqlalchemy.ext.mutable` extension replaces SQLAlchemy's legacy approach to in-place -mutations of scalar values, established by the :class:`.types.MutableType` -class as well as the ``mutable=True`` type flag, with a system that allows -change events to be propagated from the value to the owning parent, thereby -removing the need for the ORM to maintain copies of values as well as the very -expensive requirement of scanning through all "mutable" values on each flush -call, looking for changes. - -.. _mutable_scalars: - -Establishing Mutability on Scalar Column Values -=============================================== - -A typical example of a "mutable" structure is a Python dictionary. -Following the example introduced in :ref:`types_toplevel`, we -begin with a custom type that marshals Python dictionaries into -JSON strings before being persisted:: - - from sqlalchemy.types import TypeDecorator, VARCHAR - import json - - class JSONEncodedDict(TypeDecorator): - "Represents an immutable structure as a json-encoded string." - - impl = VARCHAR - - def process_bind_param(self, value, dialect): - if value is not None: - value = json.dumps(value) - return value - - def process_result_value(self, value, dialect): - if value is not None: - value = json.loads(value) - return value - -The usage of ``json`` is only for the purposes of example. The :mod:`sqlalchemy.ext.mutable` -extension can be used -with any type whose target Python type may be mutable, including -:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc. - -When using the :mod:`sqlalchemy.ext.mutable` extension, the value itself -tracks all parents which reference it. Here we will replace the usage -of plain Python dictionaries with a dict subclass that implements -the :class:`.Mutable` mixin:: - - import collections - from sqlalchemy.ext.mutable import Mutable - - class MutationDict(Mutable, dict): - @classmethod - def coerce(cls, key, value): - "Convert plain dictionaries to MutationDict." - - if not isinstance(value, MutationDict): - if isinstance(value, dict): - return MutationDict(value) - - # this call will raise ValueError - return Mutable.coerce(key, value) - else: - return value - - def __setitem__(self, key, value): - "Detect dictionary set events and emit change events." - - dict.__setitem__(self, key, value) - self.changed() - - def __delitem__(self, key): - "Detect dictionary del events and emit change events." - - dict.__delitem__(self, key) - self.changed() - -The above dictionary class takes the approach of subclassing the Python -built-in ``dict`` to produce a dict -subclass which routes all mutation events through ``__setitem__``. There are -many variants on this approach, such as subclassing ``UserDict.UserDict``, -the newer ``collections.MutableMapping``, etc. The part that's important to this -example is that the :meth:`.Mutable.changed` method is called whenever an in-place change to the -datastructure takes place. 
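-The same routing applies to any other mutating method of ``dict`` we care to
-support; a short sketch (an editorial addition, not part of the original
-example)::
-
-    class MutationDict(Mutable, dict):
-        # ... as above, plus:
-
-        def update(self, *a, **kw):
-            "Detect bulk update events and emit change events."
-
-            dict.update(self, *a, **kw)
-            self.changed()
-
-        def clear(self):
-            "Detect clear events and emit change events."
-
-            dict.clear(self)
-            self.changed()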
- -We also redefine the :meth:`.Mutable.coerce` method which will be used to -convert any values that are not instances of ``MutationDict``, such -as the plain dictionaries returned by the ``json`` module, into the -appropriate type. Defining this method is optional; we could just as well created our -``JSONEncodedDict`` such that it always returns an instance of ``MutationDict``, -and additionally ensured that all calling code uses ``MutationDict`` -explicitly. When :meth:`.Mutable.coerce` is not overridden, any values -applied to a parent object which are not instances of the mutable type -will raise a ``ValueError``. - -Our new ``MutationDict`` type offers a class method -:meth:`~.Mutable.as_mutable` which we can use within column metadata -to associate with types. This method grabs the given type object or -class and associates a listener that will detect all future mappings -of this type, applying event listening instrumentation to the mapped -attribute. Such as, with classical table metadata:: - - from sqlalchemy import Table, Column, Integer - - my_data = Table('my_data', metadata, - Column('id', Integer, primary_key=True), - Column('data', MutationDict.as_mutable(JSONEncodedDict)) - ) - -Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict`` -(if the type object was not an instance already), which will intercept any -attributes which are mapped against this type. Below we establish a simple -mapping against the ``my_data`` table:: - - from sqlalchemy import mapper - - class MyDataClass(object): - pass - - # associates mutation listeners with MyDataClass.data - mapper(MyDataClass, my_data) - -The ``MyDataClass.data`` member will now be notified of in place changes -to its value. - -There's no difference in usage when using declarative:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class MyDataClass(Base): - __tablename__ = 'my_data' - id = Column(Integer, primary_key=True) - data = Column(MutationDict.as_mutable(JSONEncodedDict)) - -Any in-place changes to the ``MyDataClass.data`` member -will flag the attribute as "dirty" on the parent object:: - - >>> from sqlalchemy.orm import Session - - >>> sess = Session() - >>> m1 = MyDataClass(data={'value1':'foo'}) - >>> sess.add(m1) - >>> sess.commit() - - >>> m1.data['value1'] = 'bar' - >>> assert m1 in sess.dirty - True - -The ``MutationDict`` can be associated with all future instances -of ``JSONEncodedDict`` in one step, using :meth:`~.Mutable.associate_with`. This -is similar to :meth:`~.Mutable.as_mutable` except it will intercept -all occurrences of ``MutationDict`` in all mappings unconditionally, without -the need to declare it individually:: - - MutationDict.associate_with(JSONEncodedDict) - - class MyDataClass(Base): - __tablename__ = 'my_data' - id = Column(Integer, primary_key=True) - data = Column(JSONEncodedDict) - - -Supporting Pickling --------------------- - -The key to the :mod:`sqlalchemy.ext.mutable` extension relies upon the -placement of a ``weakref.WeakKeyDictionary`` upon the value object, which -stores a mapping of parent mapped objects keyed to the attribute name under -which they are associated with this value. ``WeakKeyDictionary`` objects are -not picklable, due to the fact that they contain weakrefs and function -callbacks. 
In our case, this is a good thing, since if this dictionary were -picklable, it could lead to an excessively large pickle size for our value -objects that are pickled by themselves outside of the context of the parent. -The developer responsibility here is only to provide a ``__getstate__`` method -that excludes the :meth:`~.MutableBase._parents` collection from the pickle -stream:: - - class MyMutableType(Mutable): - def __getstate__(self): - d = self.__dict__.copy() - d.pop('_parents', None) - return d - -With our dictionary example, we need to return the contents of the dict itself -(and also restore them on __setstate__):: - - class MutationDict(Mutable, dict): - # .... - - def __getstate__(self): - return dict(self) - - def __setstate__(self, state): - self.update(state) - -In the case that our mutable value object is pickled as it is attached to one -or more parent objects that are also part of the pickle, the :class:`.Mutable` -mixin will re-establish the :attr:`.Mutable._parents` collection on each value -object as the owning parents themselves are unpickled. - -.. _mutable_composites: - -Establishing Mutability on Composites -===================================== - -Composites are a special ORM feature which allow a single scalar attribute to -be assigned an object value which represents information "composed" from one -or more columns from the underlying mapped table. The usual example is that of -a geometric "point", and is introduced in :ref:`mapper_composite`. - -.. versionchanged:: 0.7 - The internals of :func:`.orm.composite` have been - greatly simplified and in-place mutation detection is no longer enabled by - default; instead, the user-defined value must detect changes on its own and - propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable` - extension provides the helper class :class:`.MutableComposite`, which is a - slight variant on the :class:`.Mutable` class. - -As is the case with :class:`.Mutable`, the user-defined composite class -subclasses :class:`.MutableComposite` as a mixin, and detects and delivers -change events to its parents via the :meth:`.MutableComposite.changed` method. -In the case of a composite class, the detection is usually via the usage of -Python descriptors (i.e. ``@property``), or alternatively via the special -Python method ``__setattr__()``. Below we expand upon the ``Point`` class -introduced in :ref:`mapper_composite` to subclass :class:`.MutableComposite` -and to also route attribute set events via ``__setattr__`` to the -:meth:`.MutableComposite.changed` method:: - - from sqlalchemy.ext.mutable import MutableComposite - - class Point(MutableComposite): - def __init__(self, x, y): - self.x = x - self.y = y - - def __setattr__(self, key, value): - "Intercept set events" - - # set the attribute - object.__setattr__(self, key, value) - - # alert all parents to the change - self.changed() - - def __composite_values__(self): - return self.x, self.y - - def __eq__(self, other): - return isinstance(other, Point) and \\ - other.x == self.x and \\ - other.y == self.y - - def __ne__(self, other): - return not self.__eq__(other) - -The :class:`.MutableComposite` class uses a Python metaclass to automatically -establish listeners for any usage of :func:`.orm.composite` that specifies our -``Point`` type. 
Below, when ``Point`` is mapped to the ``Vertex`` class, -listeners are established which will route change events from ``Point`` -objects to each of the ``Vertex.start`` and ``Vertex.end`` attributes:: - - from sqlalchemy.orm import composite, mapper - from sqlalchemy import Table, Column - - vertices = Table('vertices', metadata, - Column('id', Integer, primary_key=True), - Column('x1', Integer), - Column('y1', Integer), - Column('x2', Integer), - Column('y2', Integer), - ) - - class Vertex(object): - pass - - mapper(Vertex, vertices, properties={ - 'start': composite(Point, vertices.c.x1, vertices.c.y1), - 'end': composite(Point, vertices.c.x2, vertices.c.y2) - }) - -Any in-place changes to the ``Vertex.start`` or ``Vertex.end`` members -will flag the attribute as "dirty" on the parent object:: - - >>> from sqlalchemy.orm import Session - - >>> sess = Session() - >>> v1 = Vertex(start=Point(3, 4), end=Point(12, 15)) - >>> sess.add(v1) - >>> sess.commit() - - >>> v1.end.x = 8 - >>> assert v1 in sess.dirty - True - -Coercing Mutable Composites ---------------------------- - -The :meth:`.MutableBase.coerce` method is also supported on composite types. -In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce` -method is only called for attribute set operations, not load operations. -Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent -to using a :func:`.validates` validation routine for all attributes which -make use of the custom composite type:: - - class Point(MutableComposite): - # other Point methods - # ... - - def coerce(cls, key, value): - if isinstance(value, tuple): - value = Point(*value) - elif not isinstance(value, Point): - raise ValueError("tuple or Point expected") - return value - -.. versionadded:: 0.7.10,0.8.0b2 - Support for the :meth:`.MutableBase.coerce` method in conjunction with - objects of type :class:`.MutableComposite`. - -Supporting Pickling --------------------- - -As is the case with :class:`.Mutable`, the :class:`.MutableComposite` helper -class uses a ``weakref.WeakKeyDictionary`` available via the -:meth:`.MutableBase._parents` attribute which isn't picklable. If we need to -pickle instances of ``Point`` or its owning class ``Vertex``, we at least need -to define a ``__getstate__`` that doesn't include the ``_parents`` dictionary. -Below we define both a ``__getstate__`` and a ``__setstate__`` that package up -the minimal form of our ``Point`` class:: - - class Point(MutableComposite): - # ... - - def __getstate__(self): - return self.x, self.y - - def __setstate__(self, state): - self.x, self.y = state - -As with :class:`.Mutable`, the :class:`.MutableComposite` augments the -pickling process of the parent's object-relational state so that the -:meth:`.MutableBase._parents` collection is restored to all ``Point`` objects. - -""" -from sqlalchemy.orm.attributes import flag_modified -from sqlalchemy import event, types -from sqlalchemy.orm import mapper, object_mapper, Mapper -from sqlalchemy.util import memoized_property -import weakref - -class MutableBase(object): - """Common base class to :class:`.Mutable` and :class:`.MutableComposite`.""" - - @memoized_property - def _parents(self): - """Dictionary of parent object->attribute name on the parent. - - This attribute is a so-called "memoized" property. It initializes - itself with a new ``weakref.WeakKeyDictionary`` the first time - it is accessed, returning the same object upon subsequent access. 
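-
-        Roughly (an illustrative sketch, using the ``MutationDict`` example
-        from the module docstring)::
-
-            m = MutationDict()
-            assert m._parents is m._parents  # one cached WeakKeyDictionary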
- - """ - - return weakref.WeakKeyDictionary() - - @classmethod - def coerce(cls, key, value): - """Given a value, coerce it into the target type. - - Can be overridden by custom subclasses to coerce incoming - data into a particular type. - - By default, raises ``ValueError``. - - This method is called in different scenarios depending on if - the parent class is of type :class:`.Mutable` or of type - :class:`.MutableComposite`. In the case of the former, it is called - for both attribute-set operations as well as during ORM loading - operations. For the latter, it is only called during attribute-set - operations; the mechanics of the :func:`.composite` construct - handle coercion during load operations. - - - :param key: string name of the ORM-mapped attribute being set. - :param value: the incoming value. - :return: the method should return the coerced value, or raise - ``ValueError`` if the coercion cannot be completed. - - """ - if value is None: - return None - raise ValueError("Attribute '%s' does not accept objects of type %s" % (key, type(value))) - - @classmethod - def _listen_on_attribute(cls, attribute, coerce, parent_cls): - """Establish this type as a mutation listener for the given - mapped descriptor. - - """ - key = attribute.key - if parent_cls is not attribute.class_: - return - - # rely on "propagate" here - parent_cls = attribute.class_ - - def load(state, *args): - """Listen for objects loaded or refreshed. - - Wrap the target data member's value with - ``Mutable``. - - """ - val = state.dict.get(key, None) - if val is not None: - if coerce: - val = cls.coerce(key, val) - state.dict[key] = val - val._parents[state.obj()] = key - - def set(target, value, oldvalue, initiator): - """Listen for set/replace events on the target - data member. - - Establish a weak reference to the parent object - on the incoming value, remove it for the one - outgoing. - - """ - if not isinstance(value, cls): - value = cls.coerce(key, value) - if value is not None: - value._parents[target.obj()] = key - if isinstance(oldvalue, cls): - oldvalue._parents.pop(target.obj(), None) - return value - - def pickle(state, state_dict): - val = state.dict.get(key, None) - if val is not None: - if 'ext.mutable.values' not in state_dict: - state_dict['ext.mutable.values'] = [] - state_dict['ext.mutable.values'].append(val) - - def unpickle(state, state_dict): - if 'ext.mutable.values' in state_dict: - for val in state_dict['ext.mutable.values']: - val._parents[state.obj()] = key - - - event.listen(parent_cls, 'load', load, raw=True, propagate=True) - event.listen(parent_cls, 'refresh', load, raw=True, propagate=True) - event.listen(attribute, 'set', set, raw=True, retval=True, propagate=True) - event.listen(parent_cls, 'pickle', pickle, raw=True, propagate=True) - event.listen(parent_cls, 'unpickle', unpickle, raw=True, propagate=True) - -class Mutable(MutableBase): - """Mixin that defines transparent propagation of change - events to a parent object. - - See the example in :ref:`mutable_scalars` for usage information. - - """ - - def changed(self): - """Subclasses should call this method whenever change events occur.""" - - for parent, key in self._parents.items(): - flag_modified(parent, key) - - @classmethod - def associate_with_attribute(cls, attribute): - """Establish this type as a mutation listener for the given - mapped descriptor. 
- - """ - cls._listen_on_attribute(attribute, True, attribute.class_) - - @classmethod - def associate_with(cls, sqltype): - """Associate this wrapper with all future mapped columns - of the given type. - - This is a convenience method that calls ``associate_with_attribute`` automatically. - - .. warning:: - - The listeners established by this method are *global* - to all mappers, and are *not* garbage collected. Only use - :meth:`.associate_with` for types that are permanent to an application, - not with ad-hoc types else this will cause unbounded growth - in memory usage. - - """ - - def listen_for_type(mapper, class_): - for prop in mapper.iterate_properties: - if hasattr(prop, 'columns'): - if isinstance(prop.columns[0].type, sqltype): - cls.associate_with_attribute(getattr(class_, prop.key)) - - event.listen(mapper, 'mapper_configured', listen_for_type) - - @classmethod - def as_mutable(cls, sqltype): - """Associate a SQL type with this mutable Python type. - - This establishes listeners that will detect ORM mappings against - the given type, adding mutation event trackers to those mappings. - - The type is returned, unconditionally as an instance, so that - :meth:`.as_mutable` can be used inline:: - - Table('mytable', metadata, - Column('id', Integer, primary_key=True), - Column('data', MyMutableType.as_mutable(PickleType)) - ) - - Note that the returned type is always an instance, even if a class - is given, and that only columns which are declared specifically with that - type instance receive additional instrumentation. - - To associate a particular mutable type with all occurrences of a - particular type, use the :meth:`.Mutable.associate_with` classmethod - of the particular :meth:`.Mutable` subclass to establish a global - association. - - .. warning:: - - The listeners established by this method are *global* - to all mappers, and are *not* garbage collected. Only use - :meth:`.as_mutable` for types that are permanent to an application, - not with ad-hoc types else this will cause unbounded growth - in memory usage. - - """ - sqltype = types.to_instance(sqltype) - - def listen_for_type(mapper, class_): - for prop in mapper.iterate_properties: - if hasattr(prop, 'columns'): - if prop.columns[0].type is sqltype: - cls.associate_with_attribute(getattr(class_, prop.key)) - - event.listen(mapper, 'mapper_configured', listen_for_type) - - return sqltype - -class MutableComposite(MutableBase): - """Mixin that defines transparent propagation of change - events on a SQLAlchemy "composite" object to its - owning parent or parents. - - See the example in :ref:`mutable_composites` for usage information. - - .. warning:: - - The listeners established by the :class:`.MutableComposite` - class are *global* to all mappers, and are *not* garbage collected. Only use - :class:`.MutableComposite` for types that are permanent to an application, - not with ad-hoc types else this will cause unbounded growth - in memory usage. 
- - """ - - def changed(self): - """Subclasses should call this method whenever change events occur.""" - - for parent, key in self._parents.items(): - - prop = object_mapper(parent).get_property(key) - for value, attr_name in zip( - self.__composite_values__(), - prop._attribute_keys): - setattr(parent, attr_name, value) - - -def _setup_composite_listener(): - def _listen_for_type(mapper, class_): - for prop in mapper.iterate_properties: - if (hasattr(prop, 'composite_class') and - issubclass(prop.composite_class, MutableComposite)): - prop.composite_class._listen_on_attribute( - getattr(class_, prop.key), False, class_) - if not Mapper.dispatch.mapper_configured._contains(Mapper, _listen_for_type): - event.listen(Mapper, 'mapper_configured', _listen_for_type) -_setup_composite_listener() diff --git a/libs/sqlalchemy/ext/orderinglist.py b/libs/sqlalchemy/ext/orderinglist.py deleted file mode 100644 index 6a9b6c39..00000000 --- a/libs/sqlalchemy/ext/orderinglist.py +++ /dev/null @@ -1,367 +0,0 @@ -# ext/orderinglist.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""A custom list that manages index/position information for contained -elements. - -:author: Jason Kirtland - -``orderinglist`` is a helper for mutable ordered relationships. It will -intercept list operations performed on a :func:`.relationship`-managed -collection and -automatically synchronize changes in list position onto a target scalar -attribute. - -Example: A ``slide`` table, where each row refers to zero or more entries -in a related ``bullet`` table. The bullets within a slide are -displayed in order based on the value of the ``position`` column in the -``bullet`` table. As entries are reordered in memory, the value of the -``position`` attribute should be updated to reflect the new sort order:: - - - Base = declarative_base() - - class Slide(Base): - __tablename__ = 'slide' - - id = Column(Integer, primary_key=True) - name = Column(String) - - bullets = relationship("Bullet", order_by="Bullet.position") - - class Bullet(Base): - __tablename__ = 'bullet' - id = Column(Integer, primary_key=True) - slide_id = Column(Integer, ForeignKey('slide.id')) - position = Column(Integer) - text = Column(String) - -The standard relationship mapping will produce a list-like attribute on each -``Slide`` containing all related ``Bullet`` objects, -but coping with changes in ordering is not handled automatically. -When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position`` -attribute will remain unset until manually assigned. When the ``Bullet`` -is inserted into the middle of the list, the following ``Bullet`` objects -will also need to be renumbered. - -The :class:`.OrderingList` object automates this task, managing the -``position`` attribute on all ``Bullet`` objects in the collection. 
It is -constructed using the :func:`.ordering_list` factory:: - - from sqlalchemy.ext.orderinglist import ordering_list - - Base = declarative_base() - - class Slide(Base): - __tablename__ = 'slide' - - id = Column(Integer, primary_key=True) - name = Column(String) - - bullets = relationship("Bullet", order_by="Bullet.position", - collection_class=ordering_list('position')) - - class Bullet(Base): - __tablename__ = 'bullet' - id = Column(Integer, primary_key=True) - slide_id = Column(Integer, ForeignKey('slide.id')) - position = Column(Integer) - text = Column(String) - -With the above mapping the ``Bullet.position`` attribute is managed:: - - s = Slide() - s.bullets.append(Bullet()) - s.bullets.append(Bullet()) - s.bullets[1].position - >>> 1 - s.bullets.insert(1, Bullet()) - s.bullets[2].position - >>> 2 - -The :class:`.OrderingList` construct only works with **changes** to a collection, -and not the initial load from the database, and requires that the list be -sorted when loaded. Therefore, be sure to -specify ``order_by`` on the :func:`.relationship` against the target ordering -attribute, so that the ordering is correct when first loaded. - -.. warning:: - - :class:`.OrderingList` only provides limited functionality when a primary - key column or unique column is the target of the sort. Since changing the - order of entries often means that two rows must trade values, this is not - possible when the value is constrained by a primary key or unique - constraint, since one of the rows would temporarily have to point to a - third available value so that the other row could take its old - value. :class:`.OrderingList` doesn't do any of this for you, - nor does SQLAlchemy itself. - -:func:`.ordering_list` takes the name of the related object's ordering attribute as -an argument. By default, the zero-based integer index of the object's -position in the :func:`.ordering_list` is synchronized with the ordering attribute: -index 0 will get position 0, index 1 position 1, etc. To start numbering at 1 -or some other integer, provide ``count_from=1``. - - -""" -from sqlalchemy.orm.collections import collection -from sqlalchemy import util - -__all__ = ['ordering_list'] - - -def ordering_list(attr, count_from=None, **kw): - """Prepares an :class:`OrderingList` factory for use in mapper definitions. - - Returns an object suitable for use as an argument to a Mapper - relationship's ``collection_class`` option. e.g.:: - - from sqlalchemy.ext.orderinglist import ordering_list - - class Slide(Base): - __tablename__ = 'slide' - - id = Column(Integer, primary_key=True) - name = Column(String) - - bullets = relationship("Bullet", order_by="Bullet.position", - collection_class=ordering_list('position')) - - :param attr: - Name of the mapped attribute to use for storage and retrieval of - ordering information - - :param count_from: - Set up an integer-based ordering, starting at ``count_from``. For - example, ``ordering_list('pos', count_from=1)`` would create a 1-based - list in SQL, storing the value in the 'pos' column. Ignored if - ``ordering_func`` is supplied. - - Additional arguments are passed to the :class:`.OrderingList` constructor. 
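-
-    For example, a 1-based ``position`` column (a sketch reusing the
-    ``Bullet`` mapping from the module docstring)::
-
-        bullets = relationship("Bullet", order_by="Bullet.position",
-                        collection_class=ordering_list('position', count_from=1))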
- - """ - - kw = _unsugar_count_from(count_from=count_from, **kw) - return lambda: OrderingList(attr, **kw) - - -# Ordering utility functions - - -def count_from_0(index, collection): - """Numbering function: consecutive integers starting at 0.""" - - return index - - -def count_from_1(index, collection): - """Numbering function: consecutive integers starting at 1.""" - - return index + 1 - - -def count_from_n_factory(start): - """Numbering function: consecutive integers starting at arbitrary start.""" - - def f(index, collection): - return index + start - try: - f.__name__ = 'count_from_%i' % start - except TypeError: - pass - return f - - -def _unsugar_count_from(**kw): - """Builds counting functions from keyword arguments. - - Keyword argument filter, prepares a simple ``ordering_func`` from a - ``count_from`` argument, otherwise passes ``ordering_func`` on unchanged. - """ - - count_from = kw.pop('count_from', None) - if kw.get('ordering_func', None) is None and count_from is not None: - if count_from == 0: - kw['ordering_func'] = count_from_0 - elif count_from == 1: - kw['ordering_func'] = count_from_1 - else: - kw['ordering_func'] = count_from_n_factory(count_from) - return kw - - -class OrderingList(list): - """A custom list that manages position information for its children. - - The :class:`.OrderingList` object is normally set up using the - :func:`.ordering_list` factory function, used in conjunction with - the :func:`.relationship` function. - - """ - - def __init__(self, ordering_attr=None, ordering_func=None, - reorder_on_append=False): - """A custom list that manages position information for its children. - - ``OrderingList`` is a ``collection_class`` list implementation that - syncs position in a Python list with a position attribute on the - mapped objects. - - This implementation relies on the list starting in the proper order, - so be **sure** to put an ``order_by`` on your relationship. - - :param ordering_attr: - Name of the attribute that stores the object's order in the - relationship. - - :param ordering_func: Optional. A function that maps the position in - the Python list to a value to store in the - ``ordering_attr``. Values returned are usually (but need not be!) - integers. - - An ``ordering_func`` is called with two positional parameters: the - index of the element in the list, and the list itself. - - If omitted, Python list indexes are used for the attribute values. - Two basic pre-built numbering functions are provided in this module: - ``count_from_0`` and ``count_from_1``. For more exotic examples - like stepped numbering, alphabetical and Fibonacci numbering, see - the unit tests. - - :param reorder_on_append: - Default False. When appending an object with an existing (non-None) - ordering value, that value will be left untouched unless - ``reorder_on_append`` is true. This is an optimization to avoid a - variety of dangerous unexpected database writes. - - SQLAlchemy will add instances to the list via append() when your - object loads. If for some reason the result set from the database - skips a step in the ordering (say, row '1' is missing but you get - '2', '3', and '4'), reorder_on_append=True would immediately - renumber the items to '1', '2', '3'. If you have multiple sessions - making changes, any of whom happen to load this collection even in - passing, all of the sessions would try to "clean up" the numbering - in their commits, possibly causing all but one to fail with a - concurrent modification error. 
- - Recommend leaving this with the default of False, and just call - ``reorder()`` if you're doing ``append()`` operations with - previously ordered instances or when doing some housekeeping after - manual sql operations. - - """ - self.ordering_attr = ordering_attr - if ordering_func is None: - ordering_func = count_from_0 - self.ordering_func = ordering_func - self.reorder_on_append = reorder_on_append - - # More complex serialization schemes (multi column, e.g.) are possible by - # subclassing and reimplementing these two methods. - def _get_order_value(self, entity): - return getattr(entity, self.ordering_attr) - - def _set_order_value(self, entity, value): - setattr(entity, self.ordering_attr, value) - - def reorder(self): - """Synchronize ordering for the entire collection. - - Sweeps through the list and ensures that each object has accurate - ordering information set. - - """ - for index, entity in enumerate(self): - self._order_entity(index, entity, True) - - # As of 0.5, _reorder is no longer semi-private - _reorder = reorder - - def _order_entity(self, index, entity, reorder=True): - have = self._get_order_value(entity) - - # Don't disturb existing ordering if reorder is False - if have is not None and not reorder: - return - - should_be = self.ordering_func(index, self) - if have != should_be: - self._set_order_value(entity, should_be) - - def append(self, entity): - super(OrderingList, self).append(entity) - self._order_entity(len(self) - 1, entity, self.reorder_on_append) - - def _raw_append(self, entity): - """Append without any ordering behavior.""" - - super(OrderingList, self).append(entity) - _raw_append = collection.adds(1)(_raw_append) - - def insert(self, index, entity): - super(OrderingList, self).insert(index, entity) - self._reorder() - - def remove(self, entity): - super(OrderingList, self).remove(entity) - self._reorder() - - def pop(self, index=-1): - entity = super(OrderingList, self).pop(index) - self._reorder() - return entity - - def __setitem__(self, index, entity): - if isinstance(index, slice): - step = index.step or 1 - start = index.start or 0 - if start < 0: - start += len(self) - stop = index.stop or len(self) - if stop < 0: - stop += len(self) - - for i in xrange(start, stop, step): - self.__setitem__(i, entity[i]) - else: - self._order_entity(index, entity, True) - super(OrderingList, self).__setitem__(index, entity) - - def __delitem__(self, index): - super(OrderingList, self).__delitem__(index) - self._reorder() - - # Py2K - def __setslice__(self, start, end, values): - super(OrderingList, self).__setslice__(start, end, values) - self._reorder() - - def __delslice__(self, start, end): - super(OrderingList, self).__delslice__(start, end) - self._reorder() - # end Py2K - - def __reduce__(self): - return _reconstitute, (self.__class__, self.__dict__, list(self)) - - for func_name, func in locals().items(): - if (util.callable(func) and func.func_name == func_name and - not func.__doc__ and hasattr(list, func_name)): - func.__doc__ = getattr(list, func_name).__doc__ - del func_name, func - - -def _reconstitute(cls, dict_, items): - """ Reconstitute an :class:`.OrderingList`. - - This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for - unpickling :class:`.OrderingList` objects. 
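A hedged sketch of the round trip this supports, assuming ``slide.bullets`` is an :class:`.OrderingList` from the earlier example::

    import pickle

    data = pickle.dumps(slide.bullets)   # routed through OrderingList.__reduce__
    copy = pickle.loads(data)            # rebuilt by _reconstitute
    assert copy.ordering_attr == 'position'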
- - """ - obj = cls.__new__(cls) - obj.__dict__.update(dict_) - list.extend(obj, items) - return obj diff --git a/libs/sqlalchemy/ext/serializer.py b/libs/sqlalchemy/ext/serializer.py deleted file mode 100644 index 47121bca..00000000 --- a/libs/sqlalchemy/ext/serializer.py +++ /dev/null @@ -1,161 +0,0 @@ -# ext/serializer.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Serializer/Deserializer objects for usage with SQLAlchemy query structures, -allowing "contextual" deserialization. - -Any SQLAlchemy query structure, either based on sqlalchemy.sql.* -or sqlalchemy.orm.* can be used. The mappers, Tables, Columns, Session -etc. which are referenced by the structure are not persisted in serialized -form, but are instead re-associated with the query structure -when it is deserialized. - -Usage is nearly the same as that of the standard Python pickle module:: - - from sqlalchemy.ext.serializer import loads, dumps - metadata = MetaData(bind=some_engine) - Session = scoped_session(sessionmaker()) - - # ... define mappers - - query = Session.query(MyClass).filter(MyClass.somedata=='foo').order_by(MyClass.sortkey) - - # pickle the query - serialized = dumps(query) - - # unpickle. Pass in metadata + scoped_session - query2 = loads(serialized, metadata, Session) - - print query2.all() - -Similar restrictions as when using raw pickle apply; mapped classes must be -themselves be pickleable, meaning they are importable from a module-level -namespace. - -The serializer module is only appropriate for query structures. It is not -needed for: - -* instances of user-defined classes. These contain no references to engines, - sessions or expression constructs in the typical case and can be serialized directly. - -* Table metadata that is to be loaded entirely from the serialized structure (i.e. is - not already declared in the application). Regular pickle.loads()/dumps() can - be used to fully dump any ``MetaData`` object, typically one which was reflected - from an existing database at some previous point in time. The serializer module - is specifically for the opposite case, where the Table metadata is already present - in memory. 
- -""" - -from sqlalchemy.orm import class_mapper, Query -from sqlalchemy.orm.session import Session -from sqlalchemy.orm.mapper import Mapper -from sqlalchemy.orm.attributes import QueryableAttribute -from sqlalchemy import Table, Column -from sqlalchemy.engine import Engine -from sqlalchemy.util import pickle -import re -import base64 -# Py3K -#from io import BytesIO as byte_buffer -# Py2K -from cStringIO import StringIO as byte_buffer -# end Py2K - -# Py3K -#def b64encode(x): -# return base64.b64encode(x).decode('ascii') -#def b64decode(x): -# return base64.b64decode(x.encode('ascii')) -# Py2K -b64encode = base64.b64encode -b64decode = base64.b64decode -# end Py2K - -__all__ = ['Serializer', 'Deserializer', 'dumps', 'loads'] - - - -def Serializer(*args, **kw): - pickler = pickle.Pickler(*args, **kw) - - def persistent_id(obj): - #print "serializing:", repr(obj) - if isinstance(obj, QueryableAttribute): - cls = obj.impl.class_ - key = obj.impl.key - id = "attribute:" + key + ":" + b64encode(pickle.dumps(cls)) - elif isinstance(obj, Mapper) and not obj.non_primary: - id = "mapper:" + b64encode(pickle.dumps(obj.class_)) - elif isinstance(obj, Table): - id = "table:" + str(obj) - elif isinstance(obj, Column) and isinstance(obj.table, Table): - id = "column:" + str(obj.table) + ":" + obj.key - elif isinstance(obj, Session): - id = "session:" - elif isinstance(obj, Engine): - id = "engine:" - else: - return None - return id - - pickler.persistent_id = persistent_id - return pickler - -our_ids = re.compile(r'(mapper|table|column|session|attribute|engine):(.*)') - -def Deserializer(file, metadata=None, scoped_session=None, engine=None): - unpickler = pickle.Unpickler(file) - - def get_engine(): - if engine: - return engine - elif scoped_session and scoped_session().bind: - return scoped_session().bind - elif metadata and metadata.bind: - return metadata.bind - else: - return None - - def persistent_load(id): - m = our_ids.match(id) - if not m: - return None - else: - type_, args = m.group(1, 2) - if type_ == 'attribute': - key, clsarg = args.split(":") - cls = pickle.loads(b64decode(clsarg)) - return getattr(cls, key) - elif type_ == "mapper": - cls = pickle.loads(b64decode(args)) - return class_mapper(cls) - elif type_ == "table": - return metadata.tables[args] - elif type_ == "column": - table, colname = args.split(':') - return metadata.tables[table].c[colname] - elif type_ == "session": - return scoped_session() - elif type_ == "engine": - return get_engine() - else: - raise Exception("Unknown token: %s" % type_) - unpickler.persistent_load = persistent_load - return unpickler - -def dumps(obj, protocol=0): - buf = byte_buffer() - pickler = Serializer(buf, protocol) - pickler.dump(obj) - return buf.getvalue() - -def loads(data, metadata=None, scoped_session=None, engine=None): - buf = byte_buffer(data) - unpickler = Deserializer(buf, metadata, scoped_session, engine) - return unpickler.load() - - diff --git a/libs/sqlalchemy/ext/sqlsoup.py b/libs/sqlalchemy/ext/sqlsoup.py deleted file mode 100644 index 486b09c5..00000000 --- a/libs/sqlalchemy/ext/sqlsoup.py +++ /dev/null @@ -1,811 +0,0 @@ -# ext/sqlsoup.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -.. versionchanged:: 0.8 - SQLSoup is now its own project. 
Documentation - and project status are available at: - http://pypi.python.org/pypi/sqlsoup and - http://readthedocs.org/docs/sqlsoup\ . - SQLSoup will no longer be included with SQLAlchemy. - - -Introduction -============ - -SqlSoup provides a convenient way to access existing database -tables without having to declare table or mapper classes ahead -of time. It is built on top of the SQLAlchemy ORM and provides a -super-minimalistic interface to an existing database. - -SqlSoup effectively provides a coarse grained, alternative -interface to working with the SQLAlchemy ORM, providing a "self -configuring" interface for extremely rudimental operations. It's -somewhat akin to a "super novice mode" version of the ORM. While -SqlSoup can be very handy, users are strongly encouraged to use -the full ORM for non-trivial applications. - -Suppose we have a database with users, books, and loans tables -(corresponding to the PyWebOff dataset, if you're curious). - -Creating a SqlSoup gateway is just like creating an SQLAlchemy -engine:: - - >>> from sqlalchemy.ext.sqlsoup import SqlSoup - >>> db = SqlSoup('sqlite:///:memory:') - -or, you can re-use an existing engine:: - - >>> db = SqlSoup(engine) - -You can optionally specify a schema within the database for your -SqlSoup:: - - >>> db.schema = myschemaname - -Loading objects -=============== - -Loading objects is as easy as this:: - - >>> users = db.users.all() - >>> users.sort() - >>> users - [ - MappedUsers(name=u'Joe Student',email=u'student@example.edu', - password=u'student',classname=None,admin=0), - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1) - ] - -Of course, letting the database do the sort is better:: - - >>> db.users.order_by(db.users.name).all() - [ - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1), - MappedUsers(name=u'Joe Student',email=u'student@example.edu', - password=u'student',classname=None,admin=0) - ] - -Field access is intuitive:: - - >>> users[0].email - u'student@example.edu' - -Of course, you don't want to load all users very often. Let's -add a WHERE clause. 
Let's also switch the order_by to DESC while -we're at it:: - - >>> from sqlalchemy import or_, and_, desc - >>> where = or_(db.users.name=='Bhargan Basepair', db.users.email=='student@example.edu') - >>> db.users.filter(where).order_by(desc(db.users.name)).all() - [ - MappedUsers(name=u'Joe Student',email=u'student@example.edu', - password=u'student',classname=None,admin=0), - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1) - ] - -You can also use .first() (to retrieve only the first object -from a query) or .one() (like .first when you expect exactly one -user -- it will raise an exception if more were returned):: - - >>> db.users.filter(db.users.name=='Bhargan Basepair').one() - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1) - -Since name is the primary key, this is equivalent to - - >>> db.users.get('Bhargan Basepair') - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1) - -This is also equivalent to - - >>> db.users.filter_by(name='Bhargan Basepair').one() - MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu', - password=u'basepair',classname=None,admin=1) - -filter_by is like filter, but takes kwargs instead of full -clause expressions. This makes it more concise for simple -queries like this, but you can't do complex queries like the -or\_ above or non-equality based comparisons this way. - -Full query documentation ------------------------- - -Get, filter, filter_by, order_by, limit, and the rest of the -query methods are explained in detail in -:ref:`ormtutorial_querying`. - -Modifying objects -================= - -Modifying objects is intuitive:: - - >>> user = _ - >>> user.email = 'basepair+nospam@example.edu' - >>> db.commit() - -(SqlSoup leverages the sophisticated SQLAlchemy unit-of-work -code, so multiple updates to a single object will be turned into -a single ``UPDATE`` statement when you commit.) - -To finish covering the basics, let's insert a new loan, then -delete it:: - - >>> book_id = db.books.filter_by(title='Regional Variation in Moss').first().id - >>> db.loans.insert(book_id=book_id, user_name=user.name) - MappedLoans(book_id=2,user_name=u'Bhargan Basepair',loan_date=None) - - >>> loan = db.loans.filter_by(book_id=2, user_name='Bhargan Basepair').one() - >>> db.delete(loan) - >>> db.commit() - -You can also delete rows that have not been loaded as objects. -Let's do our insert/delete cycle once more, this time using the -loans table's delete method. (For SQLAlchemy experts: note that -no flush() call is required since this delete acts at the SQL -level, not at the Mapper level.) The same where-clause -construction rules apply here as to the select methods:: - - >>> db.loans.insert(book_id=book_id, user_name=user.name) - MappedLoans(book_id=2,user_name=u'Bhargan Basepair',loan_date=None) - >>> db.loans.delete(db.loans.book_id==2) - -You can similarly update multiple rows at once. This will change the -book_id to 1 in all loans whose book_id is 2:: - - >>> db.loans.filter_by(db.loans.book_id==2).update({'book_id':1}) - >>> db.loans.filter_by(book_id=1).all() - [MappedLoans(book_id=1,user_name=u'Joe Student', - loan_date=datetime.datetime(2006, 7, 12, 0, 0))] - - -Joins -===== - -Occasionally, you will want to pull out a lot of data from related -tables all at once. 
In this situation, it is far more efficient to -have the database perform the necessary join. (Here we do not have *a -lot of data* but hopefully the concept is still clear.) SQLAlchemy is -smart enough to recognize that loans has a foreign key to users, and -uses that as the join condition automatically:: - - >>> join1 = db.join(db.users, db.loans, isouter=True) - >>> join1.filter_by(name='Joe Student').all() - [ - MappedJoin(name=u'Joe Student',email=u'student@example.edu', - password=u'student',classname=None,admin=0,book_id=1, - user_name=u'Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0)) - ] - -If you're unfortunate enough to be using MySQL with the default MyISAM -storage engine, you'll have to specify the join condition manually, -since MyISAM does not store foreign keys. Here's the same join again, -with the join condition explicitly specified:: - - >>> db.join(db.users, db.loans, db.users.name==db.loans.user_name, isouter=True) - - -You can compose arbitrarily complex joins by combining Join objects -with tables or other joins. Here we combine our first join with the -books table:: - - >>> join2 = db.join(join1, db.books) - >>> join2.all() - [ - MappedJoin(name=u'Joe Student',email=u'student@example.edu', - password=u'student',classname=None,admin=0,book_id=1, - user_name=u'Joe Student',loan_date=datetime.datetime(2006, 7, 12, 0, 0), - id=1,title=u'Mustards I Have Known',published_year=u'1989', - authors=u'Jones') - ] - -If you join tables that have an identical column name, wrap your join -with `with_labels`, to disambiguate columns with their table name -(.c is short for .columns):: - - >>> db.with_labels(join1).c.keys() - [u'users_name', u'users_email', u'users_password', - u'users_classname', u'users_admin', u'loans_book_id', - u'loans_user_name', u'loans_loan_date'] - -You can also join directly to a labeled object:: - - >>> labeled_loans = db.with_labels(db.loans) - >>> db.join(db.users, labeled_loans, isouter=True).c.keys() - [u'name', u'email', u'password', u'classname', - u'admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date'] - - -Relationships -============= - -You can define relationships on SqlSoup classes: - - >>> db.users.relate('loans', db.loans) - -These can then be used like a normal SA property: - - >>> db.users.get('Joe Student').loans - [MappedLoans(book_id=1,user_name=u'Joe Student', - loan_date=datetime.datetime(2006, 7, 12, 0, 0))] - - >>> db.users.filter(~db.users.loans.any()).all() - [MappedUsers(name=u'Bhargan Basepair', - email='basepair+nospam@example.edu', - password=u'basepair',classname=None,admin=1)] - -relate can take any options that the relationship function -accepts in normal mapper definition: - - >>> del db._cache['users'] - >>> db.users.relate('loans', db.loans, order_by=db.loans.loan_date, cascade='all, delete-orphan') - -Advanced Use -============ - -Sessions, Transactions and Application Integration ---------------------------------------------------- - -.. note:: - - Please read and understand this section thoroughly - before using SqlSoup in any web application. - -SqlSoup uses a ScopedSession to provide thread-local sessions. -You can get a reference to the current one like this:: - - >>> session = db.session - -The default session is available at the module level in SQLSoup, -via:: - - >>> from sqlalchemy.ext.sqlsoup import Session - -The configuration of this session is ``autoflush=True``, -``autocommit=False``. 
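Concretely, a short sketch continuing the example dataset::

    db.loans.insert(book_id=1, user_name=u'Joe Student')  # pending in the session
    db.commit()        # persist the pending INSERT
    # db.rollback()    # ...or discard pending work instead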
This means when you work with the SqlSoup -object, you need to call ``db.commit()`` in order to have -changes persisted. You may also call ``db.rollback()`` to roll -things back. - -Since the SqlSoup object's Session automatically enters into a -transaction as soon as it's used, it is *essential* that you -call ``commit()`` or ``rollback()`` on it when the work within a -thread completes. This means all the guidelines for web -application integration at :ref:`session_lifespan` must be -followed. - -The SqlSoup object can have any session or scoped session -configured onto it. This is of key importance when integrating -with existing code or frameworks such as Pylons. If your -application already has a ``Session`` configured, pass it to -your SqlSoup object:: - - >>> from myapplication import Session - >>> db = SqlSoup(session=Session) - -If the ``Session`` is configured with ``autocommit=True``, use -``flush()`` instead of ``commit()`` to persist changes - in this -case, the ``Session`` closes out its transaction immediately and -no external management is needed. ``rollback()`` is also not -available. Configuring a new SQLSoup object in "autocommit" mode -looks like:: - - >>> from sqlalchemy.orm import scoped_session, sessionmaker - >>> db = SqlSoup('sqlite://', session=scoped_session(sessionmaker(autoflush=False, expire_on_commit=False, autocommit=True))) - - -Mapping arbitrary Selectables ------------------------------ - -SqlSoup can map any SQLAlchemy :class:`.Selectable` with the map -method. Let's map an :func:`.expression.select` object that uses an aggregate -function; we'll use the SQLAlchemy :class:`.Table` that SqlSoup -introspected as the basis. (Since we're not mapping to a simple -table or join, we need to tell SQLAlchemy how to find the -*primary key* which just needs to be unique within the select, -and not necessarily correspond to a *real* PK in the database.):: - - >>> from sqlalchemy import select, func - >>> b = db.books._table - >>> s = select([b.c.published_year, func.count('*').label('n')], from_obj=[b], group_by=[b.c.published_year]) - >>> s = s.alias('years_with_count') - >>> years_with_count = db.map(s, primary_key=[s.c.published_year]) - >>> years_with_count.filter_by(published_year='1989').all() - [MappedBooks(published_year=u'1989',n=1)] - -Obviously if we just wanted to get a list of counts associated with -book years once, raw SQL is going to be less work. The advantage of -mapping a Select is reusability, both standalone and in Joins. (And if -you go to full SQLAlchemy, you can perform mappings like this directly -to your object models.) - -An easy way to save mapped selectables like this is to just hang them on -your db object:: - - >>> db.years_with_count = years_with_count - -Python is flexible like that! - -Raw SQL -------- - -SqlSoup works fine with SQLAlchemy's text construct, described -in :ref:`sqlexpression_text`. You can also execute textual SQL -directly using the `execute()` method, which corresponds to the -`execute()` method on the underlying `Session`. Expressions here -are expressed like ``text()`` constructs, using named parameters -with colons:: - - >>> rp = db.execute('select name, email from users where name like :name order by name', name='%Bhargan%') - >>> for name, email in rp.fetchall(): print name, email - Bhargan Basepair basepair+nospam@example.edu - -Or you can get at the current transaction's connection using -`connection()`. 
This is the raw connection object which can -accept any sort of SQL expression or raw SQL string passed to -the database:: - - >>> conn = db.connection() - >>> conn.execute("'select name, email from users where name like ? order by name'", '%Bhargan%') - -Dynamic table names -------------------- - -You can load a table whose name is specified at runtime with the -entity() method: - - >>> tablename = 'loans' - >>> db.entity(tablename) == db.loans - True - -entity() also takes an optional schema argument. If none is -specified, the default schema is used. - -""" - -from sqlalchemy import Table, MetaData, join -from sqlalchemy import schema, sql, util -from sqlalchemy.engine.base import Engine -from sqlalchemy.orm import scoped_session, sessionmaker, mapper, \ - class_mapper, relationship, session,\ - object_session, attributes -from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE -from sqlalchemy.exc import SQLAlchemyError, InvalidRequestError, ArgumentError -from sqlalchemy.sql import expression - - -__all__ = ['PKNotFoundError', 'SqlSoup'] - -Session = scoped_session(sessionmaker(autoflush=True, autocommit=False)) - -class AutoAdd(MapperExtension): - def __init__(self, scoped_session): - self.scoped_session = scoped_session - - def instrument_class(self, mapper, class_): - class_.__init__ = self._default__init__(mapper) - - def _default__init__(ext, mapper): - def __init__(self, **kwargs): - for key, value in kwargs.iteritems(): - setattr(self, key, value) - return __init__ - - def init_instance(self, mapper, class_, oldinit, instance, args, kwargs): - session = self.scoped_session() - state = attributes.instance_state(instance) - session._save_impl(state) - return EXT_CONTINUE - - def init_failed(self, mapper, class_, oldinit, instance, args, kwargs): - sess = object_session(instance) - if sess: - sess.expunge(instance) - return EXT_CONTINUE - -class PKNotFoundError(SQLAlchemyError): - pass - -def _ddl_error(cls): - msg = 'SQLSoup can only modify mapped Tables (found: %s)' \ - % cls._table.__class__.__name__ - raise InvalidRequestError(msg) - -# metaclass is necessary to expose class methods with getattr, e.g. 
-# we want to pass db.users.select through to users._mapper.select -class SelectableClassType(type): - def insert(cls, **kwargs): - _ddl_error(cls) - - def __clause_element__(cls): - return cls._table - - def __getattr__(cls, attr): - if attr == '_query': - # called during mapper init - raise AttributeError() - return getattr(cls._query, attr) - -class TableClassType(SelectableClassType): - def insert(cls, **kwargs): - o = cls() - o.__dict__.update(kwargs) - return o - - def relate(cls, propname, *args, **kwargs): - class_mapper(cls)._configure_property(propname, relationship(*args, **kwargs)) - -def _is_outer_join(selectable): - if not isinstance(selectable, sql.Join): - return False - if selectable.isouter: - return True - return _is_outer_join(selectable.left) or _is_outer_join(selectable.right) - -def _selectable_name(selectable): - if isinstance(selectable, sql.Alias): - return _selectable_name(selectable.element) - elif isinstance(selectable, sql.Select): - return ''.join(_selectable_name(s) for s in selectable.froms) - elif isinstance(selectable, schema.Table): - return selectable.name.capitalize() - else: - x = selectable.__class__.__name__ - if x[0] == '_': - x = x[1:] - return x - -def _class_for_table(session, engine, selectable, base_cls, mapper_kwargs): - selectable = expression._clause_element_as_expr(selectable) - mapname = 'Mapped' + _selectable_name(selectable) - # Py2K - if isinstance(mapname, unicode): - engine_encoding = engine.dialect.encoding - mapname = mapname.encode(engine_encoding) - # end Py2K - - if isinstance(selectable, Table): - klass = TableClassType(mapname, (base_cls,), {}) - else: - klass = SelectableClassType(mapname, (base_cls,), {}) - - def _compare(self, o): - L = list(self.__class__.c.keys()) - L.sort() - t1 = [getattr(self, k) for k in L] - try: - t2 = [getattr(o, k) for k in L] - except AttributeError: - raise TypeError('unable to compare with %s' % o.__class__) - return t1, t2 - - # python2/python3 compatible system of - # __cmp__ - __lt__ + __eq__ - - def __lt__(self, o): - t1, t2 = _compare(self, o) - return t1 < t2 - - def __eq__(self, o): - t1, t2 = _compare(self, o) - return t1 == t2 - - def __repr__(self): - L = ["%s=%r" % (key, getattr(self, key, '')) - for key in self.__class__.c.keys()] - return '%s(%s)' % (self.__class__.__name__, ','.join(L)) - - for m in ['__eq__', '__repr__', '__lt__']: - setattr(klass, m, eval(m)) - klass._table = selectable - klass.c = expression.ColumnCollection() - mappr = mapper(klass, - selectable, - extension=AutoAdd(session), - **mapper_kwargs) - - for k in mappr.iterate_properties: - klass.c[k.key] = k.columns[0] - - klass._query = session.query_property() - return klass - -class SqlSoup(object): - """Represent an ORM-wrapped database resource.""" - - def __init__(self, engine_or_metadata, base=object, session=None): - """Initialize a new :class:`.SqlSoup`. - - :param engine_or_metadata: a string database URL, :class:`.Engine` - or :class:`.MetaData` object to associate with. If the - argument is a :class:`.MetaData`, it should be *bound* - to an :class:`.Engine`. - :param base: a class which will serve as the default class for - returned mapped classes. Defaults to ``object``. - :param session: a :class:`.ScopedSession` or :class:`.Session` with - which to associate ORM operations for this :class:`.SqlSoup` instance. - If ``None``, a :class:`.ScopedSession` that's local to this - module is used. 
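For example, a sketch of wiring an application's own scoped session in, per the pattern shown earlier in this module's docs::

    from sqlalchemy.orm import scoped_session, sessionmaker
    from sqlalchemy.ext.sqlsoup import SqlSoup

    Session = scoped_session(sessionmaker(autoflush=True, autocommit=False))
    db = SqlSoup('sqlite:///:memory:', session=Session)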
- - """ - - self.session = session or Session - self.base=base - - if isinstance(engine_or_metadata, MetaData): - self._metadata = engine_or_metadata - elif isinstance(engine_or_metadata, (basestring, Engine)): - self._metadata = MetaData(engine_or_metadata) - else: - raise ArgumentError("invalid engine or metadata argument %r" % - engine_or_metadata) - - self._cache = {} - self.schema = None - - @property - def bind(self): - """The :class:`.Engine` associated with this :class:`.SqlSoup`.""" - return self._metadata.bind - - engine = bind - - def delete(self, instance): - """Mark an instance as deleted.""" - - self.session.delete(instance) - - def execute(self, stmt, **params): - """Execute a SQL statement. - - The statement may be a string SQL string, - an :func:`.expression.select` construct, or an :func:`.expression.text` - construct. - - """ - return self.session.execute(sql.text(stmt, bind=self.bind), **params) - - @property - def _underlying_session(self): - if isinstance(self.session, session.Session): - return self.session - else: - return self.session() - - def connection(self): - """Return the current :class:`.Connection` in use by the current transaction.""" - - return self._underlying_session._connection_for_bind(self.bind) - - def flush(self): - """Flush pending changes to the database. - - See :meth:`.Session.flush`. - - """ - self.session.flush() - - def rollback(self): - """Rollback the current transaction. - - See :meth:`.Session.rollback`. - - """ - self.session.rollback() - - def commit(self): - """Commit the current transaction. - - See :meth:`.Session.commit`. - - """ - self.session.commit() - - def clear(self): - """Synonym for :meth:`.SqlSoup.expunge_all`.""" - - self.session.expunge_all() - - def expunge(self, instance): - """Remove an instance from the :class:`.Session`. - - See :meth:`.Session.expunge`. - - """ - self.session.expunge(instance) - - def expunge_all(self): - """Clear all objects from the current :class:`.Session`. - - See :meth:`.Session.expunge_all`. - - """ - self.session.expunge_all() - - def map_to(self, attrname, tablename=None, selectable=None, - schema=None, base=None, mapper_args=util.immutabledict()): - """Configure a mapping to the given attrname. - - This is the "master" method that can be used to create any - configuration. - - .. versionadded:: 0.6.6 - - :param attrname: String attribute name which will be - established as an attribute on this :class:.`.SqlSoup` - instance. - :param base: a Python class which will be used as the - base for the mapped class. If ``None``, the "base" - argument specified by this :class:`.SqlSoup` - instance's constructor will be used, which defaults to - ``object``. - :param mapper_args: Dictionary of arguments which will - be passed directly to :func:`.orm.mapper`. - :param tablename: String name of a :class:`.Table` to be - reflected. If a :class:`.Table` is already available, - use the ``selectable`` argument. This argument is - mutually exclusive versus the ``selectable`` argument. - :param selectable: a :class:`.Table`, :class:`.Join`, or - :class:`.Select` object which will be mapped. This - argument is mutually exclusive versus the ``tablename`` - argument. - :param schema: String schema name to use if the - ``tablename`` argument is present. 
- - - """ - if attrname in self._cache: - raise InvalidRequestError( - "Attribute '%s' is already mapped to '%s'" % ( - attrname, - class_mapper(self._cache[attrname]).mapped_table - )) - - if tablename is not None: - if not isinstance(tablename, basestring): - raise ArgumentError("'tablename' argument must be a string." - ) - if selectable is not None: - raise ArgumentError("'tablename' and 'selectable' " - "arguments are mutually exclusive") - - selectable = Table(tablename, - self._metadata, - autoload=True, - autoload_with=self.bind, - schema=schema or self.schema) - elif schema: - raise ArgumentError("'tablename' argument is required when " - "using 'schema'.") - elif selectable is not None: - if not isinstance(selectable, expression.FromClause): - raise ArgumentError("'selectable' argument must be a " - "table, select, join, or other " - "selectable construct.") - else: - raise ArgumentError("'tablename' or 'selectable' argument is " - "required.") - - if not selectable.primary_key.columns: - if tablename: - raise PKNotFoundError( - "table '%s' does not have a primary " - "key defined" % tablename) - else: - raise PKNotFoundError( - "selectable '%s' does not have a primary " - "key defined" % selectable) - - mapped_cls = _class_for_table( - self.session, - self.engine, - selectable, - base or self.base, - mapper_args - ) - self._cache[attrname] = mapped_cls - return mapped_cls - - - def map(self, selectable, base=None, **mapper_args): - """Map a selectable directly. - - .. versionchanged:: 0.6.6 - The class and its mapping are not cached and will - be discarded once dereferenced. - - :param selectable: an :func:`.expression.select` construct. - :param base: a Python class which will be used as the - base for the mapped class. If ``None``, the "base" - argument specified by this :class:`.SqlSoup` - instance's constructor will be used, which defaults to - ``object``. - :param mapper_args: Dictionary of arguments which will - be passed directly to :func:`.orm.mapper`. - - """ - - return _class_for_table( - self.session, - self.engine, - selectable, - base or self.base, - mapper_args - ) - - def with_labels(self, selectable, base=None, **mapper_args): - """Map a selectable directly, wrapping the - selectable in a subquery with labels. - - .. versionchanged:: 0.6.6 - The class and its mapping are not cached and will - be discarded once dereferenced. - - :param selectable: an :func:`.expression.select` construct. - :param base: a Python class which will be used as the - base for the mapped class. If ``None``, the "base" - argument specified by this :class:`.SqlSoup` - instance's constructor will be used, which defaults to - ``object``. - :param mapper_args: Dictionary of arguments which will - be passed directly to :func:`.orm.mapper`. - - """ - - # TODO give meaningful aliases - return self.map( - expression._clause_element_as_expr(selectable). - select(use_labels=True). - alias('foo'), base=base, **mapper_args) - - def join(self, left, right, onclause=None, isouter=False, - base=None, **mapper_args): - """Create an :func:`.expression.join` and map to it. - - .. versionchanged:: 0.6.6 - The class and its mapping are not cached and will - be discarded once dereferenced. - - :param left: a mapped class or table object. - :param right: a mapped class or table object. - :param onclause: optional "ON" clause construct.. - :param isouter: if True, the join will be an OUTER join. - :param base: a Python class which will be used as the - base for the mapped class. 
If ``None``, the "base" - argument specified by this :class:`.SqlSoup` - instance's constructor will be used, which defaults to - ``object``. - :param mapper_args: Dictionary of arguments which will - be passed directly to :func:`.orm.mapper`. - - """ - - j = join(left, right, onclause=onclause, isouter=isouter) - return self.map(j, base=base, **mapper_args) - - def entity(self, attr, schema=None): - """Return the named entity from this :class:`.SqlSoup`, or - create if not present. - - For more generalized mapping, see :meth:`.map_to`. - - """ - try: - return self._cache[attr] - except KeyError, ke: - return self.map_to(attr, tablename=attr, schema=schema) - - def __getattr__(self, attr): - return self.entity(attr) - - def __repr__(self): - return 'SqlSoup(%r)' % self._metadata - diff --git a/libs/sqlalchemy/interfaces.py b/libs/sqlalchemy/interfaces.py deleted file mode 100644 index 92fb3154..00000000 --- a/libs/sqlalchemy/interfaces.py +++ /dev/null @@ -1,309 +0,0 @@ -# sqlalchemy/interfaces.py -# Copyright (C) 2007-2013 the SQLAlchemy authors and contributors -# Copyright (C) 2007 Jason Kirtland jek@discorporate.us -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Interfaces and abstract types. - -This module is **deprecated** and is superseded by the -event system. - -""" - -from sqlalchemy import event, util - -class PoolListener(object): - """Hooks into the lifecycle of connections in a :class:`.Pool`. - - .. note:: - - :class:`.PoolListener` is deprecated. Please - refer to :class:`.PoolEvents`. - - Usage:: - - class MyListener(PoolListener): - def connect(self, dbapi_con, con_record): - '''perform connect operations''' - # etc. - - # create a new pool with a listener - p = QueuePool(..., listeners=[MyListener()]) - - # add a listener after the fact - p.add_listener(MyListener()) - - # usage with create_engine() - e = create_engine("url://", listeners=[MyListener()]) - - All of the standard connection :class:`~sqlalchemy.pool.Pool` types can - accept event listeners for key connection lifecycle events: - creation, pool check-out and check-in. There are no events fired - when a connection closes. - - For any given DB-API connection, there will be one ``connect`` - event, `n` number of ``checkout`` events, and either `n` or `n - 1` - ``checkin`` events. (If a ``Connection`` is detached from its - pool via the ``detach()`` method, it won't be checked back in.) - - These are low-level events for low-level objects: raw Python - DB-API connections, without the conveniences of the SQLAlchemy - ``Connection`` wrapper, ``Dialect`` services or ``ClauseElement`` - execution. If you execute SQL through the connection, explicitly - closing all cursors and other resources is recommended. - - Events also receive a ``_ConnectionRecord``, a long-lived internal - ``Pool`` object that basically represents a "slot" in the - connection pool. ``_ConnectionRecord`` objects have one public - attribute of note: ``info``, a dictionary whose contents are - scoped to the lifetime of the DB-API connection managed by the - record. You can use this shared storage area however you like. - - There is no need to subclass ``PoolListener`` to handle events. - Any class that implements one or more of these methods can be used - as a pool listener. The ``Pool`` will inspect the methods - provided by a listener object and add the listener to one or more - internal event queues based on its capabilities. 
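For instance, a minimal sketch of a partial listener; only the one hook it provides is registered (class name hypothetical, no subclassing required)::

    import time
    from sqlalchemy import create_engine

    class ConnectStamper(object):
        # duck-typed: implements only the 'connect' hook
        def connect(self, dbapi_con, con_record):
            con_record.info['connected_at'] = time.time()

    e = create_engine('sqlite://', listeners=[ConnectStamper()])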
In terms of - efficiency and function call overhead, you're much better off only - providing implementations for the hooks you'll be using. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - """Adapt a :class:`.PoolListener` to individual - :class:`event.Dispatch` events. - - """ - - listener = util.as_interface(listener, methods=('connect', - 'first_connect', 'checkout', 'checkin')) - if hasattr(listener, 'connect'): - event.listen(self, 'connect', listener.connect) - if hasattr(listener, 'first_connect'): - event.listen(self, 'first_connect', listener.first_connect) - if hasattr(listener, 'checkout'): - event.listen(self, 'checkout', listener.checkout) - if hasattr(listener, 'checkin'): - event.listen(self, 'checkin', listener.checkin) - - - def connect(self, dbapi_con, con_record): - """Called once for each new DB-API connection or Pool's ``creator()``. - - dbapi_con - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def first_connect(self, dbapi_con, con_record): - """Called exactly once for the first DB-API connection. - - dbapi_con - A newly connected raw DB-API connection (not a SQLAlchemy - ``Connection`` wrapper). - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - - def checkout(self, dbapi_con, con_record, con_proxy): - """Called when a connection is retrieved from the Pool. - - dbapi_con - A raw DB-API connection - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - con_proxy - The ``_ConnectionFairy`` which manages the connection for the span of - the current checkout. - - If you raise an ``exc.DisconnectionError``, the current - connection will be disposed and a fresh connection retrieved. - Processing of all checkout listeners will abort and restart - using the new connection. - """ - - def checkin(self, dbapi_con, con_record): - """Called when a connection returns to the pool. - - Note that the connection may be closed, and may be None if the - connection has been invalidated. ``checkin`` will not be called - for detached connections. (They do not return to the pool.) - - dbapi_con - A raw DB-API connection - - con_record - The ``_ConnectionRecord`` that persistently manages the connection - - """ - -class ConnectionProxy(object): - """Allows interception of statement execution by Connections. - - .. note:: - - :class:`.ConnectionProxy` is deprecated. Please - refer to :class:`.ConnectionEvents`. - - Either or both of the ``execute()`` and ``cursor_execute()`` - may be implemented to intercept compiled statement and - cursor level executions, e.g.:: - - class MyProxy(ConnectionProxy): - def execute(self, conn, execute, clauseelement, *multiparams, **params): - print "compiled statement:", clauseelement - return execute(clauseelement, *multiparams, **params) - - def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): - print "raw statement:", statement - return execute(cursor, statement, parameters, context) - - The ``execute`` argument is a function that will fulfill the default - execution behavior for the operation. The signature illustrated - in the example should be used. 
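A sketch of a proxy following that signature, here timing raw cursor executions (class name hypothetical)::

    import time
    from sqlalchemy.interfaces import ConnectionProxy

    class TimingProxy(ConnectionProxy):
        def cursor_execute(self, execute, cursor, statement,
                           parameters, context, executemany):
            start = time.time()
            try:
                # fulfill the default execution behavior
                return execute(cursor, statement, parameters, context)
            finally:
                print "%.3fs %s" % (time.time() - start, statement)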
- - The proxy is installed into an :class:`~sqlalchemy.engine.Engine` via - the ``proxy`` argument:: - - e = create_engine('someurl://', proxy=MyProxy()) - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - - def adapt_execute(conn, clauseelement, multiparams, params): - - def execute_wrapper(clauseelement, *multiparams, **params): - return clauseelement, multiparams, params - - return listener.execute(conn, execute_wrapper, - clauseelement, *multiparams, - **params) - - event.listen(self, 'before_execute', adapt_execute) - - def adapt_cursor_execute(conn, cursor, statement, - parameters,context, executemany, ): - - def execute_wrapper( - cursor, - statement, - parameters, - context, - ): - return statement, parameters - - return listener.cursor_execute( - execute_wrapper, - cursor, - statement, - parameters, - context, - executemany, - ) - - event.listen(self, 'before_cursor_execute', adapt_cursor_execute) - - def do_nothing_callback(*arg, **kw): - pass - - def adapt_listener(fn): - - def go(conn, *arg, **kw): - fn(conn, do_nothing_callback, *arg, **kw) - - return util.update_wrapper(go, fn) - - event.listen(self, 'begin', adapt_listener(listener.begin)) - event.listen(self, 'rollback', - adapt_listener(listener.rollback)) - event.listen(self, 'commit', adapt_listener(listener.commit)) - event.listen(self, 'savepoint', - adapt_listener(listener.savepoint)) - event.listen(self, 'rollback_savepoint', - adapt_listener(listener.rollback_savepoint)) - event.listen(self, 'release_savepoint', - adapt_listener(listener.release_savepoint)) - event.listen(self, 'begin_twophase', - adapt_listener(listener.begin_twophase)) - event.listen(self, 'prepare_twophase', - adapt_listener(listener.prepare_twophase)) - event.listen(self, 'rollback_twophase', - adapt_listener(listener.rollback_twophase)) - event.listen(self, 'commit_twophase', - adapt_listener(listener.commit_twophase)) - - - def execute(self, conn, execute, clauseelement, *multiparams, **params): - """Intercept high level execute() events.""" - - - return execute(clauseelement, *multiparams, **params) - - def cursor_execute(self, execute, cursor, statement, parameters, context, executemany): - """Intercept low-level cursor execute() events.""" - - return execute(cursor, statement, parameters, context) - - def begin(self, conn, begin): - """Intercept begin() events.""" - - return begin() - - def rollback(self, conn, rollback): - """Intercept rollback() events.""" - - return rollback() - - def commit(self, conn, commit): - """Intercept commit() events.""" - - return commit() - - def savepoint(self, conn, savepoint, name=None): - """Intercept savepoint() events.""" - - return savepoint(name=name) - - def rollback_savepoint(self, conn, rollback_savepoint, name, context): - """Intercept rollback_savepoint() events.""" - - return rollback_savepoint(name, context) - - def release_savepoint(self, conn, release_savepoint, name, context): - """Intercept release_savepoint() events.""" - - return release_savepoint(name, context) - - def begin_twophase(self, conn, begin_twophase, xid): - """Intercept begin_twophase() events.""" - - return begin_twophase(xid) - - def prepare_twophase(self, conn, prepare_twophase, xid): - """Intercept prepare_twophase() events.""" - - return prepare_twophase(xid) - - def rollback_twophase(self, conn, rollback_twophase, xid, is_prepared): - """Intercept rollback_twophase() events.""" - - return rollback_twophase(xid, is_prepared) - - def commit_twophase(self, conn, commit_twophase, xid, is_prepared): - 
"""Intercept commit_twophase() events.""" - - return commit_twophase(xid, is_prepared) - diff --git a/libs/sqlalchemy/log.py b/libs/sqlalchemy/log.py deleted file mode 100644 index 24608fde..00000000 --- a/libs/sqlalchemy/log.py +++ /dev/null @@ -1,212 +0,0 @@ -# sqlalchemy/log.py -# Copyright (C) 2006-2011 the SQLAlchemy authors and contributors -# Includes alterations by Vinay Sajip vinay_sajip@yahoo.co.uk -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Logging control and utilities. - -Control of logging for SA can be performed from the regular python logging -module. The regular dotted module namespace is used, starting at -'sqlalchemy'. For class-level logging, the class name is appended. - -The "echo" keyword parameter, available on SQLA :class:`.Engine` -and :class:`.Pool` objects, corresponds to a logger specific to that -instance only. - -""" - -import logging -import sys -from sqlalchemy import util - -# set initial level to WARN. This so that -# log statements don't occur in the absense of explicit -# logging being enabled for 'sqlalchemy'. -rootlogger = logging.getLogger('sqlalchemy') -if rootlogger.level == logging.NOTSET: - rootlogger.setLevel(logging.WARN) - -def _add_default_handler(logger): - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter( - '%(asctime)s %(levelname)s %(name)s %(message)s')) - logger.addHandler(handler) - -_logged_classes = set() -def class_logger(cls, enable=False): - logger = logging.getLogger(cls.__module__ + "." + cls.__name__) - if enable == 'debug': - logger.setLevel(logging.DEBUG) - elif enable == 'info': - logger.setLevel(logging.INFO) - cls._should_log_debug = lambda self: logger.isEnabledFor(logging.DEBUG) - cls._should_log_info = lambda self: logger.isEnabledFor(logging.INFO) - cls.logger = logger - _logged_classes.add(cls) - - -class Identified(object): - logging_name = None - - def _should_log_debug(self): - return self.logger.isEnabledFor(logging.DEBUG) - - def _should_log_info(self): - return self.logger.isEnabledFor(logging.INFO) - -class InstanceLogger(object): - """A logger adapter (wrapper) for :class:`.Identified` subclasses. - - This allows multiple instances (e.g. Engine or Pool instances) - to share a logger, but have its verbosity controlled on a - per-instance basis. - - The basic functionality is to return a logging level - which is based on an instance's echo setting. 
- - Default implementation is: - - 'debug' -> logging.DEBUG - True -> logging.INFO - False -> Effective level of underlying logger - (logging.WARNING by default) - None -> same as False - """ - - # Map echo settings to logger levels - _echo_map = { - None: logging.NOTSET, - False: logging.NOTSET, - True: logging.INFO, - 'debug': logging.DEBUG, - } - - def __init__(self, echo, name): - self.echo = echo - self.logger = logging.getLogger(name) - - # if echo flag is enabled and no handlers, - # add a handler to the list - if self._echo_map[echo] <= logging.INFO \ - and not self.logger.handlers: - _add_default_handler(self.logger) - - # - # Boilerplate convenience methods - # - def debug(self, msg, *args, **kwargs): - """Delegate a debug call to the underlying logger.""" - - self.log(logging.DEBUG, msg, *args, **kwargs) - - def info(self, msg, *args, **kwargs): - """Delegate an info call to the underlying logger.""" - - self.log(logging.INFO, msg, *args, **kwargs) - - def warning(self, msg, *args, **kwargs): - """Delegate a warning call to the underlying logger.""" - - self.log(logging.WARNING, msg, *args, **kwargs) - - warn = warning - - def error(self, msg, *args, **kwargs): - """ - Delegate an error call to the underlying logger. - """ - self.log(logging.ERROR, msg, *args, **kwargs) - - def exception(self, msg, *args, **kwargs): - """Delegate an exception call to the underlying logger.""" - - kwargs["exc_info"] = 1 - self.log(logging.ERROR, msg, *args, **kwargs) - - def critical(self, msg, *args, **kwargs): - """Delegate a critical call to the underlying logger.""" - - self.log(logging.CRITICAL, msg, *args, **kwargs) - - def log(self, level, msg, *args, **kwargs): - """Delegate a log call to the underlying logger. - - The level here is determined by the echo - flag as well as that of the underlying logger, and - logger._log() is called directly. - - """ - - # inline the logic from isEnabledFor(), - # getEffectiveLevel(), to avoid overhead. - - if self.logger.manager.disable >= level: - return - - selected_level = self._echo_map[self.echo] - if selected_level == logging.NOTSET: - selected_level = self.logger.getEffectiveLevel() - - if level >= selected_level: - self.logger._log(level, msg, args, **kwargs) - - def isEnabledFor(self, level): - """Is this logger enabled for level 'level'?""" - - if self.logger.manager.disable >= level: - return False - return level >= self.getEffectiveLevel() - - def getEffectiveLevel(self): - """What's the effective level for this logger?""" - - level = self._echo_map[self.echo] - if level == logging.NOTSET: - level = self.logger.getEffectiveLevel() - return level - -def instance_logger(instance, echoflag=None): - """create a logger for an instance that implements :class:`.Identified`.""" - - if instance.logging_name: - name = "%s.%s.%s" % (instance.__class__.__module__, - instance.__class__.__name__, instance.logging_name) - else: - name = "%s.%s" % (instance.__class__.__module__, - instance.__class__.__name__) - - instance._echo = echoflag - - if echoflag in (False, None): - # if no echo setting or False, return a Logger directly, - # avoiding overhead of filtering - logger = logging.getLogger(name) - else: - # if a specified echo flag, return an EchoLogger, - # which checks the flag, overrides normal log - # levels by calling logger._log() - logger = InstanceLogger(echoflag, name) - - instance.logger = logger - -class echo_property(object): - __doc__ = """\ - When ``True``, enable log output for this element. 
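For example, on an :class:`.Engine` (a sketch; values per the echo mapping documented on :class:`.InstanceLogger` above)::

    from sqlalchemy import create_engine

    e = create_engine('sqlite://')
    e.echo = True       # SQL statements at INFO level, for this engine only
    e.echo = 'debug'    # statements plus result rows at DEBUG level
    e.echo = False      # fall back to the 'sqlalchemy.engine' logger's level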
- - This has the effect of setting the Python logging level for the namespace - of this element's class and object reference. A value of boolean ``True`` - indicates that the loglevel ``logging.INFO`` will be set for the logger, - whereas the string value ``debug`` will set the loglevel to - ``logging.DEBUG``. - """ - - def __get__(self, instance, owner): - if instance is None: - return self - else: - return instance._echo - - def __set__(self, instance, value): - instance_logger(instance, echoflag=value) diff --git a/libs/sqlalchemy/orm/__init__.py b/libs/sqlalchemy/orm/__init__.py deleted file mode 100644 index b4006be4..00000000 --- a/libs/sqlalchemy/orm/__init__.py +++ /dev/null @@ -1,1685 +0,0 @@ -# orm/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" -Functional constructs for ORM configuration. - -See the SQLAlchemy object relational tutorial and mapper configuration -documentation for an overview of how this module is used. - -""" - -from sqlalchemy.orm import exc -from sqlalchemy.orm.mapper import ( - Mapper, - _mapper_registry, - class_mapper, - configure_mappers - ) -from sqlalchemy.orm.interfaces import ( - EXT_CONTINUE, - EXT_STOP, - InstrumentationManager, - MapperExtension, - PropComparator, - SessionExtension, - AttributeExtension, - ) -from sqlalchemy.orm.util import ( - aliased, - join, - object_mapper, - outerjoin, - polymorphic_union, - with_parent, - ) -from sqlalchemy.orm.properties import ( - ColumnProperty, - ComparableProperty, - CompositeProperty, - RelationshipProperty, - PropertyLoader, - SynonymProperty, - ) -from sqlalchemy.orm import mapper as mapperlib -from sqlalchemy.orm.mapper import reconstructor, validates -from sqlalchemy.orm import strategies -from sqlalchemy.orm.query import AliasOption, Query -from sqlalchemy.sql import util as sql_util -from sqlalchemy.orm.session import Session -from sqlalchemy.orm.session import object_session, sessionmaker, \ - make_transient -from sqlalchemy.orm.scoping import ScopedSession -from sqlalchemy import util as sa_util - -__all__ = ( - 'EXT_CONTINUE', - 'EXT_STOP', - 'InstrumentationManager', - 'MapperExtension', - 'AttributeExtension', - 'PropComparator', - 'Query', - 'Session', - 'aliased', - 'backref', - 'class_mapper', - 'clear_mappers', - 'column_property', - 'comparable_property', - 'compile_mappers', - 'configure_mappers', - 'composite', - 'contains_alias', - 'contains_eager', - 'create_session', - 'defer', - 'deferred', - 'dynamic_loader', - 'eagerload', - 'eagerload_all', - 'immediateload', - 'join', - 'joinedload', - 'joinedload_all', - 'lazyload', - 'mapper', - 'make_transient', - 'noload', - 'object_mapper', - 'object_session', - 'outerjoin', - 'polymorphic_union', - 'reconstructor', - 'relationship', - 'relation', - 'scoped_session', - 'sessionmaker', - 'subqueryload', - 'subqueryload_all', - 'synonym', - 'undefer', - 'undefer_group', - 'validates' - ) - - -def scoped_session(session_factory, scopefunc=None): - """Provides thread-local or scoped management of :class:`.Session` objects. 
- - This is a front-end function to - :class:`.ScopedSession`:: - - Session = scoped_session(sessionmaker(autoflush=True)) - - To instantiate a Session object which is part of the scoped context, - instantiate normally:: - - session = Session() - - Most session methods are available as classmethods from the scoped - session:: - - Session.commit() - Session.close() - - See also: :ref:`unitofwork_contextual`. - - :param session_factory: a callable function that produces - :class:`.Session` instances, such as :func:`sessionmaker`. - - :param scopefunc: Optional "scope" function which would be - passed to the :class:`.ScopedRegistry`. If None, the - :class:`.ThreadLocalRegistry` is used by default. - - :returns: a :class:`.ScopedSession` instance - - - """ - return ScopedSession(session_factory, scopefunc=scopefunc) - -def create_session(bind=None, **kwargs): - """Create a new :class:`.Session` - with no automation enabled by default. - - This function is used primarily for testing. The usual - route to :class:`.Session` creation is via its constructor - or the :func:`.sessionmaker` function. - - :param bind: optional, a single Connectable to use for all - database access in the created - :class:`~sqlalchemy.orm.session.Session`. - - :param \*\*kwargs: optional, passed through to the - :class:`.Session` constructor. - - :returns: an :class:`~sqlalchemy.orm.session.Session` instance - - The defaults of create_session() are the opposite of that of - :func:`sessionmaker`; ``autoflush`` and ``expire_on_commit`` are - False, ``autocommit`` is True. In this sense the session acts - more like the "classic" SQLAlchemy 0.3 session with these. - - Usage:: - - >>> from sqlalchemy.orm import create_session - >>> session = create_session() - - It is recommended to use :func:`sessionmaker` instead of - create_session(). - - """ - kwargs.setdefault('autoflush', False) - kwargs.setdefault('autocommit', True) - kwargs.setdefault('expire_on_commit', False) - return Session(bind=bind, **kwargs) - -def relationship(argument, secondary=None, **kwargs): - """Provide a relationship of a primary Mapper to a secondary Mapper. - - .. versionchanged:: 0.6 - :func:`relationship` is historically known as :func:`relation`. - - This corresponds to a parent-child or associative table relationship. The - constructed class is an instance of :class:`.RelationshipProperty`. - - A typical :func:`.relationship`, used in a classical mapping:: - - mapper(Parent, properties={ - 'children': relationship(Child) - }) - - Some arguments accepted by :func:`.relationship` optionally accept a - callable function, which when called produces the desired value. - The callable is invoked by the parent :class:`.Mapper` at "mapper initialization" - time, which happens only when mappers are first used, and is assumed - to be after all mappings have been constructed. This can be used - to resolve order-of-declaration and other dependency issues, such as - if ``Child`` is declared below ``Parent`` in the same file:: - - mapper(Parent, properties={ - "children":relationship(lambda: Child, - order_by=lambda: Child.id) - }) - - When using the :ref:`declarative_toplevel` extension, the Declarative - initializer allows string arguments to be passed to :func:`.relationship`. - These string arguments are converted into callables that evaluate - the string as Python code, using the Declarative - class-registry as a namespace. 
This allows the lookup of related - classes to be automatic via their string name, and removes the need to import - related classes at all into the local module space:: - - from sqlalchemy.ext.declarative import declarative_base - - Base = declarative_base() - - class Parent(Base): - __tablename__ = 'parent' - id = Column(Integer, primary_key=True) - children = relationship("Child", order_by="Child.id") - - A full array of examples and reference documentation regarding - :func:`.relationship` is at :ref:`relationship_config_toplevel`. - - :param argument: - a mapped class, or actual :class:`.Mapper` instance, representing the target of - the relationship. - - ``argument`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param secondary: - for a many-to-many relationship, specifies the intermediary - table, and is an instance of :class:`.Table`. The ``secondary`` keyword - argument should generally only - be used for a table that is not otherwise expressed in any class - mapping, unless this relationship is declared as view only, otherwise - conflicting persistence operations can occur. - - ``secondary`` may - also be passed as a callable function which is evaluated at - mapper initialization time. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - many-to-one reference should be loaded when replaced, if - not already loaded. Normally, history tracking logic for - simple many-to-ones only needs to be aware of the "new" - value in order to perform a flush. This flag is available - for applications that make use of - :func:`.attributes.get_history` which also need to know - the "previous" value of the attribute. - - :param backref: - indicates the string name of a property to be placed on the related - mapper's class that will handle this relationship in the other - direction. The other property will be created automatically - when the mappers are configured. Can also be passed as a - :func:`backref` object to control the configuration of the - new relationship. - - :param back_populates: - Takes a string name and has the same meaning as ``backref``, - except the complementing property is **not** created automatically, - and instead must be configured explicitly on the other mapper. The - complementing property should also indicate ``back_populates`` - to this relationship to ensure proper functioning. - - :param cascade: - a comma-separated list of cascade rules which determines how - Session operations should be "cascaded" from parent to child. - This defaults to ``False``, which means the default cascade - should be used. The default value is ``"save-update, merge"``. - - Available cascades are: - - * ``save-update`` - cascade the :meth:`.Session.add` - operation. This cascade applies both to future and - past calls to :meth:`~sqlalchemy.orm.session.Session.add`, - meaning new items added to a collection or scalar relationship - get placed into the same session as that of the parent, and - also applies to items which have been removed from this - relationship but are still part of unflushed history. - - * ``merge`` - cascade the :meth:`~sqlalchemy.orm.session.Session.merge` - operation - - * ``expunge`` - cascade the :meth:`.Session.expunge` - operation - - * ``delete`` - cascade the :meth:`.Session.delete` - operation - - * ``delete-orphan`` - if an item of the child's type is - detached from its parent, mark it for deletion. 
- - .. versionchanged:: 0.7 - This option does not prevent - a new instance of the child object from being persisted - without a parent to start with; to constrain against - that case, ensure the child's foreign key column(s) - is configured as NOT NULL - - * ``refresh-expire`` - cascade the :meth:`.Session.expire` - and :meth:`~sqlalchemy.orm.session.Session.refresh` operations - - * ``all`` - shorthand for "save-update, merge, refresh-expire, - expunge, delete" - - See the section :ref:`unitofwork_cascades` for more background - on configuring cascades. - - :param cascade_backrefs=True: - a boolean value indicating if the ``save-update`` cascade should - operate along an assignment event intercepted by a backref. - When set to ``False``, - the attribute managed by this relationship will not cascade - an incoming transient object into the session of a - persistent parent, if the event is received via backref. - - That is:: - - mapper(A, a_table, properties={ - 'bs':relationship(B, backref="a", cascade_backrefs=False) - }) - - If an ``A()`` is present in the session, assigning it to - the "a" attribute on a transient ``B()`` will not place - the ``B()`` into the session. To set the flag in the other - direction, i.e. so that ``A().bs.append(B())`` won't add - a transient ``A()`` into the session for a persistent ``B()``:: - - mapper(A, a_table, properties={ - 'bs':relationship(B, - backref=backref("a", cascade_backrefs=False) - ) - }) - - See the section :ref:`unitofwork_cascades` for more background - on configuring cascades. - - :param collection_class: - a class or callable that returns a new list-holding object. This will - be used in place of a plain list for storing elements. - Behavior of this attribute is described in detail at - :ref:`custom_collections`. - - :param comparator_factory: - a class which extends :class:`.RelationshipProperty.Comparator` and - provides custom SQL clause generation for comparison operations. - - :param doc: - docstring which will be applied to the resulting descriptor. - - :param extension: - an :class:`.AttributeExtension` instance, or list of extensions, - which will be prepended to the list of attribute listeners for - the resulting descriptor placed on the class. - **Deprecated.** Please see :class:`.AttributeEvents`. - - :param foreign_keys: - a list of columns which are to be used as "foreign key" columns. - Normally, :func:`relationship` uses the :class:`.ForeignKey` - and :class:`.ForeignKeyConstraint` objects present within the - mapped or secondary :class:`.Table` to determine the "foreign" side of - the join condition. This is used to construct SQL clauses in order - to load objects, as well as to "synchronize" values from - primary key columns to referencing foreign key columns. - The ``foreign_keys`` parameter overrides the notion of what's - "foreign" in the table metadata, allowing the specification - of a list of :class:`.Column` objects that should be considered - part of the foreign key. - - There are only two use cases for ``foreign_keys`` - one, when it is not - convenient for :class:`.Table` metadata to contain its own foreign key - metadata (which should be almost never, unless reflecting a large number of - tables from a MySQL MyISAM schema, or a schema that doesn't actually - have foreign keys on it). The other is for extremely - rare and exotic composite foreign key setups where some columns - should artificially not be considered as foreign.
- - ``foreign_keys`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param innerjoin=False: - when ``True``, joined eager loads will use an inner join to join - against related tables instead of an outer join. The purpose - of this option is generally one of performance, as inner joins - generally perform better than outer joins. Another reason can be - the use of ``with_lockmode``, which does not support outer joins. - - This flag can be set to ``True`` when the relationship references an - object via many-to-one using local foreign keys that are not nullable, - or when the reference is one-to-one or a collection that is guaranteed - to have one or at least one entry. - - :param join_depth: - when non-``None``, an integer value indicating how many levels - deep "eager" loaders should join on a self-referring or cyclical - relationship. The number counts how many times the same Mapper - shall be present in the loading condition along a particular join - branch. When left at its default of ``None``, eager loaders - will stop chaining when they encounter the same target mapper - which is already higher up in the chain. This option applies - both to joined- and subquery- eager loaders. - - :param lazy='select': specifies - how the related items should be loaded. Default value is - ``select``. Values include: - - * ``select`` - items should be loaded lazily when the property is first - accessed, using a separate SELECT statement, or identity map - fetch for simple many-to-one references. - - * ``immediate`` - items should be loaded as the parents are loaded, - using a separate SELECT statement, or identity map fetch for - simple many-to-one references. - - .. versionadded:: 0.6.5 - - * ``joined`` - items should be loaded "eagerly" in the same query as - that of the parent, using a JOIN or LEFT OUTER JOIN. Whether - the join is "outer" or not is determined by the ``innerjoin`` - parameter. - - * ``subquery`` - items should be loaded "eagerly" within the same - query as that of the parent, using a second SQL statement - which issues a JOIN to a subquery of the original - statement. - - * ``noload`` - no loading should occur at any time. This is to - support "write-only" attributes, or attributes which are - populated in some manner specific to the application. - - * ``dynamic`` - the attribute will return a pre-configured - :class:`~sqlalchemy.orm.query.Query` object for all read - operations, onto which further filtering operations can be - applied before iterating the results. See - the section :ref:`dynamic_relationship` for more details. - - * True - a synonym for 'select' - - * False - a synonym for 'joined' - - * None - a synonym for 'noload' - - Detailed discussion of loader strategies is at :ref:`loading_toplevel`. - - :param load_on_pending=False: - Indicates loading behavior for transient or pending parent objects. - - When set to ``True``, causes the lazy-loader to - issue a query for a parent object that is not persistent, meaning it has - never been flushed. This may take effect for a pending object when - autoflush is disabled, or for a transient object that has been - "attached" to a :class:`.Session` but is not part of its pending - collection. Attachment of transient objects to the session without - moving to the "pending" state is not a supported behavior at this time.
- - Note that the load of related objects on a pending or transient object - also does not trigger any attribute change events - no user-defined - events will be emitted for these attributes, and if and when the - object is ultimately flushed, only the user-specific foreign key - attributes will be part of the modified state. - - The load_on_pending flag does not improve behavior - when the ORM is used normally - object references should be constructed - at the object level, not at the foreign key level, so that they - are present in an ordinary way before flush() proceeds. This flag - is not intended for general use. - - .. versionadded:: 0.6.5 - - :param order_by: - indicates the ordering that should be applied when loading these - items. ``order_by`` is expected to refer to one of the :class:`.Column` - objects to which the target class is mapped, or - the attribute itself bound to the target class which refers - to the column. - - ``order_by`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param passive_deletes=False: - Indicates loading behavior during delete operations. - - A value of True indicates that unloaded child items should not - be loaded during a delete operation on the parent. Normally, - when a parent item is deleted, all child items are loaded so - that they can either be marked as deleted, or have their - foreign key to the parent set to NULL. Marking this flag as - True usually implies an ON DELETE rule is in - place which will handle updating/deleting child rows on the - database side. - - Additionally, setting the flag to the string value 'all' will - disable the "nulling out" of the child foreign keys, when there - is no delete or delete-orphan cascade enabled. This is - typically used when a triggering or error raise scenario is in - place on the database side. Note that the foreign key - attributes on in-session child objects will not be changed - after a flush occurs, so this is a very special use-case - setting. - - :param passive_updates=True: - Indicates loading and INSERT/UPDATE/DELETE behavior when the - source of a foreign key value changes (i.e. an "on update" - cascade), which are typically the primary key columns of the - source row. - - When True, it is assumed that ON UPDATE CASCADE is configured on - the foreign key in the database, and that the database will - handle propagation of an UPDATE from a source column to - dependent rows. Note that with databases which enforce - referential integrity (i.e. PostgreSQL, MySQL with InnoDB tables), - ON UPDATE CASCADE is required for this operation. The - relationship() will update the value of the attribute on related - items which are locally present in the session during a flush. - - When False, it is assumed that the database does not enforce - referential integrity and will not be issuing its own CASCADE - operation for an update. The relationship() will issue the - appropriate UPDATE statements to the database in response to the - change of a referenced key, and items locally present in the - session during a flush will also be refreshed. - - This flag should probably be set to False if primary key changes - are expected and the database in use doesn't support CASCADE - (i.e. SQLite, MySQL MyISAM tables). - - Also see the passive_updates flag on ``mapper()``. - - A future SQLAlchemy release will provide a "detect" feature for - this flag.
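A minimal sketch of the ``passive_deletes`` pattern described above, assuming hypothetical ``Parent``/``Child`` declarative classes and a database-side ``ON DELETE CASCADE`` rule (not code from the removed module)::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship

    Base = declarative_base()

    class Parent(Base):
        __tablename__ = 'parent'
        id = Column(Integer, primary_key=True)
        # passive_deletes=True: don't SELECT unloaded children on delete;
        # the ON DELETE CASCADE rule below removes the rows instead.
        children = relationship("Child", cascade="all, delete-orphan",
                                passive_deletes=True)

    class Child(Base):
        __tablename__ = 'child'
        id = Column(Integer, primary_key=True)
        parent_id = Column(Integer,
                           ForeignKey('parent.id', ondelete='CASCADE'))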
- - :param post_update: - this indicates that the relationship should be handled by a - second UPDATE statement after an INSERT or before a - DELETE. Currently, it also will issue an UPDATE after the - instance was UPDATEd as well, although this technically should - be improved. This flag is used to handle saving bi-directional - dependencies between two individual rows (i.e. each row - references the other), where it would otherwise be impossible to - INSERT or DELETE both rows fully since one row exists before the - other. Use this flag when a particular mapping arrangement will - incur two rows that are dependent on each other, such as a table - that has a one-to-many relationship to a set of child rows, and - also has a column that references a single child row within that - list (i.e. both tables contain a foreign key to each other). If - a ``flush()`` operation returns an error that a "cyclical - dependency" was detected, this is a cue that you might want to - use ``post_update`` to "break" the cycle. - - :param primaryjoin: - a SQL expression that will be used as the primary - join of this child object against the parent object, or in a - many-to-many relationship the join of the primary object to the - association table. By default, this value is computed based on the - foreign key relationships of the parent and child tables (or association - table). - - ``primaryjoin`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param remote_side: - used for self-referential relationships, indicates the column or - list of columns that form the "remote side" of the relationship. - - ``remote_side`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param query_class: - a :class:`.Query` subclass that will be used as the base of the - "appender query" returned by a "dynamic" relationship, that - is, a relationship that specifies ``lazy="dynamic"`` or was - otherwise constructed using the :func:`.orm.dynamic_loader` - function. - - :param secondaryjoin: - a SQL expression that will be used as the join of - an association table to the child object. By default, this value is - computed based on the foreign key relationships of the association and - child tables. - - ``secondaryjoin`` may also be passed as a callable function - which is evaluated at mapper initialization time, and may be passed as a - Python-evaluable string when using Declarative. - - :param single_parent=(True|False): - when True, installs a validator which will prevent objects - from being associated with more than one parent at a time. - This is used for many-to-one or many-to-many relationships that - should be treated either as one-to-one or one-to-many. Its - usage is optional unless delete-orphan cascade is also - set on this relationship(), in which case it's required. - - :param uselist=(True|False): - a boolean that indicates if this property should be loaded as a - list or a scalar. In most cases, this value is determined - automatically by ``relationship()``, based on the type and direction - of the relationship - one to many forms a list, many to one - forms a scalar, many to many is a list. If a scalar is desired - where normally a list would be present, such as a bi-directional - one-to-one relationship, set uselist to False.
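To make the ``uselist`` case concrete, a hedged sketch (class names invented, not from the removed file) of a bi-directional one-to-one, where the reverse of a many-to-one would otherwise default to a collection::

    from sqlalchemy import Column, ForeignKey, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import backref, relationship

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)

    class Address(Base):
        __tablename__ = 'address'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('user.id'))
        # uselist=False on the backref makes User.address a scalar
        # instead of the list that would normally be inferred.
        user = relationship(User,
                            backref=backref('address', uselist=False))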
- - :param viewonly=False: - when set to True, the relationship is used only for loading objects - within the relationship, and has no effect on the unit-of-work - flush process. Relationships with viewonly can specify any kind of - join conditions to provide additional views of related objects - onto a parent object. Note that the functionality of a viewonly - relationship has its limits - complicated join conditions may - not compile into eager or lazy loaders properly. If this is the - case, use an alternative method. - - """ - return RelationshipProperty(argument, secondary=secondary, **kwargs) - -def relation(*arg, **kw): - """A synonym for :func:`relationship`.""" - - return relationship(*arg, **kw) - -def dynamic_loader(argument, **kw): - """Construct a dynamically-loading mapper property. - - This is essentially the same as - using the ``lazy='dynamic'`` argument with :func:`relationship`:: - - dynamic_loader(SomeClass) - - # is the same as - - relationship(SomeClass, lazy="dynamic") - - See the section :ref:`dynamic_relationship` for more details - on dynamic loading. - - """ - kw['lazy'] = 'dynamic' - return relationship(argument, **kw) - -def column_property(*cols, **kw): - """Provide a column-level property for use with a Mapper. - - Column-based properties can normally be applied to the mapper's - ``properties`` dictionary using the :class:`.Column` element directly. - Use this function when the given column is not directly present within the - mapper's selectable; examples include SQL expressions, functions, and - scalar SELECT queries. - - Columns that aren't present in the mapper's selectable won't be persisted - by the mapper and are effectively "read-only" attributes. - - :param \*cols: - list of Column objects to be mapped. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - scalar attribute should be loaded when replaced, if not - already loaded. Normally, history tracking logic for - simple non-primary-key scalar values only needs to be - aware of the "new" value in order to perform a flush. This - flag is available for applications that make use of - :func:`.attributes.get_history` or :meth:`.Session.is_modified` - which also need to know - the "previous" value of the attribute. - - .. versionadded:: 0.6.6 - - :param comparator_factory: a class which extends - :class:`.ColumnProperty.Comparator` which provides custom SQL clause - generation for comparison operations. - - :param group: - a group name for this property when marked as deferred. - - :param deferred: - when True, the column property is "deferred", meaning that - it does not load immediately, and is instead loaded when the - attribute is first accessed on an instance. See also - :func:`~sqlalchemy.orm.deferred`. - - :param doc: - optional string that will be applied as the doc on the - class-bound descriptor. - - :param expire_on_flush=True: - Disable expiry on flush. A column_property() which refers - to a SQL expression (and not a single table-bound column) - is considered to be a "read only" property; populating it - has no effect on the state of data, and it can only return - database state. For this reason a column_property()'s value - is expired whenever the parent object is involved in a - flush, that is, has any kind of "dirty" state within a flush. - Setting this parameter to ``False`` will have the effect of - leaving any existing value present after the flush proceeds. 
- Note, however, that a :class:`.Session` with default expiration - settings still expires - all attributes after a :meth:`.Session.commit` call. - - .. versionadded:: 0.7.3 - - :param extension: - an - :class:`.AttributeExtension` - instance, or list of extensions, which will be prepended - to the list of attribute listeners for the resulting - descriptor placed on the class. - **Deprecated.** Please see :class:`.AttributeEvents`. - - - """ - - return ColumnProperty(*cols, **kw) - -def composite(class_, *cols, **kwargs): - """Return a composite column-based property for use with a Mapper. - - See the mapping documentation section :ref:`mapper_composite` for a full - usage example. - - :param class\_: - The "composite type" class. - - :param \*cols: - List of Column objects to be mapped. - - :param active_history=False: - When ``True``, indicates that the "previous" value for a - scalar attribute should be loaded when replaced, if not - already loaded. See the same flag on :func:`.column_property`. - - .. versionchanged:: 0.7 - This flag specifically becomes meaningful - - previously it was a placeholder. - - :param group: - A group name for this property when marked as deferred. - - :param deferred: - When True, the column property is "deferred", meaning that it does not - load immediately, and is instead loaded when the attribute is first - accessed on an instance. See also :func:`~sqlalchemy.orm.deferred`. - - :param comparator_factory: a class which extends - :class:`.CompositeProperty.Comparator` and provides custom SQL clause - generation for comparison operations. - - :param doc: - optional string that will be applied as the doc on the - class-bound descriptor. - - :param extension: - an :class:`.AttributeExtension` instance, - or list of extensions, which will be prepended to the list of - attribute listeners for the resulting descriptor placed on the class. - **Deprecated.** Please see :class:`.AttributeEvents`. - - """ - return CompositeProperty(class_, *cols, **kwargs) - - -def backref(name, **kwargs): - """Create a back reference with explicit keyword arguments, which are the same - arguments one can send to :func:`relationship`. - - Used with the ``backref`` keyword argument to :func:`relationship` in - place of a string argument, e.g.:: - - 'items':relationship(SomeItem, backref=backref('parent', lazy='subquery')) - - """ - return (name, kwargs) - -def deferred(*columns, **kwargs): - """Return a :class:`.DeferredColumnProperty`, which indicates that this - object attribute should only be loaded from its corresponding - table column when first accessed. - - Used with the "properties" dictionary sent to :func:`mapper`. - - See also: - - :ref:`deferred` - - """ - return ColumnProperty(deferred=True, *columns, **kwargs) - -def mapper(class_, local_table=None, *args, **params): - """Return a new :class:`~.Mapper` object. - - This function is typically used behind the scenes - via the Declarative extension. When using Declarative, - many of the usual :func:`.mapper` arguments are handled - by the Declarative extension itself, including ``class_``, - ``local_table``, ``properties``, and ``inherits``.
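An illustrative sketch (the ``Document`` class is invented) combining the two helpers just defined - a ``deferred`` large column plus a read-only ``column_property`` SQL expression::

    from sqlalchemy import Column, Integer, String, Text, func
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import column_property, deferred

    Base = declarative_base()

    class Document(Base):
        __tablename__ = 'document'
        id = Column(Integer, primary_key=True)
        title = Column(String(200))
        # deferred: the potentially large body loads on first access only
        body = deferred(Column(Text))
        # column_property: read-only attribute backed by a SQL expression
        title_length = column_property(func.length(title))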
- Other options are passed to :func:`.mapper` using - the ``__mapper_args__`` class variable:: - - class MyClass(Base): - __tablename__ = 'my_table' - id = Column(Integer, primary_key=True) - type = Column(String(50)) - alt = Column("some_alt", Integer) - - __mapper_args__ = { - 'polymorphic_on' : type - } - - - Explicit use of :func:`.mapper` - is often referred to as *classical mapping*. The above - declarative example is equivalent in classical form to:: - - my_table = Table("my_table", metadata, - Column('id', Integer, primary_key=True), - Column('type', String(50)), - Column("some_alt", Integer) - ) - - class MyClass(object): - pass - - mapper(MyClass, my_table, - polymorphic_on=my_table.c.type, - properties={ - 'alt':my_table.c.some_alt - }) - - See also: - - :ref:`classical_mapping` - discussion of direct usage of - :func:`.mapper` - - :param class\_: The class to be mapped. When using Declarative, - this argument is automatically passed as the declared class - itself. - - :param local_table: The :class:`.Table` or other selectable - to which the class is mapped. May be ``None`` if - this mapper inherits from another mapper using single-table - inheritance. When using Declarative, this argument is - automatically passed by the extension, based on what - is configured via the ``__table__`` argument or via the :class:`.Table` - produced as a result of the ``__tablename__`` and :class:`.Column` - arguments present. - - :param always_refresh: If True, all query operations for this mapped - class will overwrite all data within object instances that already - exist within the session, erasing any in-memory changes with - whatever information was loaded from the database. Usage of this - flag is highly discouraged; as an alternative, see the method - :meth:`.Query.populate_existing`. - - :param allow_null_pks: This flag is deprecated - this is stated as - allow_partial_pks which defaults to True. - - :param allow_partial_pks: Defaults to True. Indicates that a - composite primary key with some NULL values should be considered as - possibly existing within the database. This affects whether a - mapper will assign an incoming row to an existing identity, as well - as if :meth:`.Session.merge` will check the database first for a - particular primary key value. A "partial primary key" can occur if - one has mapped to an OUTER JOIN, for example. - - :param batch: Defaults to ``True``, indicating that save operations - of multiple entities can be batched together for efficiency. - Setting to False indicates - that an instance will be fully saved before saving the next - instance. This is used in the extremely rare case that a - :class:`.MapperEvents` listener requires being called - in between individual row persistence operations. - - :param column_prefix: A string which will be prepended - to the mapped attribute name when :class:`.Column` - objects are automatically assigned as attributes to the - mapped class. Does not affect explicitly specified - column-based properties. - - See the section :ref:`column_prefix` for an example. - - :param concrete: If True, indicates this mapper should use concrete - table inheritance with its parent mapper. - - See the section :ref:`concrete_inheritance` for an example. - - :param exclude_properties: A list or set of string column names to - be excluded from mapping. - - See :ref:`include_exclude_cols` for an example. 
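As a quick hypothetical illustration of ``exclude_properties`` in a classical mapping (table and column names invented)::

    from sqlalchemy import Column, Integer, MetaData, String, Table
    from sqlalchemy.orm import mapper

    metadata = MetaData()
    user_table = Table('user', metadata,
        Column('id', Integer, primary_key=True),
        Column('name', String(50)),
        Column('legacy_blob', String(500)))

    class User(object):
        pass

    # 'legacy_blob' stays unmapped; User instances never see it
    mapper(User, user_table, exclude_properties=['legacy_blob'])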
- - :param extension: A :class:`.MapperExtension` instance or - list of :class:`.MapperExtension` - instances which will be applied to all operations by this - :class:`.Mapper`. **Deprecated.** Please see :class:`.MapperEvents`. - - :param include_properties: An inclusive list or set of string column - names to map. - - See :ref:`include_exclude_cols` for an example. - - :param inherits: A mapped class or the corresponding :class:`.Mapper` - of one indicating a superclass from which this :class:`.Mapper` - should *inherit*. The mapped class here must be a subclass of the - other mapper's class. When using Declarative, this argument - is passed automatically as a result of the natural class - hierarchy of the declared classes. - - See also: - - :ref:`inheritance_toplevel` - - :param inherit_condition: For joined table inheritance, a SQL - expression which will - define how the two tables are joined; defaults to a natural join - between the two tables. - - :param inherit_foreign_keys: When ``inherit_condition`` is used and the - columns present are missing a :class:`.ForeignKey` configuration, - this parameter can be used to specify which columns are "foreign". - In most cases this can be left as ``None``. - - :param non_primary: Specify that this :class:`.Mapper` is in addition - to the "primary" mapper, that is, the one used for persistence. - The :class:`.Mapper` created here may be used for ad-hoc - mapping of the class to an alternate selectable, for loading - only. - - The ``non_primary`` feature is rarely needed with modern - usage. - - :param order_by: A single :class:`.Column` or list of :class:`.Column` - objects which selection operations should use as the default - ordering for entities. By default mappers have no pre-defined - ordering. - - :param passive_updates: Indicates UPDATE behavior of foreign key - columns when a primary key column changes on a joined-table inheritance - mapping. Defaults to ``True``. - - When True, it is assumed that ON UPDATE CASCADE is configured on - the foreign key in the database, and that the database will handle - propagation of an UPDATE from a source column to dependent columns - on joined-table rows. - - When False, it is assumed that the database does not enforce - referential integrity and will not be issuing its own CASCADE - operation for an update. The :class:`.Mapper` here will - emit an UPDATE statement for the dependent columns during a - primary key change. - - See also: - - :ref:`passive_updates` - description of a similar feature as - used with :func:`.relationship` - - :param polymorphic_on: Specifies the column, attribute, or - SQL expression used to determine the target class for an - incoming row, when inheriting classes are present.
- - This value is commonly a :class:`.Column` object that's - present in the mapped :class:`.Table`:: - - class Employee(Base): - __tablename__ = 'employee' - - id = Column(Integer, primary_key=True) - discriminator = Column(String(50)) - - __mapper_args__ = { - "polymorphic_on":discriminator, - "polymorphic_identity":"employee" - } - - It may also be specified - as a SQL expression, as in this example where we - use the :func:`.case` construct to provide a conditional - approach:: - - class Employee(Base): - __tablename__ = 'employee' - - id = Column(Integer, primary_key=True) - discriminator = Column(String(50)) - - __mapper_args__ = { - "polymorphic_on":case([ - (discriminator == "EN", "engineer"), - (discriminator == "MA", "manager"), - ], else_="employee"), - "polymorphic_identity":"employee" - } - - It may also refer to any attribute - configured with :func:`.column_property`, or to the - string name of one:: - - class Employee(Base): - __tablename__ = 'employee' - - id = Column(Integer, primary_key=True) - discriminator = Column(String(50)) - employee_type = column_property( - case([ - (discriminator == "EN", "engineer"), - (discriminator == "MA", "manager"), - ], else_="employee") - ) - - __mapper_args__ = { - "polymorphic_on":employee_type, - "polymorphic_identity":"employee" - } - - .. versionchanged:: 0.7.4 - ``polymorphic_on`` may be specified as a SQL expression, - or refer to any attribute configured with - :func:`.column_property`, or to the string name of one. - - When setting ``polymorphic_on`` to reference an - attribute or expression that's not present in the - locally mapped :class:`.Table`, yet the value - of the discriminator should be persisted to the database, - the value of the - discriminator is not automatically set on new - instances; this must be handled by the user, - either through manual means or via event listeners. - A typical approach to establishing such a listener - looks like:: - - from sqlalchemy import event - from sqlalchemy.orm import object_mapper - - @event.listens_for(Employee, "init", propagate=True) - def set_identity(instance, *arg, **kw): - mapper = object_mapper(instance) - instance.discriminator = mapper.polymorphic_identity - - Where above, we assign the value of ``polymorphic_identity`` - for the mapped class to the ``discriminator`` attribute, - thus persisting the value to the ``discriminator`` column - in the database. - - See also: - - :ref:`inheritance_toplevel` - - :param polymorphic_identity: Specifies the value which - identifies this particular class as returned by the - column expression referred to by the ``polymorphic_on`` - setting. As rows are received, the value corresponding - to the ``polymorphic_on`` column expression is compared - to this value, indicating which subclass should - be used for the newly reconstructed object. - - :param properties: A dictionary mapping the string names of object - attributes to :class:`.MapperProperty` instances, which define the - persistence behavior of that attribute. Note that :class:`.Column` - objects present in - the mapped :class:`.Table` are automatically placed into - ``ColumnProperty`` instances upon mapping, unless overridden. - When using Declarative, this argument is passed automatically, - based on all those :class:`.MapperProperty` instances declared - in the declared class body. - - :param primary_key: A list of :class:`.Column` objects which define the - primary key to be used against this mapper's selectable unit. 
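Rounding out the ``polymorphic_on`` / ``polymorphic_identity`` pair, a short sketch of a subclass joining the single-table hierarchy above (``Engineer`` is invented for illustration)::

    from sqlalchemy import Column, Integer, String
    from sqlalchemy.ext.declarative import declarative_base

    Base = declarative_base()

    class Employee(Base):
        __tablename__ = 'employee'
        id = Column(Integer, primary_key=True)
        discriminator = Column(String(50))
        __mapper_args__ = {
            'polymorphic_on': discriminator,
            'polymorphic_identity': 'employee',
        }

    class Engineer(Employee):
        # no __tablename__: single-table inheritance; rows whose
        # discriminator is 'engineer' are loaded as Engineer objects
        __mapper_args__ = {'polymorphic_identity': 'engineer'}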
- This is normally simply the primary key of the ``local_table``, but - can be overridden here. - - :param version_id_col: A :class:`.Column` - that will be used to keep a running version id of mapped entities - in the database. This is used during save operations to ensure that - no other thread or process has updated the instance during the - lifetime of the entity, else a :class:`~sqlalchemy.orm.exc.StaleDataError` - exception is - thrown. By default the column must be of :class:`.Integer` type, - unless ``version_id_generator`` specifies a new generation - algorithm. - - :param version_id_generator: A callable which defines the algorithm - used to generate new version ids. Defaults to an integer - generator. Can be replaced with one that generates timestamps, - uuids, etc. e.g.:: - - import uuid - - class MyClass(Base): - __tablename__ = 'mytable' - id = Column(Integer, primary_key=True) - version_uuid = Column(String(32)) - - __mapper_args__ = { - 'version_id_col':version_uuid, - 'version_id_generator':lambda version:uuid.uuid4().hex - } - - The callable receives the current version identifier as its - single argument. - - :param with_polymorphic: A tuple in the form ``(<classes>, - <selectable>)`` indicating the default style of "polymorphic" - loading, that is, which tables are queried at once. ``<classes>`` is - any single or list of mappers and/or classes indicating the - inherited classes that should be loaded at once. The special value - ``'*'`` may be used to indicate all descending classes should be - loaded immediately. The second tuple argument ``<selectable>`` - indicates a selectable that will be used to query for multiple - classes. - - See also: - - :ref:`concrete_inheritance` - typically uses ``with_polymorphic`` - to specify a UNION statement to select from. - - :ref:`with_polymorphic` - usage example of the related - :meth:`.Query.with_polymorphic` method - - """ - return Mapper(class_, local_table, *args, **params) - -def synonym(name, map_column=False, descriptor=None, - comparator_factory=None, doc=None): - """Denote an attribute name as a synonym to a mapped property. - - .. versionchanged:: 0.7 - :func:`.synonym` is superseded by the :mod:`~sqlalchemy.ext.hybrid` - extension. See the documentation for hybrids - at :ref:`hybrids_toplevel`. - - Used with the ``properties`` dictionary sent to - :func:`~sqlalchemy.orm.mapper`:: - - class MyClass(object): - def _get_status(self): - return self._status - def _set_status(self, value): - self._status = value - status = property(_get_status, _set_status) - - mapper(MyClass, sometable, properties={ - "status":synonym("_status", map_column=True) - }) - - Above, the ``status`` attribute of MyClass will produce - expression behavior against the table column named ``status``, - using the Python attribute ``_status`` on the mapped class - to represent the underlying value. - - :param name: the name of the existing mapped property, which can be - any other ``MapperProperty`` including column-based properties and - relationships. - - :param map_column: if ``True``, an additional ``ColumnProperty`` is created - on the mapper automatically, using the synonym's name as the keyname of - the property, and the keyname of this ``synonym()`` as the name of the - column to map. - - """ - return SynonymProperty(name, map_column=map_column, - descriptor=descriptor, - comparator_factory=comparator_factory, - doc=doc) - -def comparable_property(comparator_factory, descriptor=None): - """Provides a method of applying a :class:`.PropComparator` - to any Python descriptor attribute. - - ..
versionchanged:: 0.7 - :func:`.comparable_property` is superseded by - the :mod:`~sqlalchemy.ext.hybrid` extension. See the example - at :ref:`hybrid_custom_comparators`. - - Allows any Python descriptor to behave like a SQL-enabled - attribute when used at the class level in queries, allowing - redefinition of expression operator behavior. - - In the example below we redefine :meth:`.PropComparator.operate` - to wrap both sides of an expression in ``func.lower()`` to produce - case-insensitive comparison:: - - from sqlalchemy.orm import comparable_property - from sqlalchemy.orm.interfaces import PropComparator - from sqlalchemy.sql import func - from sqlalchemy import Integer, String, Column - from sqlalchemy.ext.declarative import declarative_base - - class CaseInsensitiveComparator(PropComparator): - def __clause_element__(self): - return self.prop - - def operate(self, op, other): - return op( - func.lower(self.__clause_element__()), - func.lower(other) - ) - - Base = declarative_base() - - class SearchWord(Base): - __tablename__ = 'search_word' - id = Column(Integer, primary_key=True) - word = Column(String) - word_insensitive = comparable_property(lambda prop, mapper: - CaseInsensitiveComparator(mapper.c.word, mapper) - ) - - - A mapping like the above allows the ``word_insensitive`` attribute - to render an expression like:: - - >>> print SearchWord.word_insensitive == "Trucks" - lower(search_word.word) = lower(:lower_1) - - :param comparator_factory: - A PropComparator subclass or factory that defines operator behavior - for this property. - - :param descriptor: - Optional when used in a ``properties={}`` declaration. The Python - descriptor or property to layer comparison behavior on top of. - - The like-named descriptor will be automatically retrieved from the - mapped class if left blank in a ``properties`` declaration. - - """ - return ComparableProperty(comparator_factory, descriptor) - -@sa_util.deprecated("0.7", message=":func:`.compile_mappers` " - "is renamed to :func:`.configure_mappers`") -def compile_mappers(): - """Initialize the inter-mapper relationships of all mappers that have been defined.""" - - configure_mappers() - -def clear_mappers(): - """Remove all mappers from all classes. - - This function removes all instrumentation from classes and disposes - of their associated mappers. Once called, the classes are unmapped - and can be later re-mapped with new mappers. - - :func:`.clear_mappers` is *not* for normal use, as there is literally no - valid usage for it outside of very specific testing scenarios. Normally, - mappers are permanent structural components of user-defined classes, and - are never discarded independently of their class. If a mapped class itself - is garbage collected, its mapper is automatically disposed of as well. As - such, :func:`.clear_mappers` is only for usage in test suites that re-use - the same classes with different mappings, which is itself an extremely rare - use case - the only such use case is in fact SQLAlchemy's own test suite, - and possibly the test suites of other ORM extension libraries which - intend to test various combinations of mapper construction upon a fixed - set of classes. 
- - """ - mapperlib._COMPILE_MUTEX.acquire() - try: - while _mapper_registry: - try: - # can't even reliably call list(weakdict) in jython - mapper, b = _mapper_registry.popitem() - mapper.dispose() - except KeyError: - pass - finally: - mapperlib._COMPILE_MUTEX.release() - -def joinedload(*keys, **kw): - """Return a ``MapperOption`` that will convert the property of the given - name or series of mapped attributes into an joined eager load. - - .. versionchanged:: 0.6beta3 - This function is known as :func:`eagerload` in all versions - of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 - series. :func:`eagerload` will remain available for the foreseeable - future in order to enable cross-compatibility. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - examples:: - - # joined-load the "orders" collection on "User" - query(User).options(joinedload(User.orders)) - - # joined-load the "keywords" collection on each "Item", - # but not the "items" collection on "Order" - those - # remain lazily loaded. - query(Order).options(joinedload(Order.items, Item.keywords)) - - # to joined-load across both, use joinedload_all() - query(Order).options(joinedload_all(Order.items, Item.keywords)) - - # set the default strategy to be 'joined' - query(Order).options(joinedload('*')) - - :func:`joinedload` also accepts a keyword argument `innerjoin=True` which - indicates using an inner join instead of an outer:: - - query(Order).options(joinedload(Order.user, innerjoin=True)) - - .. note:: - - The join created by :func:`joinedload` is anonymously aliased such that - it **does not affect the query results**. An :meth:`.Query.order_by` - or :meth:`.Query.filter` call **cannot** reference these aliased - tables - so-called "user space" joins are constructed using - :meth:`.Query.join`. The rationale for this is that :func:`joinedload` is only - applied in order to affect how related objects or collections are loaded - as an optimizing detail - it can be added or removed with no impact - on actual results. See the section :ref:`zen_of_eager_loading` for - a detailed description of how this is used, including how to use a single - explicit JOIN for filtering/ordering and eager loading simultaneously. - - See also: :func:`subqueryload`, :func:`lazyload` - - """ - innerjoin = kw.pop('innerjoin', None) - if innerjoin is not None: - return ( - strategies.EagerLazyOption(keys, lazy='joined'), - strategies.EagerJoinOption(keys, innerjoin) - ) - else: - return strategies.EagerLazyOption(keys, lazy='joined') - -def joinedload_all(*keys, **kw): - """Return a ``MapperOption`` that will convert all properties along the - given dot-separated path or series of mapped attributes - into an joined eager load. - - .. versionchanged:: 0.6beta3 - This function is known as :func:`eagerload_all` in all versions - of SQLAlchemy prior to version 0.6beta3, including the 0.5 and 0.4 - series. :func:`eagerload_all` will remain available for the - foreseeable future in order to enable cross-compatibility. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - For example:: - - query.options(joinedload_all('orders.items.keywords'))... - - will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to - load in one joined eager load. 
- - Individual descriptors are accepted as arguments as well:: - - query.options(joinedload_all(User.orders, Order.items, Item.keywords)) - - The keyword arguments accept a flag `innerjoin=True|False` which will - override the value of the `innerjoin` flag specified on the - relationship(). - - See also: :func:`subqueryload_all`, :func:`lazyload` - - """ - innerjoin = kw.pop('innerjoin', None) - if innerjoin is not None: - return ( - strategies.EagerLazyOption(keys, lazy='joined', chained=True), - strategies.EagerJoinOption(keys, innerjoin, chained=True) - ) - else: - return strategies.EagerLazyOption(keys, lazy='joined', chained=True) - - -def eagerload(*args, **kwargs): - """A synonym for :func:`joinedload()`.""" - return joinedload(*args, **kwargs) - -def eagerload_all(*args, **kwargs): - """A synonym for :func:`joinedload_all()`.""" - return joinedload_all(*args, **kwargs) - -def subqueryload(*keys): - """Return a ``MapperOption`` that will convert the property - of the given name or series of mapped attributes - into a subquery eager load. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - examples:: - - # subquery-load the "orders" collection on "User" - query(User).options(subqueryload(User.orders)) - - # subquery-load the "keywords" collection on each "Item", - # but not the "items" collection on "Order" - those - # remain lazily loaded. - query(Order).options(subqueryload(Order.items, Item.keywords)) - - # to subquery-load across both, use subqueryload_all() - query(Order).options(subqueryload_all(Order.items, Item.keywords)) - - # set the default strategy to be 'subquery' - query(Order).options(subqueryload('*')) - - See also: :func:`joinedload`, :func:`lazyload` - - """ - return strategies.EagerLazyOption(keys, lazy="subquery") - -def subqueryload_all(*keys): - """Return a ``MapperOption`` that will convert all properties along the - given dot-separated path or series of mapped attributes - into a subquery eager load. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - For example:: - - query.options(subqueryload_all('orders.items.keywords'))... - - will set all of ``orders``, ``orders.items``, and ``orders.items.keywords`` to - load in one subquery eager load. - - Individual descriptors are accepted as arguments as well:: - - query.options(subqueryload_all(User.orders, Order.items, - Item.keywords)) - - See also: :func:`joinedload_all`, :func:`lazyload`, :func:`immediateload` - - """ - return strategies.EagerLazyOption(keys, lazy="subquery", chained=True) - -def lazyload(*keys): - """Return a ``MapperOption`` that will convert the property of the given - name or series of mapped attributes into a lazy load. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload` - - """ - return strategies.EagerLazyOption(keys, lazy=True) - -def lazyload_all(*keys): - """Return a ``MapperOption`` that will convert all the properties - along the given dot-separated path or series of mapped attributes - into a lazy load. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - See also: :func:`eagerload`, :func:`subqueryload`, :func:`immediateload` - - """ - return strategies.EagerLazyOption(keys, lazy=True, chained=True) - -def noload(*keys): - """Return a ``MapperOption`` that will convert the property of the - given name or series of mapped attributes into a non-load. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`.
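Continuing the hypothetical ``User``/``Order`` mapping from the earlier sketch (where ``backref='orders'`` supplies ``User.orders``), the strategies above differ only in the SQL they emit::

    from sqlalchemy.orm import lazyload, subqueryload

    # two statements total: the Users, then one extra SELECT for all
    # of their orders - instead of one lazy SELECT per parent row
    users = session.query(User).options(subqueryload(User.orders)).all()

    # force the relationship back to lazy loading for this query only
    users = session.query(User).options(lazyload(User.orders)).all()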
- - See also: :func:`lazyload`, :func:`eagerload`, - :func:`subqueryload`, :func:`immediateload` - - """ - return strategies.EagerLazyOption(keys, lazy=None) - -def immediateload(*keys): - """Return a ``MapperOption`` that will convert the property of the given - name or series of mapped attributes into an immediate load. - - The "immediate" load means the attribute will be fetched - with a separate SELECT statement per parent in the - same way as lazy loading - except the loader is guaranteed - to be called at load time before the parent object - is returned in the result. - - The normal behavior of lazy loading applies - if - the relationship is a simple many-to-one, and the child - object is already present in the :class:`.Session`, - no SELECT statement will be emitted. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - See also: :func:`lazyload`, :func:`eagerload`, :func:`subqueryload` - - .. versionadded:: 0.6.5 - - """ - return strategies.EagerLazyOption(keys, lazy='immediate') - -def contains_alias(alias): - """Return a :class:`.MapperOption` that will indicate to the query that - the main table has been aliased. - - This is used in the very rare case that :func:`.contains_eager` - is being used in conjunction with a user-defined SELECT - statement that aliases the parent table. E.g.:: - - # define an aliased UNION called 'ulist' - statement = users.select(users.c.user_id==7).\\ - union(users.select(users.c.user_id>7)).\\ - alias('ulist') - - # add on an eager load of "addresses" - statement = statement.outerjoin(addresses).\\ - select().apply_labels() - - # create query, indicating "ulist" will be an - # alias for the main table, "addresses" - # property should be eager loaded - query = session.query(User).options( - contains_alias('ulist'), - contains_eager('addresses')) - - # then get results via the statement - results = query.from_statement(statement).all() - - :param alias: is the string name of an alias, or a - :class:`~.sql.expression.Alias` object representing - the alias. - - """ - return AliasOption(alias) - -def contains_eager(*keys, **kwargs): - """Return a ``MapperOption`` that will indicate to the query that - the given attribute should be eagerly loaded from columns currently - in the query. - - Used with :meth:`~sqlalchemy.orm.query.Query.options`. - - The option is used in conjunction with an explicit join that loads - the desired rows, i.e.:: - - sess.query(Order).\\ - join(Order.user).\\ - options(contains_eager(Order.user)) - - The above query would join from the ``Order`` entity to its related - ``User`` entity, and the returned ``Order`` objects would have the - ``Order.user`` attribute pre-populated. - - :func:`contains_eager` also accepts an `alias` argument, which is the - string name of an alias, an :func:`~sqlalchemy.sql.expression.alias` - construct, or an :func:`~sqlalchemy.orm.aliased` construct. Use this when - the eagerly-loaded rows are to come from an aliased table:: - - user_alias = aliased(User) - sess.query(Order).\\ - join((user_alias, Order.user)).\\ - options(contains_eager(Order.user, alias=user_alias)) - - See also :func:`eagerload` for the "automatic" version of this - functionality. - - For additional examples of :func:`contains_eager` see - :ref:`contains_eager`. 
- - """ - alias = kwargs.pop('alias', None) - if kwargs: - raise exceptions.ArgumentError('Invalid kwargs for contains_eag' - 'er: %r' % kwargs.keys()) - return strategies.EagerLazyOption(keys, lazy='joined', - propagate_to_loaders=False, chained=True), \ - strategies.LoadEagerFromAliasOption(keys, alias=alias, chained=True) - -def defer(*key): - """Return a :class:`.MapperOption` that will convert the column property - of the given name into a deferred load. - - Used with :meth:`.Query.options`. - - e.g.:: - - from sqlalchemy.orm import defer - - query(MyClass).options(defer("attribute_one"), - defer("attribute_two")) - - A class bound descriptor is also accepted:: - - query(MyClass).options( - defer(MyClass.attribute_one), - defer(MyClass.attribute_two)) - - A "path" can be specified onto a related or collection object using a - dotted name. The :func:`.orm.defer` option will be applied to that object - when loaded:: - - query(MyClass).options( - defer("related.attribute_one"), - defer("related.attribute_two")) - - To specify a path via class, send multiple arguments:: - - query(MyClass).options( - defer(MyClass.related, MyOtherClass.attribute_one), - defer(MyClass.related, MyOtherClass.attribute_two)) - - See also: - - :ref:`deferred` - - :param \*key: A key representing an individual path. Multiple entries - are accepted to allow a multiple-token path for a single target, not - multiple targets. - - """ - return strategies.DeferredOption(key, defer=True) - -def undefer(*key): - """Return a :class:`.MapperOption` that will convert the column property - of the given name into a non-deferred (regular column) load. - - Used with :meth:`.Query.options`. - - e.g.:: - - from sqlalchemy.orm import undefer - - query(MyClass).options(undefer("attribute_one"), - undefer("attribute_two")) - - A class bound descriptor is also accepted:: - - query(MyClass).options( - undefer(MyClass.attribute_one), - undefer(MyClass.attribute_two)) - - A "path" can be specified onto a related or collection object using a - dotted name. The :func:`.orm.undefer` option will be applied to that - object when loaded:: - - query(MyClass).options( - undefer("related.attribute_one"), - undefer("related.attribute_two")) - - To specify a path via class, send multiple arguments:: - - query(MyClass).options( - undefer(MyClass.related, MyOtherClass.attribute_one), - undefer(MyClass.related, MyOtherClass.attribute_two)) - - See also: - - :func:`.orm.undefer_group` as a means to "undefer" a group - of attributes at once. - - :ref:`deferred` - - :param \*key: A key representing an individual path. Multiple entries - are accepted to allow a multiple-token path for a single target, not - multiple targets. - - """ - return strategies.DeferredOption(key, defer=False) - -def undefer_group(name): - """Return a :class:`.MapperOption` that will convert the given group of deferred - column properties into a non-deferred (regular column) load. - - Used with :meth:`.Query.options`. - - e.g.:: - - query(MyClass).options(undefer("group_one")) - - See also: - - :ref:`deferred` - - :param name: String name of the deferred group. This name is - established using the "group" name to the :func:`.orm.deferred` - configurational function. 
- - """ - return strategies.UndeferGroupOption(name) - -from sqlalchemy import util as _sa_util -_sa_util.importlater.resolve_all() diff --git a/libs/sqlalchemy/orm/attributes.py b/libs/sqlalchemy/orm/attributes.py deleted file mode 100644 index b40abd57..00000000 --- a/libs/sqlalchemy/orm/attributes.py +++ /dev/null @@ -1,1415 +0,0 @@ -# orm/attributes.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines instrumentation for class attributes and their interaction -with instances. - -This module is usually not directly visible to user applications, but -defines a large part of the ORM's interactivity. - - -""" - -import operator -from operator import itemgetter - -from sqlalchemy import util, event, exc as sa_exc -from sqlalchemy.orm import interfaces, collections, events, exc as orm_exc - - -mapperutil = util.importlater("sqlalchemy.orm", "util") - -PASSIVE_NO_RESULT = util.symbol('PASSIVE_NO_RESULT') -ATTR_WAS_SET = util.symbol('ATTR_WAS_SET') -ATTR_EMPTY = util.symbol('ATTR_EMPTY') -NO_VALUE = util.symbol('NO_VALUE') -NEVER_SET = util.symbol('NEVER_SET') - -PASSIVE_RETURN_NEVER_SET = util.symbol('PASSIVE_RETURN_NEVER_SET', -"""Symbol indicating that loader callables can be -fired off, but if no callable is applicable and no value is -present, the attribute should remain non-initialized. -NEVER_SET is returned in this case. -""") - -PASSIVE_NO_INITIALIZE = util.symbol('PASSIVE_NO_INITIALIZE', -"""Symbol indicating that loader callables should - not be fired off, and a non-initialized attribute - should remain that way. -""") - -PASSIVE_NO_FETCH = util.symbol('PASSIVE_NO_FETCH', -"""Symbol indicating that loader callables should not emit SQL, - but a value can be fetched from the current session. - - Non-initialized attributes should be initialized to an empty value. - -""") - -PASSIVE_NO_FETCH_RELATED = util.symbol('PASSIVE_NO_FETCH_RELATED', -"""Symbol indicating that loader callables should not emit SQL for - loading a related object, but can refresh the attributes of the local - instance in order to locate a related object in the current session. - - Non-initialized attributes should be initialized to an empty value. - - The unit of work uses this mode to check if history is present - on many-to-one attributes with minimal SQL emitted. - -""") - -PASSIVE_ONLY_PERSISTENT = util.symbol('PASSIVE_ONLY_PERSISTENT', -"""Symbol indicating that loader callables should only fire off for - parent objects which are persistent (i.e., have a database - identity). - - Load operations for the "previous" value of an attribute make - use of this flag during change events. - -""") - -PASSIVE_OFF = util.symbol('PASSIVE_OFF', -"""Symbol indicating that loader callables should be executed - normally. - -""") - - -class QueryableAttribute(interfaces.PropComparator): - """Base class for class-bound attributes. 
""" - - def __init__(self, class_, key, impl=None, - comparator=None, parententity=None): - self.class_ = class_ - self.key = key - self.impl = impl - self.comparator = comparator - self.parententity = parententity - - manager = manager_of_class(class_) - # manager is None in the case of AliasedClass - if manager: - # propagate existing event listeners from - # immediate superclass - for base in manager._bases: - if key in base: - self.dispatch._update(base[key].dispatch) - - dispatch = event.dispatcher(events.AttributeEvents) - dispatch.dispatch_cls._active_history = False - - @util.memoized_property - def _supports_population(self): - return self.impl.supports_population - - def get_history(self, instance, passive=PASSIVE_OFF): - return self.impl.get_history(instance_state(instance), - instance_dict(instance), passive) - - def __selectable__(self): - # TODO: conditionally attach this method based on clause_element ? - return self - - def __clause_element__(self): - return self.comparator.__clause_element__() - - def label(self, name): - return self.__clause_element__().label(name) - - def operate(self, op, *other, **kwargs): - return op(self.comparator, *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - return op(other, self.comparator, **kwargs) - - def hasparent(self, state, optimistic=False): - return self.impl.hasparent(state, optimistic=optimistic) is not False - - def __getattr__(self, key): - try: - return getattr(self.comparator, key) - except AttributeError: - raise AttributeError( - 'Neither %r object nor %r object has an attribute %r' % ( - type(self).__name__, - type(self.comparator).__name__, - key) - ) - - def __str__(self): - return "%s.%s" % (self.class_.__name__, self.key) - - @util.memoized_property - def property(self): - return self.comparator.property - - -class InstrumentedAttribute(QueryableAttribute): - """Class bound instrumented attribute which adds descriptor methods.""" - - def __set__(self, instance, value): - self.impl.set(instance_state(instance), - instance_dict(instance), value, None) - - def __delete__(self, instance): - self.impl.delete(instance_state(instance), instance_dict(instance)) - - def __get__(self, instance, owner): - if instance is None: - return self - - dict_ = instance_dict(instance) - if self._supports_population and self.key in dict_: - return dict_[self.key] - else: - return self.impl.get(instance_state(instance),dict_) - -def create_proxied_attribute(descriptor): - """Create an QueryableAttribute / user descriptor hybrid. - - Returns a new QueryableAttribute type that delegates descriptor - behavior and getattr() to the given descriptor. - """ - - # TODO: can move this to descriptor_props if the need for this - # function is removed from ext/hybrid.py - - class Proxy(QueryableAttribute): - """Presents the :class:`.QueryableAttribute` interface as a - proxy on top of a Python descriptor / :class:`.PropComparator` - combination. 
- - """ - - def __init__(self, class_, key, descriptor, comparator, - adapter=None, doc=None): - self.class_ = class_ - self.key = key - self.descriptor = descriptor - self._comparator = comparator - self.adapter = adapter - self.__doc__ = doc - - @property - def property(self): - return self.comparator.property - - @util.memoized_property - def comparator(self): - if util.callable(self._comparator): - self._comparator = self._comparator() - if self.adapter: - self._comparator = self._comparator.adapted(self.adapter) - return self._comparator - - def adapted(self, adapter): - """Proxy adapted() for the use case of AliasedClass calling adapted.""" - - return self.__class__(self.class_, self.key, self.descriptor, - self._comparator, - adapter) - - def __get__(self, instance, owner): - if instance is None: - return self - else: - return self.descriptor.__get__(instance, owner) - - def __str__(self): - return self.key - - def __getattr__(self, attribute): - """Delegate __getattr__ to the original descriptor and/or - comparator.""" - - try: - return getattr(descriptor, attribute) - except AttributeError: - try: - return getattr(self.comparator, attribute) - except AttributeError: - raise AttributeError( - 'Neither %r object nor %r object has an attribute %r' % ( - type(descriptor).__name__, - type(self.comparator).__name__, - attribute) - ) - - Proxy.__name__ = type(descriptor).__name__ + 'Proxy' - - util.monkeypatch_proxied_specials(Proxy, type(descriptor), - name='descriptor', - from_instance=descriptor) - return Proxy - -class AttributeImpl(object): - """internal implementation for instrumented attributes.""" - - def __init__(self, class_, key, - callable_, dispatch, trackparent=False, extension=None, - compare_function=None, active_history=False, - parent_token=None, expire_missing=True, - **kwargs): - """Construct an AttributeImpl. - - \class_ - associated class - - key - string name of the attribute - - \callable_ - optional function which generates a callable based on a parent - instance, which produces the "default" values for a scalar or - collection attribute when it's first accessed, if not present - already. - - trackparent - if True, attempt to track if an instance has a parent attached - to it via this attribute. - - extension - a single or list of AttributeExtension object(s) which will - receive set/delete/append/remove/etc. events. Deprecated. - The event package is now used. - - compare_function - a function that compares two values which are normally - assignable to this attribute. - - active_history - indicates that get_history() should always return the "old" value, - even if it means executing a lazy callable upon attribute change. - - parent_token - Usually references the MapperProperty, used as a key for - the hasparent() function to identify an "owning" attribute. - Allows multiple AttributeImpls to all match a single - owner attribute. - - expire_missing - if False, don't add an "expiry" callable to this attribute - during state.expire_attributes(None), if no value is present - for this key. 
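A rough sketch of where ``active_history`` surfaces in the public mapping API (``User`` and the ``users`` table are hypothetical)::

    from sqlalchemy.orm import mapper, column_property

    mapper(User, users, properties={
        # load the previous value before it is replaced, so that
        # get_history() can always report the "old" value
        'name': column_property(users.c.name, active_history=True)
    })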
- 
- """
- self.class_ = class_
- self.key = key
- self.callable_ = callable_
- self.dispatch = dispatch
- self.trackparent = trackparent
- self.parent_token = parent_token or self
- if compare_function is None:
- self.is_equal = operator.eq
- else:
- self.is_equal = compare_function
- 
- # TODO: pass in the manager here
- # instead of doing a lookup
- attr = manager_of_class(class_)[key]
- 
- for ext in util.to_list(extension or []):
- ext._adapt_listener(attr, ext)
- 
- if active_history:
- self.dispatch._active_history = True
- 
- self.expire_missing = expire_missing
- 
- def _get_active_history(self):
- """Backwards compat for impl.active_history"""
- 
- return self.dispatch._active_history
- 
- def _set_active_history(self, value):
- self.dispatch._active_history = value
- 
- active_history = property(_get_active_history, _set_active_history)
- 
- 
- def hasparent(self, state, optimistic=False):
- """Return the boolean value of a `hasparent` flag attached to
- the given state.
- 
- The `optimistic` flag determines what the default return value
- should be if no `hasparent` flag can be located.
- 
- As this function is used to determine if an instance is an
- *orphan*, instances that were loaded from storage should be
- assumed to not be orphans, until a True/False value for this
- flag is set.
- 
- An instance attribute that is loaded by a callable function
- will also not have a `hasparent` flag.
- 
- """
- assert self.trackparent, "This AttributeImpl is not configured to track parents."
- 
- return state.parents.get(id(self.parent_token), optimistic) \
- is not False
- 
- def sethasparent(self, state, parent_state, value):
- """Set a boolean flag on the given item corresponding to
- whether or not it is attached to a parent object via the
- attribute represented by this ``InstrumentedAttribute``.
- 
- """
- assert self.trackparent, "This AttributeImpl is not configured to track parents."
- 
- id_ = id(self.parent_token)
- if value:
- state.parents[id_] = parent_state
- else:
- if id_ in state.parents:
- last_parent = state.parents[id_]
- 
- if last_parent is not False and \
- last_parent.key != parent_state.key:
- 
- if last_parent.obj() is None:
- raise orm_exc.StaleDataError(
- "Removing state %s from parent "
- "state %s along attribute '%s', "
- "but the parent record "
- "has gone stale, can't be sure this "
- "is the most recent parent." %
- (mapperutil.state_str(state),
- mapperutil.state_str(parent_state),
- self.key))
- 
- return
- 
- state.parents[id_] = False
- 
- 
- def set_callable(self, state, callable_):
- """Set a callable function for this attribute on the given object.
- 
- This callable will be executed when the attribute is next
- accessed, and is assumed to construct part of the instance's
- previously stored state. When its value or values are loaded,
- they will be established as part of the instance's *committed
- state*. While *trackparent* information will be assembled for
- these instances, attribute-level event handlers will not be
- fired.
- 
- The callable overrides the class-level callable set in the
- ``InstrumentedAttribute`` constructor.
- 
- """
- state.callables[self.key] = callable_
- 
- def get_history(self, state, dict_, passive=PASSIVE_OFF):
- raise NotImplementedError()
- 
- def get_all_pending(self, state, dict_):
- """Return a list of tuples of (state, obj)
- for all objects in this attribute's current state
- + history.
- 
- Only applies to object-based attributes.
- - This is an inlining of existing functionality - which roughly corresponds to: - - get_state_history( - state, - key, - passive=PASSIVE_NO_INITIALIZE).sum() - - """ - raise NotImplementedError() - - def initialize(self, state, dict_): - """Initialize the given state's attribute with an empty value.""" - - dict_[self.key] = None - return None - - def get(self, state, dict_, passive=PASSIVE_OFF): - """Retrieve a value from the given object. - - If a callable is assembled on this object's attribute, and - passive is False, the callable will be executed and the - resulting value will be set as the new value for this attribute. - """ - if self.key in dict_: - return dict_[self.key] - else: - # if history present, don't load - key = self.key - if key not in state.committed_state or \ - state.committed_state[key] is NEVER_SET: - if passive is PASSIVE_NO_INITIALIZE: - return PASSIVE_NO_RESULT - - if key in state.callables: - callable_ = state.callables[key] - value = callable_(passive) - elif self.callable_: - value = self.callable_(state, passive) - else: - value = ATTR_EMPTY - - if value is PASSIVE_NO_RESULT or value is NEVER_SET: - return value - elif value is ATTR_WAS_SET: - try: - return dict_[key] - except KeyError: - # TODO: no test coverage here. - raise KeyError( - "Deferred loader for attribute " - "%r failed to populate " - "correctly" % key) - elif value is not ATTR_EMPTY: - return self.set_committed_value(state, dict_, value) - - if passive is PASSIVE_RETURN_NEVER_SET: - return NEVER_SET - else: - # Return a new, empty value - return self.initialize(state, dict_) - - def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, value, initiator, passive=passive) - - def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, None, initiator, - passive=passive, check_old=value) - - def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - self.set(state, dict_, None, initiator, - passive=passive, check_old=value, pop=True) - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, check_old=None, pop=False): - raise NotImplementedError() - - def get_committed_value(self, state, dict_, passive=PASSIVE_OFF): - """return the unchanged value of this attribute""" - - if self.key in state.committed_state: - value = state.committed_state[self.key] - if value is NO_VALUE: - return None - else: - return value - else: - return self.get(state, dict_, passive=passive) - - def set_committed_value(self, state, dict_, value): - """set an attribute value on the given instance and 'commit' it.""" - - dict_[self.key] = value - state.commit(dict_, [self.key]) - return value - -class ScalarAttributeImpl(AttributeImpl): - """represents a scalar value-holding InstrumentedAttribute.""" - - accepts_scalar_loader = True - uses_objects = False - supports_population = True - - def delete(self, state, dict_): - - # TODO: catch key errors, convert to attributeerror? 
- if self.dispatch._active_history:
- old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
- else:
- old = dict_.get(self.key, NO_VALUE)
- 
- if self.dispatch.remove:
- self.fire_remove_event(state, dict_, old, None)
- state.modified_event(dict_, self, old)
- del dict_[self.key]
- 
- def get_history(self, state, dict_, passive=PASSIVE_OFF):
- return History.from_scalar_attribute(
- self, state, dict_.get(self.key, NO_VALUE))
- 
- def set(self, state, dict_, value, initiator,
- passive=PASSIVE_OFF, check_old=None, pop=False):
- if initiator and initiator.parent_token is self.parent_token:
- return
- 
- if self.dispatch._active_history:
- old = self.get(state, dict_, PASSIVE_RETURN_NEVER_SET)
- else:
- old = dict_.get(self.key, NO_VALUE)
- 
- if self.dispatch.set:
- value = self.fire_replace_event(state, dict_,
- value, old, initiator)
- state.modified_event(dict_, self, old)
- dict_[self.key] = value
- 
- def fire_replace_event(self, state, dict_, value, previous, initiator):
- for fn in self.dispatch.set:
- value = fn(state, value, previous, initiator or self)
- return value
- 
- def fire_remove_event(self, state, dict_, value, initiator):
- for fn in self.dispatch.remove:
- fn(state, value, initiator or self)
- 
- @property
- def type(self):
- return self.property.columns[0].type
- 
- 
-class MutableScalarAttributeImpl(ScalarAttributeImpl):
- """represents a scalar value-holding InstrumentedAttribute, which can
- detect changes within the value itself.
- 
- """
- 
- uses_objects = False
- supports_population = True
- 
- def __init__(self, class_, key, callable_, dispatch,
- class_manager, copy_function=None,
- compare_function=None, **kwargs):
- super(ScalarAttributeImpl, self).__init__(
- class_,
- key,
- callable_, dispatch,
- compare_function=compare_function,
- **kwargs)
- class_manager.mutable_attributes.add(key)
- if copy_function is None:
- raise sa_exc.ArgumentError(
- "MutableScalarAttributeImpl requires a copy function")
- self.copy = copy_function
- 
- def get_history(self, state, dict_, passive=PASSIVE_OFF):
- if not dict_:
- v = state.committed_state.get(self.key, NO_VALUE)
- else:
- v = dict_.get(self.key, NO_VALUE)
- 
- return History.from_scalar_attribute(self, state, v)
- 
- def check_mutable_modified(self, state, dict_):
- a, u, d = self.get_history(state, dict_)
- return bool(a or d)
- 
- def get(self, state, dict_, passive=PASSIVE_OFF):
- if self.key not in state.mutable_dict:
- ret = ScalarAttributeImpl.get(self, state, dict_, passive=passive)
- if ret is not PASSIVE_NO_RESULT:
- state.mutable_dict[self.key] = ret
- return ret
- else:
- return state.mutable_dict[self.key]
- 
- def delete(self, state, dict_):
- ScalarAttributeImpl.delete(self, state, dict_)
- state.mutable_dict.pop(self.key)
- 
- def set(self, state, dict_, value, initiator,
- passive=PASSIVE_OFF, check_old=None, pop=False):
- ScalarAttributeImpl.set(self, state, dict_, value,
- initiator, passive, check_old=check_old, pop=pop)
- state.mutable_dict[self.key] = value
- 
- 
-class ScalarObjectAttributeImpl(ScalarAttributeImpl):
- """represents a scalar-holding InstrumentedAttribute,
- where the target object is also instrumented.
- 
- Adds events to delete/set operations.
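The replace events fired here are the same ones exposed through the public attribute event API; a rough sketch (``Order`` is a hypothetical mapped class with a many-to-one ``user``)::

    from sqlalchemy import event

    @event.listens_for(Order.user, 'set')
    def receive_set(target, value, oldvalue, initiator):
        # invoked each time Order.user is replaced on an instance
        print("Order.user: %r -> %r" % (oldvalue, value))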
- - """ - - accepts_scalar_loader = False - uses_objects = True - supports_population = True - - def delete(self, state, dict_): - old = self.get(state, dict_) - self.fire_remove_event(state, dict_, old, self) - del dict_[self.key] - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - if self.key in dict_: - return History.from_object_attribute(self, state, dict_[self.key]) - else: - if passive is PASSIVE_OFF: - passive = PASSIVE_RETURN_NEVER_SET - current = self.get(state, dict_, passive=passive) - if current is PASSIVE_NO_RESULT: - return HISTORY_BLANK - else: - return History.from_object_attribute(self, state, current) - - def get_all_pending(self, state, dict_): - if self.key in dict_: - current = dict_[self.key] - if current is not None: - ret = [(instance_state(current), current)] - else: - ret = [(None, None)] - - if self.key in state.committed_state: - original = state.committed_state[self.key] - if original not in (NEVER_SET, PASSIVE_NO_RESULT, None) and \ - original is not current: - - ret.append((instance_state(original), original)) - return ret - else: - return [] - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, check_old=None, pop=False): - """Set a value on the given InstanceState. - - `initiator` is the ``InstrumentedAttribute`` that initiated the - ``set()`` operation and is used to control the depth of a circular - setter operation. - - """ - if initiator and initiator.parent_token is self.parent_token: - return - - if self.dispatch._active_history: - old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) - else: - old = self.get(state, dict_, passive=PASSIVE_NO_FETCH) - - if check_old is not None and \ - old is not PASSIVE_NO_RESULT and \ - check_old is not old: - if pop: - return - else: - raise ValueError( - "Object %s not associated with %s on attribute '%s'" % ( - mapperutil.instance_str(check_old), - mapperutil.state_str(state), - self.key - )) - value = self.fire_replace_event(state, dict_, value, old, initiator) - dict_[self.key] = value - - def fire_remove_event(self, state, dict_, value, initiator): - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, initiator or self) - - state.modified_event(dict_, self, value) - - def fire_replace_event(self, state, dict_, value, previous, initiator): - if self.trackparent: - if (previous is not value and - previous is not None and - previous is not PASSIVE_NO_RESULT): - self.sethasparent(instance_state(previous), state, False) - - for fn in self.dispatch.set: - value = fn(state, value, previous, initiator or self) - - state.modified_event(dict_, self, previous) - - if self.trackparent: - if value is not None: - self.sethasparent(instance_state(value), state, True) - - return value - - -class CollectionAttributeImpl(AttributeImpl): - """A collection-holding attribute that instruments changes in membership. - - Only handles collections of instrumented objects. - - InstrumentedCollectionAttribute holds an arbitrary, user-specified - container object (defaulting to a list) and brokers access to the - CollectionAdapter, a "view" onto that object that presents consistent bag - semantics to the orm layer independent of the user data implementation. 
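How the adapter view is reached from user code can be sketched as follows (``someparent`` with a loaded ``children`` collection, plus ``newchild``, are hypothetical)::

    from sqlalchemy.orm.collections import collection_adapter

    adapter = collection_adapter(someparent.children)
    adapter.append_with_event(newchild)     # mutates and fires ORM events
    adapter.append_without_event(newchild)  # mutates silently, no history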
- - """ - accepts_scalar_loader = False - uses_objects = True - supports_population = True - - def __init__(self, class_, key, callable_, dispatch, - typecallable=None, trackparent=False, extension=None, - copy_function=None, compare_function=None, **kwargs): - super(CollectionAttributeImpl, self).__init__( - class_, - key, - callable_, dispatch, - trackparent=trackparent, - extension=extension, - compare_function=compare_function, - **kwargs) - - if copy_function is None: - copy_function = self.__copy - self.copy = copy_function - self.collection_factory = typecallable - - def __copy(self, item): - return [y for y in list(collections.collection_adapter(item))] - - def get_history(self, state, dict_, passive=PASSIVE_OFF): - current = self.get(state, dict_, passive=passive) - if current is PASSIVE_NO_RESULT: - return HISTORY_BLANK - else: - return History.from_collection(self, state, current) - - def get_all_pending(self, state, dict_): - if self.key not in dict_: - return [] - - current = dict_[self.key] - current = getattr(current, '_sa_adapter') - - if self.key in state.committed_state: - original = state.committed_state[self.key] - if original is not NO_VALUE: - current_states = [((c is not None) and - instance_state(c) or None, c) - for c in current] - original_states = [((c is not None) and - instance_state(c) or None, c) - for c in original] - - current_set = dict(current_states) - original_set = dict(original_states) - - return \ - [(s, o) for s, o in current_states if s not in original_set] + \ - [(s, o) for s, o in current_states if s in original_set] + \ - [(s, o) for s, o in original_states if s not in current_set] - - return [(instance_state(o), o) for o in current] - - - def fire_append_event(self, state, dict_, value, initiator): - for fn in self.dispatch.append: - value = fn(state, value, initiator or self) - - state.modified_event(dict_, self, NEVER_SET, True) - - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, True) - - return value - - def fire_pre_remove_event(self, state, dict_, initiator): - state.modified_event(dict_, self, NEVER_SET, True) - - def fire_remove_event(self, state, dict_, value, initiator): - if self.trackparent and value is not None: - self.sethasparent(instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, initiator or self) - - state.modified_event(dict_, self, NEVER_SET, True) - - def delete(self, state, dict_): - if self.key not in dict_: - return - - state.modified_event(dict_, self, NEVER_SET, True) - - collection = self.get_collection(state, state.dict) - collection.clear_with_event() - # TODO: catch key errors, convert to attributeerror? - del dict_[self.key] - - def initialize(self, state, dict_): - """Initialize this attribute with an empty collection.""" - - _, user_data = self._initialize_collection(state) - dict_[self.key] = user_data - return user_data - - def _initialize_collection(self, state): - return state.manager.initialize_collection( - self.key, state, self.collection_factory) - - def append(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - if initiator and initiator.parent_token is self.parent_token: - return - - collection = self.get_collection(state, dict_, passive=passive) - if collection is PASSIVE_NO_RESULT: - value = self.fire_append_event(state, dict_, value, initiator) - assert self.key not in dict_, \ - "Collection was loaded during event handling." 
- state.get_pending(self.key).append(value) - else: - collection.append_with_event(value, initiator) - - def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - if initiator and initiator.parent_token is self.parent_token: - return - - collection = self.get_collection(state, state.dict, passive=passive) - if collection is PASSIVE_NO_RESULT: - self.fire_remove_event(state, dict_, value, initiator) - assert self.key not in dict_, \ - "Collection was loaded during event handling." - state.get_pending(self.key).remove(value) - else: - collection.remove_with_event(value, initiator) - - def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF): - try: - # TODO: better solution here would be to add - # a "popper" role to collections.py to complement - # "remover". - self.remove(state, dict_, value, initiator, passive=passive) - except (ValueError, KeyError, IndexError): - pass - - def set(self, state, dict_, value, initiator, - passive=PASSIVE_OFF, pop=False): - """Set a value on the given object. - - `initiator` is the ``InstrumentedAttribute`` that initiated the - ``set()`` operation and is used to control the depth of a circular - setter operation. - """ - - if initiator and initiator.parent_token is self.parent_token: - return - - self._set_iterable( - state, dict_, value, - lambda adapter, i: adapter.adapt_like_to_iterable(i)) - - def _set_iterable(self, state, dict_, iterable, adapter=None): - """Set a collection value from an iterable of state-bearers. - - ``adapter`` is an optional callable invoked with a CollectionAdapter - and the iterable. Should return an iterable of state-bearing - instances suitable for appending via a CollectionAdapter. Can be used - for, e.g., adapting an incoming dictionary into an iterator of values - rather than keys. - - """ - # pulling a new collection first so that an adaptation exception does - # not trigger a lazy load of the old collection. - new_collection, user_data = self._initialize_collection(state) - if adapter: - new_values = list(adapter(new_collection, iterable)) - else: - new_values = list(iterable) - - old = self.get(state, dict_, passive=PASSIVE_ONLY_PERSISTENT) - if old is PASSIVE_NO_RESULT: - old = self.initialize(state, dict_) - elif old is iterable: - # ignore re-assignment of the current collection, as happens - # implicitly with in-place operators (foo.collection |= other) - return - - # place a copy of "old" in state.committed_state - state.modified_event(dict_, self, old, True) - - old_collection = getattr(old, '_sa_adapter') - - dict_[self.key] = user_data - - collections.bulk_replace(new_values, old_collection, new_collection) - old_collection.unlink(old) - - def set_committed_value(self, state, dict_, value): - """Set an attribute value on the given instance and 'commit' it.""" - - collection, user_data = self._initialize_collection(state) - - if value: - collection.append_multiple_without_event(value) - - state.dict[self.key] = user_data - - state.commit(dict_, [self.key]) - - if self.key in state.pending: - # pending items exist. issue a modified event, - # add/remove new items. 
- state.modified_event(dict_, self, user_data, True) - - pending = state.pending.pop(self.key) - added = pending.added_items - removed = pending.deleted_items - for item in added: - collection.append_without_event(item) - for item in removed: - collection.remove_without_event(item) - - return user_data - - def get_collection(self, state, dict_, - user_data=None, passive=PASSIVE_OFF): - """Retrieve the CollectionAdapter associated with the given state. - - Creates a new CollectionAdapter if one does not exist. - - """ - if user_data is None: - user_data = self.get(state, dict_, passive=passive) - if user_data is PASSIVE_NO_RESULT: - return user_data - - return getattr(user_data, '_sa_adapter') - -def backref_listeners(attribute, key, uselist): - """Apply listeners to synchronize a two-way relationship.""" - - # use easily recognizable names for stack traces - - parent_token = attribute.impl.parent_token - - def _acceptable_key_err(child_state, initiator): - raise ValueError( - "Object %s not associated with attribute of " - "type %s" % (mapperutil.state_str(child_state), - manager_of_class(initiator.class_)[initiator.key])) - - def emit_backref_from_scalar_set_event(state, child, oldchild, initiator): - if oldchild is child: - return child - - if oldchild is not None and oldchild is not PASSIVE_NO_RESULT: - # With lazy=None, there's no guarantee that the full collection is - # present when updating via a backref. - old_state, old_dict = instance_state(oldchild),\ - instance_dict(oldchild) - impl = old_state.manager[key].impl - impl.pop(old_state, - old_dict, - state.obj(), - initiator, passive=PASSIVE_NO_FETCH) - - if child is not None: - child_state, child_dict = instance_state(child),\ - instance_dict(child) - child_impl = child_state.manager[key].impl - if initiator.parent_token is not parent_token and \ - initiator.parent_token is not child_impl.parent_token: - _acceptable_key_err(state, initiator) - child_impl.append( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - return child - - def emit_backref_from_collection_append_event(state, child, initiator): - child_state, child_dict = instance_state(child), \ - instance_dict(child) - child_impl = child_state.manager[key].impl - if initiator.parent_token is not parent_token and \ - initiator.parent_token is not child_impl.parent_token: - _acceptable_key_err(state, initiator) - child_impl.append( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - return child - - def emit_backref_from_collection_remove_event(state, child, initiator): - if child is not None: - child_state, child_dict = instance_state(child),\ - instance_dict(child) - child_impl = child_state.manager[key].impl - # can't think of a path that would produce an initiator - # mismatch here, as it would require an existing collection - # mismatch. 
- child_impl.pop( - child_state, - child_dict, - state.obj(), - initiator, - passive=PASSIVE_NO_FETCH) - - if uselist: - event.listen(attribute, "append", - emit_backref_from_collection_append_event, - retval=True, raw=True) - else: - event.listen(attribute, "set", - emit_backref_from_scalar_set_event, - retval=True, raw=True) - # TODO: need coverage in test/orm/ of remove event - event.listen(attribute, "remove", - emit_backref_from_collection_remove_event, - retval=True, raw=True) - -_NO_HISTORY = util.symbol('NO_HISTORY') -_NO_STATE_SYMBOLS = frozenset([ - id(PASSIVE_NO_RESULT), - id(NO_VALUE), - id(NEVER_SET)]) -class History(tuple): - """A 3-tuple of added, unchanged and deleted values, - representing the changes which have occurred on an instrumented - attribute. - - Each tuple member is an iterable sequence. - - """ - - __slots__ = () - - added = property(itemgetter(0)) - """Return the collection of items added to the attribute (the first tuple - element).""" - - unchanged = property(itemgetter(1)) - """Return the collection of items that have not changed on the attribute - (the second tuple element).""" - - - deleted = property(itemgetter(2)) - """Return the collection of items that have been removed from the - attribute (the third tuple element).""" - - def __new__(cls, added, unchanged, deleted): - return tuple.__new__(cls, (added, unchanged, deleted)) - - def __nonzero__(self): - return self != HISTORY_BLANK - - def empty(self): - """Return True if this :class:`.History` has no changes - and no existing, unchanged state. - - """ - - return not bool( - (self.added or self.deleted) - or self.unchanged and self.unchanged != [None] - ) - - def sum(self): - """Return a collection of added + unchanged + deleted.""" - - return (self.added or []) +\ - (self.unchanged or []) +\ - (self.deleted or []) - - def non_deleted(self): - """Return a collection of added + unchanged.""" - - return (self.added or []) +\ - (self.unchanged or []) - - def non_added(self): - """Return a collection of unchanged + deleted.""" - - return (self.unchanged or []) +\ - (self.deleted or []) - - def has_changes(self): - """Return True if this :class:`.History` has changes.""" - - return bool(self.added or self.deleted) - - def as_state(self): - return History( - [(c is not None) - and instance_state(c) or None - for c in self.added], - [(c is not None) - and instance_state(c) or None - for c in self.unchanged], - [(c is not None) - and instance_state(c) or None - for c in self.deleted], - ) - - @classmethod - def from_scalar_attribute(cls, attribute, state, current): - original = state.committed_state.get(attribute.key, _NO_HISTORY) - - if original is _NO_HISTORY: - if current is NO_VALUE: - return cls((), (), ()) - else: - return cls((), [current], ()) - # don't let ClauseElement expressions here trip things up - elif attribute.is_equal(current, original) is True: - return cls((), [current], ()) - else: - # current convention on native scalars is to not - # include information - # about missing previous value in "deleted", but - # we do include None, which helps in some primary - # key situations - if id(original) in _NO_STATE_SYMBOLS: - deleted = () - else: - deleted = [original] - if current is NO_VALUE: - return cls((), (), deleted) - else: - return cls([current], (), deleted) - - @classmethod - def from_object_attribute(cls, attribute, state, current): - original = state.committed_state.get(attribute.key, _NO_HISTORY) - - if original is _NO_HISTORY: - if current is NO_VALUE or current is NEVER_SET: - return 
cls((), (), ())
- else:
- return cls((), [current], ())
- elif current is original:
- return cls((), [current], ())
- else:
- # current convention on related objects is to not
- # include information
- # about missing previous value in "deleted", and
- # to also not include None - the dependency.py rules
- # ignore the None in any case.
- if id(original) in _NO_STATE_SYMBOLS or original is None:
- deleted = ()
- else:
- deleted = [original]
- if current is NO_VALUE or current is NEVER_SET:
- return cls((), (), deleted)
- else:
- return cls([current], (), deleted)
- 
- @classmethod
- def from_collection(cls, attribute, state, current):
- original = state.committed_state.get(attribute.key, _NO_HISTORY)
- current = getattr(current, '_sa_adapter')
- 
- if original is NO_VALUE:
- return cls(list(current), (), ())
- elif original is _NO_HISTORY:
- return cls((), list(current), ())
- else:
- 
- current_states = [((c is not None) and instance_state(c) or None, c)
- for c in current
- ]
- original_states = [((c is not None) and instance_state(c) or None, c)
- for c in original
- ]
- 
- current_set = dict(current_states)
- original_set = dict(original_states)
- 
- return cls(
- [o for s, o in current_states if s not in original_set],
- [o for s, o in current_states if s in original_set],
- [o for s, o in original_states if s not in current_set]
- )
- 
-HISTORY_BLANK = History(None, None, None)
- 
-def get_history(obj, key, passive=PASSIVE_OFF):
- """Return a :class:`.History` record for the given object
- and attribute key.
- 
- :param obj: an object whose class is instrumented by the
- attributes package.
- 
- :param key: string attribute name.
- 
- :param passive: indicates if the attribute should be
- loaded from the database if not already present (:attr:`.PASSIVE_NO_FETCH`), and
- if the attribute should not be initialized to a blank value otherwise
- (:attr:`.PASSIVE_NO_INITIALIZE`). Default is :attr:`PASSIVE_OFF`.
- 
- """
- if passive is True:
- util.warn_deprecated("Passing True for 'passive' is deprecated. "
- "Use attributes.PASSIVE_NO_INITIALIZE")
- passive = PASSIVE_NO_INITIALIZE
- elif passive is False:
- util.warn_deprecated("Passing False for 'passive' is "
- "deprecated. 
Use attributes.PASSIVE_OFF") - passive = PASSIVE_OFF - - return get_state_history(instance_state(obj), key, passive) - -def get_state_history(state, key, passive=PASSIVE_OFF): - return state.get_history(key, passive) - - -def has_parent(cls, obj, key, optimistic=False): - """TODO""" - manager = manager_of_class(cls) - state = instance_state(obj) - return manager.has_parent(state, key, optimistic) - -def register_attribute(class_, key, **kw): - comparator = kw.pop('comparator', None) - parententity = kw.pop('parententity', None) - doc = kw.pop('doc', None) - desc = register_descriptor(class_, key, - comparator, parententity, doc=doc) - register_attribute_impl(class_, key, **kw) - return desc - -def register_attribute_impl(class_, key, - uselist=False, callable_=None, - useobject=False, mutable_scalars=False, - impl_class=None, backref=None, **kw): - - manager = manager_of_class(class_) - if uselist: - factory = kw.pop('typecallable', None) - typecallable = manager.instrument_collection_class( - key, factory or list) - else: - typecallable = kw.pop('typecallable', None) - - dispatch = manager[key].dispatch - - if impl_class: - impl = impl_class(class_, key, typecallable, dispatch, **kw) - elif uselist: - impl = CollectionAttributeImpl(class_, key, callable_, dispatch, - typecallable=typecallable, **kw) - elif useobject: - impl = ScalarObjectAttributeImpl(class_, key, callable_, - dispatch,**kw) - elif mutable_scalars: - impl = MutableScalarAttributeImpl(class_, key, callable_, dispatch, - class_manager=manager, **kw) - else: - impl = ScalarAttributeImpl(class_, key, callable_, dispatch, **kw) - - manager[key].impl = impl - - if backref: - backref_listeners(manager[key], backref, uselist) - - manager.post_configure_attribute(key) - return manager[key] - -def register_descriptor(class_, key, comparator=None, - parententity=None, doc=None): - manager = manager_of_class(class_) - - descriptor = InstrumentedAttribute(class_, key, comparator=comparator, - parententity=parententity) - - descriptor.__doc__ = doc - - manager.instrument_attribute(key, descriptor) - return descriptor - -def unregister_attribute(class_, key): - manager_of_class(class_).uninstrument_attribute(key) - -def init_collection(obj, key): - """Initialize a collection attribute and return the collection adapter. - - This function is used to provide direct access to collection internals - for a previously unloaded attribute. e.g.:: - - collection_adapter = init_collection(someobject, 'elements') - for elem in values: - collection_adapter.append_without_event(elem) - - For an easier way to do the above, see - :func:`~sqlalchemy.orm.attributes.set_committed_value`. - - obj is an instrumented object instance. An InstanceState - is accepted directly for backwards compatibility but - this usage is deprecated. - - """ - state = instance_state(obj) - dict_ = state.dict - return init_state_collection(state, dict_, key) - -def init_state_collection(state, dict_, key): - """Initialize a collection attribute and return the collection adapter.""" - - attr = state.manager[key].impl - user_data = attr.initialize(state, dict_) - return attr.get_collection(state, dict_, user_data) - -def set_committed_value(instance, key, value): - """Set the value of an attribute with no history events. - - Cancels any previous history present. The value should be - a scalar value for scalar-holding attributes, or - an iterable for any collection-holding attribute. 
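A rough sketch of the public entry points above (``someparent``, ``Child`` and ``session`` are hypothetical)::

    from sqlalchemy.orm.attributes import get_history, set_committed_value

    # attach a separately-loaded collection as though it were part of the
    # original loaded state; no history events are generated
    children = session.query(Child).\
        filter(Child.parent_id == someparent.id).\
        all()
    set_committed_value(someparent, 'children', children)

    # the History 3-tuple then reports no pending changes
    assert not get_history(someparent, 'children').has_changes()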
- 
- This is the same underlying method used when a lazy loader
- fires off and loads additional data from the database.
- In particular, this method can be used by application code
- which has loaded additional attributes or collections through
- separate queries, which can then be attached to an instance
- as though it were part of its original loaded state.
- 
- """
- state, dict_ = instance_state(instance), instance_dict(instance)
- state.manager[key].impl.set_committed_value(state, dict_, value)
- 
-def set_attribute(instance, key, value):
- """Set the value of an attribute, firing history events.
- 
- This function may be used regardless of instrumentation
- applied directly to the class, i.e. no descriptors are required.
- Custom attribute management schemes will need to use
- this method to establish attribute state as understood
- by SQLAlchemy.
- 
- """
- state, dict_ = instance_state(instance), instance_dict(instance)
- state.manager[key].impl.set(state, dict_, value, None)
- 
-def get_attribute(instance, key):
- """Get the value of an attribute, firing any callables required.
- 
- This function may be used regardless of instrumentation
- applied directly to the class, i.e. no descriptors are required.
- Custom attribute management schemes will need to use
- this method to access attribute state as understood
- by SQLAlchemy.
- 
- """
- state, dict_ = instance_state(instance), instance_dict(instance)
- return state.manager[key].impl.get(state, dict_)
- 
-def del_attribute(instance, key):
- """Delete the value of an attribute, firing history events.
- 
- This function may be used regardless of instrumentation
- applied directly to the class, i.e. no descriptors are required.
- Custom attribute management schemes will need to use
- this method to establish attribute state as understood
- by SQLAlchemy.
- 
- """
- state, dict_ = instance_state(instance), instance_dict(instance)
- state.manager[key].impl.delete(state, dict_)
- 
-def flag_modified(instance, key):
- """Mark an attribute on an instance as 'modified'.
- 
- This sets the 'modified' flag on the instance and
- establishes an unconditional change event for the given attribute.
- 
- """
- state, dict_ = instance_state(instance), instance_dict(instance)
- impl = state.manager[key].impl
- state.modified_event(dict_, impl, NO_VALUE)
- 
diff --git a/libs/sqlalchemy/orm/collections.py b/libs/sqlalchemy/orm/collections.py
deleted file mode 100644
index e26a5973..00000000
--- a/libs/sqlalchemy/orm/collections.py
+++ /dev/null
@@ -1,1578 +0,0 @@
-# orm/collections.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
- 
-"""Support for collections of mapped entities.
- 
-The collections package supplies the machinery used to inform the ORM of
-collection membership changes. An instrumentation via decoration approach is
-used, allowing arbitrary types (including built-ins) to be used as entity
-collections without requiring inheritance from a base class.
- 
-Instrumentation decoration relays membership change events to the
-``InstrumentedCollectionAttribute`` that is currently managing the collection.
-The decorators observe function call arguments and return values, tracking
-entities entering or leaving the collection. Two decorator approaches are
-provided. 
One is a bundle of generic decorators that map function arguments
-and return values to events::
- 
- from sqlalchemy.orm.collections import collection
- class MyClass(object):
- # ...
- 
- @collection.adds(1)
- def store(self, item):
- self.data.append(item)
- 
- @collection.removes_return()
- def pop(self):
- return self.data.pop()
- 
- 
-The second approach is a bundle of targeted decorators that wrap appropriate
-append and remove notifiers around the mutation methods present in the
-standard Python ``list``, ``set`` and ``dict`` interfaces. These could be
-specified in terms of generic decorator recipes, but are instead hand-tooled
-for increased efficiency. The targeted decorators occasionally implement
-adapter-like behavior, such as mapping bulk-set methods (``extend``,
-``update``, ``__setslice__``, etc.) into the series of atomic mutation events
-that the ORM requires.
- 
-The targeted decorators are used internally for automatic instrumentation of
-entity collection classes. Every collection class goes through a
-transformation process roughly like so:
- 
-1. If the class is a built-in, substitute a trivial sub-class
-2. Is this class already instrumented?
-3. Add in generic decorators
-4. Sniff out the collection interface through duck-typing
-5. Add targeted decoration to any undecorated interface method
- 
-This process modifies the class at runtime, decorating methods and adding some
-bookkeeping properties. This isn't possible (or desirable) for built-in
-classes like ``list``, so trivial sub-classes are substituted to hold
-decoration::
- 
- class InstrumentedList(list):
- pass
- 
-Collection classes can be specified in ``relationship(collection_class=)`` as
-types or a function that returns an instance. Collection classes are
-inspected and instrumented during the mapper compilation phase. The
-collection_class callable will be executed once to produce a specimen
-instance, and the type of that specimen will be instrumented. Functions that
-return built-in types like ``list`` will be adapted to produce instrumented
-instances.
- 
-When extending a known type like ``list``, additional decorations are
-generally not needed. Odds are, the extension method will delegate to a
-method that's already instrumented. For example::
- 
- class QueueIsh(list):
- def push(self, item):
- self.append(item)
- def shift(self):
- return self.pop(0)
- 
-There's no need to decorate these methods. ``append`` and ``pop`` are already
-instrumented as part of the ``list`` interface. Decorating them would fire
-duplicate events, which should be avoided.
- 
-The targeted decoration tries not to rely on other methods in the underlying
-collection class, but some are unavoidable. Many depend on 'read' methods
-being present to properly instrument a 'write', for example, ``__setitem__``
-needs ``__getitem__``. "Bulk" methods like ``update`` and ``extend`` may also be
-reimplemented in terms of atomic appends and removes, so the ``extend``
-decoration will actually perform many ``append`` operations and not call the
-underlying method at all.
- 
-Tight control over bulk operation and the firing of events is also possible by
-implementing the instrumentation internally in your methods. The basic
-instrumentation package works under the general assumption that collection
-mutation will not raise unusual exceptions. If you want to closely
-orchestrate append and remove events with exception management, internal
-instrumentation may be the answer. 
Within your method, -``collection_adapter(self)`` will retrieve an object that you can use for -explicit control over triggering append and remove events. - -The owning object and InstrumentedCollectionAttribute are also reachable -through the adapter, allowing for some very sophisticated behavior. - -""" - -import copy -import inspect -import operator -import sys -import weakref - -from sqlalchemy.sql import expression -from sqlalchemy import schema, util, exc as sa_exc - -__all__ = ['collection', 'collection_adapter', - 'mapped_collection', 'column_mapped_collection', - 'attribute_mapped_collection'] - -__instrumentation_mutex = util.threading.Lock() - - -class _PlainColumnGetter(object): - """Plain column getter, stores collection of Column objects - directly. - - Serializes to a :class:`._SerializableColumnGetterV2` - which has more expensive __call__() performance - and some rare caveats. - - """ - def __init__(self, cols): - self.cols = cols - self.composite = len(cols) > 1 - - def __reduce__(self): - return _SerializableColumnGetterV2._reduce_from_cols(self.cols) - - def _cols(self, mapper): - return self.cols - - def __call__(self, value): - state = instance_state(value) - m = _state_mapper(state) - - key = [ - m._get_state_attr_by_column(state, state.dict, col) - for col in self._cols(m) - ] - - if self.composite: - return tuple(key) - else: - return key[0] - -class _SerializableColumnGetter(object): - """Column-based getter used in version 0.7.6 only. - - Remains here for pickle compatibility with 0.7.6. - - """ - def __init__(self, colkeys): - self.colkeys = colkeys - self.composite = len(colkeys) > 1 - def __reduce__(self): - return _SerializableColumnGetter, (self.colkeys,) - def __call__(self, value): - state = instance_state(value) - m = _state_mapper(state) - key = [m._get_state_attr_by_column( - state, state.dict, - m.mapped_table.columns[k]) - for k in self.colkeys] - if self.composite: - return tuple(key) - else: - return key[0] - -class _SerializableColumnGetterV2(_PlainColumnGetter): - """Updated serializable getter which deals with - multi-table mapped classes. - - Two extremely unusual cases are not supported. - Mappings which have tables across multiple metadata - objects, or which are mapped to non-Table selectables - linked across inheriting mappers may fail to function - here. - - """ - - def __init__(self, colkeys): - self.colkeys = colkeys - self.composite = len(colkeys) > 1 - - def __reduce__(self): - return self.__class__, (self.colkeys,) - - @classmethod - def _reduce_from_cols(cls, cols): - def _table_key(c): - if not isinstance(c.table, expression.TableClause): - return None - else: - return c.table.key - colkeys = [(c.key, _table_key(c)) for c in cols] - return _SerializableColumnGetterV2, (colkeys,) - - def _cols(self, mapper): - cols = [] - metadata = getattr(mapper.local_table, 'metadata', None) - for (ckey, tkey) in self.colkeys: - if tkey is None or \ - metadata is None or \ - tkey not in metadata: - cols.append(mapper.local_table.c[ckey]) - else: - cols.append(metadata.tables[tkey].c[ckey]) - return cols - - -def column_mapped_collection(mapping_spec): - """A dictionary-based collection type with column-based keying. - - Returns a :class:`.MappedCollection` factory with a keying function generated - from mapping_spec, which may be a Column or a sequence of Columns. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. 
from None to a database-assigned integer - after a session flush. - - """ - global _state_mapper, instance_state - from sqlalchemy.orm.util import _state_mapper - from sqlalchemy.orm.attributes import instance_state - - cols = [expression._only_column_elements(q, "mapping_spec") - for q in util.to_list(mapping_spec) - ] - keyfunc = _PlainColumnGetter(cols) - return lambda: MappedCollection(keyfunc) - -class _SerializableAttrGetter(object): - def __init__(self, name): - self.name = name - self.getter = operator.attrgetter(name) - - def __call__(self, target): - return self.getter(target) - - def __reduce__(self): - return _SerializableAttrGetter, (self.name, ) - -def attribute_mapped_collection(attr_name): - """A dictionary-based collection type with attribute-based keying. - - Returns a :class:`.MappedCollection` factory with a keying based on the - 'attr_name' attribute of entities in the collection, where ``attr_name`` - is the string name of the attribute. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. from None to a database-assigned integer - after a session flush. - - """ - getter = _SerializableAttrGetter(attr_name) - return lambda: MappedCollection(getter) - - -def mapped_collection(keyfunc): - """A dictionary-based collection type with arbitrary keying. - - Returns a :class:`.MappedCollection` factory with a keying function generated - from keyfunc, a callable that takes an entity and returns a key value. - - The key value must be immutable for the lifetime of the object. You - can not, for example, map on foreign key values if those key values will - change during the session, i.e. from None to a database-assigned integer - after a session flush. - - """ - return lambda: MappedCollection(keyfunc) - -class collection(object): - """Decorators for entity collection classes. - - The decorators fall into two groups: annotations and interception recipes. - - The annotating decorators (appender, remover, iterator, - internally_instrumented, link) indicate the method's purpose and take no - arguments. They are not written with parens:: - - @collection.appender - def append(self, append): ... - - The recipe decorators all require parens, even those that take no - arguments:: - - @collection.adds('entity') - def insert(self, position, entity): ... - - @collection.removes_return() - def popitem(self): ... - - """ - # Bundled as a class solely for ease of use: packaging, doc strings, - # importability. - - @staticmethod - def appender(fn): - """Tag the method as the collection appender. - - The appender method is called with one positional argument: the value - to append. The method will be automatically decorated with 'adds(1)' - if not already decorated:: - - @collection.appender - def add(self, append): ... - - # or, equivalently - @collection.appender - @collection.adds(1) - def add(self, append): ... - - # for mapping type, an 'append' may kick out a previous value - # that occupies that slot. consider d['a'] = 'foo'- any previous - # value in d['a'] is discarded. - @collection.appender - @collection.replaces(1) - def add(self, entity): - key = some_key_func(entity) - previous = None - if key in self: - previous = self[key] - self[key] = entity - return previous - - If the value to append is not allowed in the collection, you may - raise an exception. Something to remember is that the appender - will be called for each object mapped by a database query. 
If the
- database contains rows that violate your collection semantics, you
- will need to get creative to fix the problem, as access via the
- collection will not work.
- 
- If the appender method is internally instrumented, you must also
- receive the keyword argument '_sa_initiator' and ensure its
- promulgation to collection events.
- 
- """
- setattr(fn, '_sa_instrument_role', 'appender')
- return fn
- 
- @staticmethod
- def remover(fn):
- """Tag the method as the collection remover.
- 
- The remover method is called with one positional argument: the value
- to remove. The method will be automatically decorated with
- :meth:`removes_return` if not already decorated::
- 
- @collection.remover
- def zap(self, entity): ...
- 
- # or, equivalently
- @collection.remover
- @collection.removes_return()
- def zap(self): ...
- 
- If the value to remove is not present in the collection, you may
- raise an exception or return None to ignore the error.
- 
- If the remove method is internally instrumented, you must also
- receive the keyword argument '_sa_initiator' and ensure its
- promulgation to collection events.
- 
- """
- setattr(fn, '_sa_instrument_role', 'remover')
- return fn
- 
- @staticmethod
- def iterator(fn):
- """Tag the method as the collection iterator.
- 
- The iterator method is called with no arguments. It is expected to
- return an iterator over all collection members::
- 
- @collection.iterator
- def __iter__(self): ...
- 
- """
- setattr(fn, '_sa_instrument_role', 'iterator')
- return fn
- 
- @staticmethod
- def internally_instrumented(fn):
- """Tag the method as instrumented.
- 
- This tag will prevent any decoration from being applied to the method.
- Use this if you are orchestrating your own calls to :func:`.collection_adapter`
- in one of the basic SQLAlchemy interface methods, or to prevent
- an automatic ABC method decoration from wrapping your implementation::
- 
- # normally an 'extend' method on a list-like class would be
- # automatically intercepted and re-implemented in terms of
- # SQLAlchemy events and append(). your implementation will
- # never be called, unless:
- @collection.internally_instrumented
- def extend(self, items): ...
- 
- """
- setattr(fn, '_sa_instrumented', True)
- return fn
- 
- @staticmethod
- def link(fn):
- """Tag the method as the "linked to attribute" event handler.
- 
- This optional event handler will be called when the collection class
- is linked to or unlinked from the InstrumentedAttribute. It is
- invoked immediately after the '_sa_adapter' property is set on
- the instance. A single argument is passed: the collection adapter
- that has been linked, or None if unlinking.
- 
- """
- setattr(fn, '_sa_instrument_role', 'link')
- return fn
- 
- @staticmethod
- def converter(fn):
- """Tag the method as the collection converter.
- 
- This optional method will be called when a collection is being
- replaced entirely, as in::
- 
- myobj.acollection = [newvalue1, newvalue2]
- 
- The converter method will receive the object being assigned and should
- return an iterable of values suitable for use by the ``appender``
- method. A converter must not assign values or mutate the collection,
- its sole job is to adapt the value the user provides into an iterable
- of values for the ORM's use.
- 
- The default converter implementation will use duck-typing to do the
- conversion. A dict-like collection will be converted into an iterable
- of dictionary values, and other types will simply be iterated::
- 
- @collection.converter
- def convert(self, other): ...
- - If the duck-typing of the object does not match the type of this - collection, a TypeError is raised. - - Supply an implementation of this method if you want to expand the - range of possible types that can be assigned in bulk or perform - validation on the values about to be assigned. - - """ - setattr(fn, '_sa_instrument_role', 'converter') - return fn - - @staticmethod - def adds(arg): - """Mark the method as adding an entity to the collection. - - Adds "add to collection" handling to the method. The decorator - argument indicates which method argument holds the SQLAlchemy-relevant - value. Arguments can be specified positionally (i.e. integer) or by - name:: - - @collection.adds(1) - def push(self, item): ... - - @collection.adds('entity') - def do_stuff(self, thing, entity=None): ... - - """ - def decorator(fn): - setattr(fn, '_sa_instrument_before', ('fire_append_event', arg)) - return fn - return decorator - - @staticmethod - def replaces(arg): - """Mark the method as replacing an entity in the collection. - - Adds "add to collection" and "remove from collection" handling to - the method. The decorator argument indicates which method argument - holds the SQLAlchemy-relevant value to be added, and the return value, - if any, will be considered the value to remove. - - Arguments can be specified positionally (i.e. integer) or by name:: - - @collection.replaces(2) - def __setitem__(self, index, item): ... - - """ - def decorator(fn): - setattr(fn, '_sa_instrument_before', ('fire_append_event', arg)) - setattr(fn, '_sa_instrument_after', 'fire_remove_event') - return fn - return decorator - - @staticmethod - def removes(arg): - """Mark the method as removing an entity from the collection. - - Adds "remove from collection" handling to the method. The decorator - argument indicates which method argument holds the SQLAlchemy-relevant - value to be removed. Arguments can be specified positionally (i.e. - integer) or by name:: - - @collection.removes(1) - def zap(self, item): ... - - For methods where the value to remove is not known at call-time, use - collection.removes_return. - - """ - def decorator(fn): - setattr(fn, '_sa_instrument_before', ('fire_remove_event', arg)) - return fn - return decorator - - @staticmethod - def removes_return(): - """Mark the method as removing an entity from the collection. - - Adds "remove from collection" handling to the method. The return value - of the method, if any, is considered the value to remove. The method - arguments are not inspected:: - - @collection.removes_return() - def pop(self): ... - - For methods where the value to remove is known at call-time, use - collection.removes. - - """ - def decorator(fn): - setattr(fn, '_sa_instrument_after', 'fire_remove_event') - return fn - return decorator - - -# public instrumentation interface for 'internally instrumented' -# implementations -def collection_adapter(collection): - """Fetch the :class:`.CollectionAdapter` for a collection.""" - - return getattr(collection, '_sa_adapter', None) - -def collection_iter(collection): - """Iterate over an object supporting the @iterator or __iter__ protocols. - - If the collection is an ORM collection, it need not be attached to an - object to be iterable. - - """ - try: - return getattr(collection, '_sa_iterator', - getattr(collection, '__iter__'))() - except AttributeError: - raise TypeError("'%s' object is not iterable" % - type(collection).__name__) - - -class CollectionAdapter(object): - """Bridges between the ORM and arbitrary Python collections.
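A minimal sketch of a hand-rolled collection class using the annotating decorators documented above. `OrderedBag` is an invented name; it would be wired in via `relationship(..., collection_class=OrderedBag)`:

```python
from sqlalchemy.orm.collections import collection

class OrderedBag(object):
    """List-backed collection declaring its roles explicitly."""

    def __init__(self):
        self._members = []

    @collection.appender
    def append(self, item):
        # invoked once per loaded row, as well as for user appends
        self._members.append(item)

    @collection.remover
    def remove(self, item):
        self._members.remove(item)

    @collection.iterator
    def __iter__(self):
        return iter(self._members)
```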
- - Proxies base-level collection operations (append, remove, iterate) - to the underlying Python collection, and emits add/remove events for - entities entering or leaving the collection. - - The ORM uses :class:`.CollectionAdapter` exclusively for interaction with - entity collections. - - The usage of getattr()/setattr() is currently to allow injection - of custom methods, such as to unwrap Zope security proxies. - - """ - def __init__(self, attr, owner_state, data): - self._key = attr.key - self._data = weakref.ref(data) - self.owner_state = owner_state - self.link_to_self(data) - - @property - def data(self): - "The entity collection being adapted." - return self._data() - - @util.memoized_property - def attr(self): - return self.owner_state.manager[self._key].impl - - def link_to_self(self, data): - """Link a collection to this adapter, and fire a link event.""" - setattr(data, '_sa_adapter', self) - if hasattr(data, '_sa_on_link'): - getattr(data, '_sa_on_link')(self) - - def unlink(self, data): - """Unlink a collection from any adapter, and fire a link event.""" - setattr(data, '_sa_adapter', None) - if hasattr(data, '_sa_on_link'): - getattr(data, '_sa_on_link')(None) - - def adapt_like_to_iterable(self, obj): - """Converts collection-compatible objects to an iterable of values. - - Can be passed any type of object, and if the underlying collection - determines that it can be adapted into a stream of values it can - use, returns an iterable of values suitable for append()ing. - - This method may raise TypeError or any other suitable exception - if adaptation fails. - - If a converter implementation is not supplied on the collection, - a default duck-typing-based implementation is used. - - """ - converter = getattr(self._data(), '_sa_converter', None) - if converter is not None: - return converter(obj) - - setting_type = util.duck_type_collection(obj) - receiving_type = util.duck_type_collection(self._data()) - - if obj is None or setting_type != receiving_type: - given = obj is None and 'None' or obj.__class__.__name__ - if receiving_type is None: - wanted = self._data().__class__.__name__ - else: - wanted = receiving_type.__name__ - - raise TypeError( - "Incompatible collection type: %s is not %s-like" % ( - given, wanted)) - - # If the object is an adapted collection, return the (iterable) - # adapter. 
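The duck-typing fallback that `adapt_like_to_iterable()` applies can be restated in plain Python. This is a simplified paraphrase of the rule for illustration, not the library code itself:

```python
def values_for_append(obj, receiving_is_dict):
    """Paraphrase: dict-likes contribute their values; other
    collection types are iterated directly."""
    if receiving_is_dict:
        if not hasattr(obj, 'values'):
            raise TypeError("Incompatible collection type: %s is not dict-like"
                            % type(obj).__name__)
        return list(obj.values())
    return list(iter(obj))

assert sorted(values_for_append({'a': 1, 'b': 2}, True)) == [1, 2]
assert values_for_append([1, 2, 3], False) == [1, 2, 3]
```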
- if getattr(obj, '_sa_adapter', None) is not None: - return getattr(obj, '_sa_adapter') - elif setting_type == dict: - # Py3K - #return obj.values() - # Py2K - return getattr(obj, 'itervalues', getattr(obj, 'values'))() - # end Py2K - else: - return iter(obj) - - def append_with_event(self, item, initiator=None): - """Add an entity to the collection, firing mutation events.""" - - getattr(self._data(), '_sa_appender')(item, _sa_initiator=initiator) - - def append_without_event(self, item): - """Add or restore an entity to the collection, firing no events.""" - getattr(self._data(), '_sa_appender')(item, _sa_initiator=False) - - def append_multiple_without_event(self, items): - """Add or restore multiple entities to the collection, firing no events.""" - appender = getattr(self._data(), '_sa_appender') - for item in items: - appender(item, _sa_initiator=False) - - def remove_with_event(self, item, initiator=None): - """Remove an entity from the collection, firing mutation events.""" - getattr(self._data(), '_sa_remover')(item, _sa_initiator=initiator) - - def remove_without_event(self, item): - """Remove an entity from the collection, firing no events.""" - getattr(self._data(), '_sa_remover')(item, _sa_initiator=False) - - def clear_with_event(self, initiator=None): - """Empty the collection, firing a mutation event for each entity.""" - - remover = getattr(self._data(), '_sa_remover') - for item in list(self): - remover(item, _sa_initiator=initiator) - - def clear_without_event(self): - """Empty the collection, firing no events.""" - - remover = getattr(self._data(), '_sa_remover') - for item in list(self): - remover(item, _sa_initiator=False) - - def __iter__(self): - """Iterate over entities in the collection.""" - - # Py3K requires iter() here - return iter(getattr(self._data(), '_sa_iterator')()) - - def __len__(self): - """Count entities in the collection.""" - return len(list(getattr(self._data(), '_sa_iterator')())) - - def __nonzero__(self): - return True - - def fire_append_event(self, item, initiator=None): - """Notify that an entity has entered the collection. - - Initiator is a token owned by the InstrumentedAttribute that initiated the membership - mutation, and should be left as None unless you are passing along - an initiator value from a chained operation. - - """ - if initiator is not False and item is not None: - return self.attr.fire_append_event( - self.owner_state, - self.owner_state.dict, - item, initiator) - else: - return item - - def fire_remove_event(self, item, initiator=None): - """Notify that an entity has been removed from the collection. - - Initiator is the InstrumentedAttribute that initiated the membership - mutation, and should be left as None unless you are passing along - an initiator value from a chained operation. - - """ - if initiator is not False and item is not None: - self.attr.fire_remove_event( - self.owner_state, - self.owner_state.dict, - item, initiator) - - def fire_pre_remove_event(self, initiator=None): - """Notify that an entity is about to be removed from the collection. - - Only called if the entity cannot be removed after calling - fire_remove_event().
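The `fire_append_event()`/`fire_remove_event()` calls above surface through the public attribute event API. A sketch of observing them, reusing the hypothetical `Parent.children` mapping from the earlier sketch:

```python
from sqlalchemy import event

def on_append(target, value, initiator):
    print("appended %r to %r" % (value, target))

def on_remove(target, value, initiator):
    print("removed %r from %r" % (value, target))

# 'append' / 'remove' fire whenever the adapter emits membership events
event.listen(Parent.children, 'append', on_append)
event.listen(Parent.children, 'remove', on_remove)
```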
- - """ - self.attr.fire_pre_remove_event( - self.owner_state, - self.owner_state.dict, - initiator=initiator) - - def __getstate__(self): - return {'key': self._key, - 'owner_state': self.owner_state, - 'data': self.data} - - def __setstate__(self, d): - self._key = d['key'] - self.owner_state = d['owner_state'] - self._data = weakref.ref(d['data']) - - -def bulk_replace(values, existing_adapter, new_adapter): - """Load a new collection, firing events based on prior like membership. - - Appends instances in ``values`` onto the ``new_adapter``. Events will be - fired for any instance not present in the ``existing_adapter``. Any - instances in ``existing_adapter`` not present in ``values`` will have - remove events fired upon them. - - :param values: An iterable of collection member instances - - :param existing_adapter: A :class:`.CollectionAdapter` of instances to be replaced - - :param new_adapter: An empty :class:`.CollectionAdapter` to load with ``values`` - - - """ - if not isinstance(values, list): - values = list(values) - - idset = util.IdentitySet - constants = idset(existing_adapter or ()).intersection(values or ()) - additions = idset(values or ()).difference(constants) - removals = idset(existing_adapter or ()).difference(constants) - - for member in values or (): - if member in additions: - new_adapter.append_with_event(member) - elif member in constants: - new_adapter.append_without_event(member) - - if existing_adapter: - for member in removals: - existing_adapter.remove_with_event(member) - -def prepare_instrumentation(factory): - """Prepare a callable for future use as a collection class factory. - - Given a collection class factory (either a type or no-arg callable), - return another factory that will produce compatible instances when - called. - - This function is responsible for converting collection_class=list - into the run-time behavior of collection_class=InstrumentedList. - - """ - # Convert a builtin to 'Instrumented*' - if factory in __canned_instrumentation: - factory = __canned_instrumentation[factory] - - # Create a specimen - cls = type(factory()) - - # Did factory callable return a builtin? - if cls in __canned_instrumentation: - # Wrap it so that it returns our 'Instrumented*' - factory = __converting_factory(factory) - cls = factory() - - # Instrument the class if needed. - if __instrumentation_mutex.acquire(): - try: - if getattr(cls, '_sa_instrumented', None) != id(cls): - _instrument_class(cls) - finally: - __instrumentation_mutex.release() - - return factory - -def __converting_factory(original_factory): - """Convert the type returned by collection factories on the fly. - - Given a collection factory that returns a builtin type (e.g. a list), - return a wrapped function that converts that type to one of our - instrumented types. 
- - """ - def wrapper(): - collection = original_factory() - type_ = type(collection) - if type_ in __canned_instrumentation: - # return an instrumented type initialized from the factory's - # collection - return __canned_instrumentation[type_](collection) - else: - raise sa_exc.InvalidRequestError( - "Collection class factories must produce instances of a " - "single class.") - try: - # often flawed but better than nothing - wrapper.__name__ = "%sWrapper" % original_factory.__name__ - wrapper.__doc__ = original_factory.__doc__ - except: - pass - return wrapper - -def _instrument_class(cls): - """Modify methods in a class and install instrumentation.""" - - # TODO: more formally document this as a decoratorless/Python 2.3 - # option for specifying instrumentation. (likely doc'd here in code only, - # not in online docs.) Useful for C types too. - # - # __instrumentation__ = { - # 'rolename': 'methodname', # ... - # 'methods': { - # 'methodname': ('fire_{append,remove}_event', argspec, - # 'fire_{append,remove}_event'), - # 'append': ('fire_append_event', 1, None), - # '__setitem__': ('fire_append_event', 1, 'fire_remove_event'), - # 'pop': (None, None, 'fire_remove_event'), - # } - # } - - # In the normal call flow, a request for any of the 3 basic collection - # types is transformed into one of our trivial subclasses - # (e.g. InstrumentedList). Catch anything else that sneaks in here... - if cls.__module__ == '__builtin__': - raise sa_exc.ArgumentError( - "Can not instrument a built-in type. Use a " - "subclass, even a trivial one.") - - collection_type = util.duck_type_collection(cls) - if collection_type in __interfaces: - roles = __interfaces[collection_type].copy() - decorators = roles.pop('_decorators', {}) - else: - roles, decorators = {}, {} - - if hasattr(cls, '__instrumentation__'): - roles.update(copy.deepcopy(getattr(cls, '__instrumentation__'))) - - methods = roles.pop('methods', {}) - - for name in dir(cls): - method = getattr(cls, name, None) - if not util.callable(method): - continue - - # note role declarations - if hasattr(method, '_sa_instrument_role'): - role = method._sa_instrument_role - assert role in ('appender', 'remover', 'iterator', - 'link', 'converter') - roles[role] = name - - # transfer instrumentation requests from decorated function - # to the combined queue - before, after = None, None - if hasattr(method, '_sa_instrument_before'): - op, argument = method._sa_instrument_before - assert op in ('fire_append_event', 'fire_remove_event') - before = op, argument - if hasattr(method, '_sa_instrument_after'): - op = method._sa_instrument_after - assert op in ('fire_append_event', 'fire_remove_event') - after = op - if before: - methods[name] = before[0], before[1], after - elif after: - methods[name] = None, None, after - - # apply ABC auto-decoration to methods that need it - - for method, decorator in decorators.items(): - fn = getattr(cls, method, None) - if (fn and method not in methods and - not hasattr(fn, '_sa_instrumented')): - setattr(cls, method, decorator(fn)) - - # ensure all roles are present, and apply implicit instrumentation if - # needed - if 'appender' not in roles or not hasattr(cls, roles['appender']): - raise sa_exc.ArgumentError( - "Type %s must elect an appender method to be " - "a collection class" % cls.__name__) - elif (roles['appender'] not in methods and - not hasattr(getattr(cls, roles['appender']), '_sa_instrumented')): - methods[roles['appender']] = ('fire_append_event', 1, None) - - if 'remover' not in roles or not hasattr(cls, 
roles['remover']): - raise sa_exc.ArgumentError( - "Type %s must elect a remover method to be " - "a collection class" % cls.__name__) - elif (roles['remover'] not in methods and - not hasattr(getattr(cls, roles['remover']), '_sa_instrumented')): - methods[roles['remover']] = ('fire_remove_event', 1, None) - - if 'iterator' not in roles or not hasattr(cls, roles['iterator']): - raise sa_exc.ArgumentError( - "Type %s must elect an iterator method to be " - "a collection class" % cls.__name__) - - # apply ad-hoc instrumentation from decorators, class-level defaults - # and implicit role declarations - for method, (before, argument, after) in methods.items(): - setattr(cls, method, - _instrument_membership_mutator(getattr(cls, method), - before, argument, after)) - # intern the role map - for role, method in roles.items(): - setattr(cls, '_sa_%s' % role, getattr(cls, method)) - - setattr(cls, '_sa_instrumented', id(cls)) - -def _instrument_membership_mutator(method, before, argument, after): - """Route method args and/or return value through the collection adapter.""" - # This isn't smart enough to handle @adds(1) for 'def fn(self, (a, b))' - if before: - fn_args = list(util.flatten_iterator(inspect.getargspec(method)[0])) - if type(argument) is int: - pos_arg = argument - named_arg = len(fn_args) > argument and fn_args[argument] or None - else: - if argument in fn_args: - pos_arg = fn_args.index(argument) - else: - pos_arg = None - named_arg = argument - del fn_args - - def wrapper(*args, **kw): - if before: - if pos_arg is None: - if named_arg not in kw: - raise sa_exc.ArgumentError( - "Missing argument %s" % argument) - value = kw[named_arg] - else: - if len(args) > pos_arg: - value = args[pos_arg] - elif named_arg in kw: - value = kw[named_arg] - else: - raise sa_exc.ArgumentError( - "Missing argument %s" % argument) - - initiator = kw.pop('_sa_initiator', None) - if initiator is False: - executor = None - else: - executor = getattr(args[0], '_sa_adapter', None) - - if before and executor: - getattr(executor, before)(value, initiator) - - if not after or not executor: - return method(*args, **kw) - else: - res = method(*args, **kw) - if res is not None: - getattr(executor, after)(res, initiator) - return res - try: - wrapper._sa_instrumented = True - wrapper.__name__ = method.__name__ - wrapper.__doc__ = method.__doc__ - except: - pass - return wrapper - -def __set(collection, item, _sa_initiator=None): - """Run set events, may eventually be inlined into decorators.""" - - if _sa_initiator is not False and item is not None: - executor = getattr(collection, '_sa_adapter', None) - if executor: - item = getattr(executor, 'fire_append_event')(item, _sa_initiator) - return item - -def __del(collection, item, _sa_initiator=None): - """Run del events, may eventually be inlined into decorators.""" - if _sa_initiator is not False and item is not None: - executor = getattr(collection, '_sa_adapter', None) - if executor: - getattr(executor, 'fire_remove_event')(item, _sa_initiator) - -def __before_delete(collection, _sa_initiator=None): - """Special method to run 'commit existing value' methods""" - executor = getattr(collection, '_sa_adapter', None) - if executor: - getattr(executor, 'fire_pre_remove_event')(_sa_initiator) - -def _list_decorators(): - """Tailored instrumentation wrappers for any list-like class.""" - - def _tidy(fn): - setattr(fn, '_sa_instrumented', True) - fn.__doc__ = getattr(getattr(list, fn.__name__), '__doc__') - - def append(fn): - def append(self, item, _sa_initiator=None): 
- item = __set(self, item, _sa_initiator) - fn(self, item) - _tidy(append) - return append - - def remove(fn): - def remove(self, value, _sa_initiator=None): - __before_delete(self, _sa_initiator) - # testlib.pragma exempt:__eq__ - fn(self, value) - __del(self, value, _sa_initiator) - _tidy(remove) - return remove - - def insert(fn): - def insert(self, index, value): - value = __set(self, value) - fn(self, index, value) - _tidy(insert) - return insert - - def __setitem__(fn): - def __setitem__(self, index, value): - if not isinstance(index, slice): - existing = self[index] - if existing is not None: - __del(self, existing) - value = __set(self, value) - fn(self, index, value) - else: - # slice assignment requires __delitem__, insert, __len__ - step = index.step or 1 - start = index.start or 0 - if start < 0: - start += len(self) - stop = index.stop or len(self) - if stop < 0: - stop += len(self) - - if step == 1: - for i in xrange(start, stop, step): - if len(self) > start: - del self[start] - - for i, item in enumerate(value): - self.insert(i + start, item) - else: - rng = range(start, stop, step) - if len(value) != len(rng): - raise ValueError( - "attempt to assign sequence of size %s to " - "extended slice of size %s" % (len(value), - len(rng))) - for i, item in zip(rng, value): - self.__setitem__(i, item) - _tidy(__setitem__) - return __setitem__ - - def __delitem__(fn): - def __delitem__(self, index): - if not isinstance(index, slice): - item = self[index] - __del(self, item) - fn(self, index) - else: - # slice deletion requires __getslice__ and a slice-groking - # __getitem__ for stepped deletion - # note: not breaking this into atomic dels - for item in self[index]: - __del(self, item) - fn(self, index) - _tidy(__delitem__) - return __delitem__ - - # Py2K - def __setslice__(fn): - def __setslice__(self, start, end, values): - for value in self[start:end]: - __del(self, value) - values = [__set(self, value) for value in values] - fn(self, start, end, values) - _tidy(__setslice__) - return __setslice__ - - def __delslice__(fn): - def __delslice__(self, start, end): - for value in self[start:end]: - __del(self, value) - fn(self, start, end) - _tidy(__delslice__) - return __delslice__ - # end Py2K - - def extend(fn): - def extend(self, iterable): - for value in iterable: - self.append(value) - _tidy(extend) - return extend - - def __iadd__(fn): - def __iadd__(self, iterable): - # list.__iadd__ takes any iterable and seems to let TypeError raise - # as-is instead of returning NotImplemented - for value in iterable: - self.append(value) - return self - _tidy(__iadd__) - return __iadd__ - - def pop(fn): - def pop(self, index=-1): - __before_delete(self) - item = fn(self, index) - __del(self, item) - return item - _tidy(pop) - return pop - - # __imul__ : not wrapping this. all members of the collection are already - # present, so no need to fire appends... wrapping it with an explicit - # decorator is still possible, so events on *= can be had if they're - # desired. hard to imagine a use case for __imul__, though. 
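The extended-slice branch of `__setitem__` above mirrors plain list semantics: for a step other than 1, the replacement must match the slice's length exactly, as the built-in list also enforces:

```python
l = [0, 1, 2, 3, 4, 5]
l[::2] = ['a', 'b', 'c']   # three targets (indices 0, 2, 4), three values: fine
try:
    l[::2] = ['a', 'b']    # two values for three targets
except ValueError as err:
    # "attempt to assign sequence of size 2 to extended slice of size 3"
    print(err)
```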
- - l = locals().copy() - l.pop('_tidy') - return l - -def _dict_decorators(): - """Tailored instrumentation wrappers for any dict-like mapping class.""" - - def _tidy(fn): - setattr(fn, '_sa_instrumented', True) - fn.__doc__ = getattr(getattr(dict, fn.__name__), '__doc__') - - Unspecified = util.symbol('Unspecified') - - def __setitem__(fn): - def __setitem__(self, key, value, _sa_initiator=None): - if key in self: - __del(self, self[key], _sa_initiator) - value = __set(self, value, _sa_initiator) - fn(self, key, value) - _tidy(__setitem__) - return __setitem__ - - def __delitem__(fn): - def __delitem__(self, key, _sa_initiator=None): - if key in self: - __del(self, self[key], _sa_initiator) - fn(self, key) - _tidy(__delitem__) - return __delitem__ - - def clear(fn): - def clear(self): - for key in self: - __del(self, self[key]) - fn(self) - _tidy(clear) - return clear - - def pop(fn): - def pop(self, key, default=Unspecified): - if key in self: - __del(self, self[key]) - if default is Unspecified: - return fn(self, key) - else: - return fn(self, key, default) - _tidy(pop) - return pop - - def popitem(fn): - def popitem(self): - __before_delete(self) - item = fn(self) - __del(self, item[1]) - return item - _tidy(popitem) - return popitem - - def setdefault(fn): - def setdefault(self, key, default=None): - if key not in self: - self.__setitem__(key, default) - return default - else: - return self.__getitem__(key) - _tidy(setdefault) - return setdefault - - if sys.version_info < (2, 4): - def update(fn): - def update(self, other): - for key in other.keys(): - if key not in self or self[key] is not other[key]: - self[key] = other[key] - _tidy(update) - return update - else: - def update(fn): - def update(self, __other=Unspecified, **kw): - if __other is not Unspecified: - if hasattr(__other, 'keys'): - for key in __other.keys(): - if (key not in self or - self[key] is not __other[key]): - self[key] = __other[key] - else: - for key, value in __other: - if key not in self or self[key] is not value: - self[key] = value - for key in kw: - if key not in self or self[key] is not kw[key]: - self[key] = kw[key] - _tidy(update) - return update - - l = locals().copy() - l.pop('_tidy') - l.pop('Unspecified') - return l - -if util.py3k_warning: - _set_binop_bases = (set, frozenset) -else: - import sets - _set_binop_bases = (set, frozenset, sets.BaseSet) - -def _set_binops_check_strict(self, obj): - """Allow only set, frozenset and self.__class__-derived objects in binops.""" - return isinstance(obj, _set_binop_bases + (self.__class__,)) - -def _set_binops_check_loose(self, obj): - """Allow anything set-like to participate in set binops.""" - return (isinstance(obj, _set_binop_bases + (self.__class__,)) or - util.duck_type_collection(obj) == set) - - -def _set_decorators(): - """Tailored instrumentation wrappers for any set-like class.""" - - def _tidy(fn): - setattr(fn, '_sa_instrumented', True) - fn.__doc__ = getattr(getattr(set, fn.__name__), '__doc__') - - Unspecified = util.symbol('Unspecified') - - def add(fn): - def add(self, value, _sa_initiator=None): - if value not in self: - value = __set(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(add) - return add - - if sys.version_info < (2, 4): - def discard(fn): - def discard(self, value, _sa_initiator=None): - if value in self: - self.remove(value, _sa_initiator) - _tidy(discard) - return discard - else: - def discard(fn): - def discard(self, value, _sa_initiator=None): - # testlib.pragma exempt:__hash__ - if 
value in self: - __del(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(discard) - return discard - - def remove(fn): - def remove(self, value, _sa_initiator=None): - # testlib.pragma exempt:__hash__ - if value in self: - __del(self, value, _sa_initiator) - # testlib.pragma exempt:__hash__ - fn(self, value) - _tidy(remove) - return remove - - def pop(fn): - def pop(self): - __before_delete(self) - item = fn(self) - __del(self, item) - return item - _tidy(pop) - return pop - - def clear(fn): - def clear(self): - for item in list(self): - self.remove(item) - _tidy(clear) - return clear - - def update(fn): - def update(self, value): - for item in value: - self.add(item) - _tidy(update) - return update - - def __ior__(fn): - def __ior__(self, value): - if not _set_binops_check_strict(self, value): - return NotImplemented - for item in value: - self.add(item) - return self - _tidy(__ior__) - return __ior__ - - def difference_update(fn): - def difference_update(self, value): - for item in value: - self.discard(item) - _tidy(difference_update) - return difference_update - - def __isub__(fn): - def __isub__(self, value): - if not _set_binops_check_strict(self, value): - return NotImplemented - for item in value: - self.discard(item) - return self - _tidy(__isub__) - return __isub__ - - def intersection_update(fn): - def intersection_update(self, other): - want, have = self.intersection(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - _tidy(intersection_update) - return intersection_update - - def __iand__(fn): - def __iand__(self, other): - if not _set_binops_check_strict(self, other): - return NotImplemented - want, have = self.intersection(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - return self - _tidy(__iand__) - return __iand__ - - def symmetric_difference_update(fn): - def symmetric_difference_update(self, other): - want, have = self.symmetric_difference(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - _tidy(symmetric_difference_update) - return symmetric_difference_update - - def __ixor__(fn): - def __ixor__(self, other): - if not _set_binops_check_strict(self, other): - return NotImplemented - want, have = self.symmetric_difference(other), set(self) - remove, add = have - want, want - have - - for item in remove: - self.remove(item) - for item in add: - self.add(item) - return self - _tidy(__ixor__) - return __ixor__ - - l = locals().copy() - l.pop('_tidy') - l.pop('Unspecified') - return l - - -class InstrumentedList(list): - """An instrumented version of the built-in list.""" - - __instrumentation__ = { - 'appender': 'append', - 'remover': 'remove', - 'iterator': '__iter__', } - -class InstrumentedSet(set): - """An instrumented version of the built-in set.""" - - __instrumentation__ = { - 'appender': 'add', - 'remover': 'remove', - 'iterator': '__iter__', } - -class InstrumentedDict(dict): - """An instrumented version of the built-in dict.""" - - # Py3K - #__instrumentation__ = { - # 'iterator': 'values', } - # Py2K - __instrumentation__ = { - 'iterator': 'itervalues', } - # end Py2K - -__canned_instrumentation = { - list: InstrumentedList, - set: InstrumentedSet, - dict: InstrumentedDict, - } - -__interfaces = { - list: {'appender': 'append', - 'remover': 'remove', - 
'iterator': '__iter__', - '_decorators': _list_decorators(), }, - set: {'appender': 'add', - 'remover': 'remove', - 'iterator': '__iter__', - '_decorators': _set_decorators(), }, - # decorators are required for dicts and object collections. - # Py3K - #dict: {'iterator': 'values', - # '_decorators': _dict_decorators(), }, - # Py2K - dict: {'iterator': 'itervalues', - '_decorators': _dict_decorators(), }, - # end Py2K - # < 0.4 compatible naming, deprecated; use decorators instead. - None: {} - } - -class MappedCollection(dict): - """A basic dictionary-based collection class. - - Extends dict with the minimal bag semantics that collection classes require. - ``set`` and ``remove`` are implemented in terms of a keying function: any - callable that takes an object and returns an object for use as a dictionary - key. - - """ - - def __init__(self, keyfunc): - """Create a new collection with keying provided by keyfunc. - - keyfunc may be any callable that takes an object and - returns an object for use as a dictionary key. - - The keyfunc will be called every time the ORM needs to add a member by - value only (such as when loading instances from the database) or - remove a member. The usual cautions about dictionary keying apply: - ``keyfunc(object)`` should return the same output for the life of the - collection. Keying based on mutable properties can result in - unreachable instances "lost" in the collection. - - """ - self.keyfunc = keyfunc - - def set(self, value, _sa_initiator=None): - """Add an item by value, consulting the keyfunc for the key.""" - - key = self.keyfunc(value) - self.__setitem__(key, value, _sa_initiator) - set = collection.internally_instrumented(set) - set = collection.appender(set) - - def remove(self, value, _sa_initiator=None): - """Remove an item by value, consulting the keyfunc for the key.""" - - key = self.keyfunc(value) - # Let self[key] raise if key is not in this collection - # testlib.pragma exempt:__ne__ - if self[key] != value: - raise sa_exc.InvalidRequestError( - "Can not remove '%s': collection holds '%s' for key '%s'. " - "Possible cause: is the MappedCollection key function " - "based on mutable properties or properties that only obtain " - "values after flush?" % - (value, self[key], key)) - self.__delitem__(key, _sa_initiator) - remove = collection.internally_instrumented(remove) - remove = collection.remover(remove) - - def _convert(self, dictlike): - """Validate and convert a dict-like object into values for set()ing. - - This is called behind the scenes when a MappedCollection is replaced - entirely by another collection, as in:: - - myobj.mappedcollection = {'a':obj1, 'b': obj2} # ... - - Raises a TypeError if the key in any (key, value) pair in the dictlike - object does not match the key that this collection's keyfunc would - have assigned for that value. - - """ - for incoming_key, value in util.dictlike_iteritems(dictlike): - new_key = self.keyfunc(value) - if incoming_key != new_key: - raise TypeError( - "Found incompatible key %r for value %r; this collection's " - "keying function requires a key of %r for this value." % ( - incoming_key, value, new_key)) - yield value - _convert = collection.converter(_convert) - -# ensure instrumentation is associated with -# these built-in classes; if a user-defined class -# subclasses these and uses @internally_instrumented, -# the superclass is otherwise not instrumented. -# see [ticket:2406].
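Because `MappedCollection` is instrumented at import time (see the `_instrument_class()` calls just below), it also behaves sensibly standalone. A small sketch with an invented `Note` class:

```python
from sqlalchemy.orm.collections import MappedCollection

class Note(object):
    def __init__(self, keyword):
        self.keyword = keyword

mc = MappedCollection(keyfunc=lambda note: note.keyword)
n = Note('todo')
mc.set(n)              # key computed by the keyfunc
assert mc['todo'] is n
mc.remove(n)           # removal consults the keyfunc too
assert 'todo' not in mc
```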
-_instrument_class(MappedCollection) -_instrument_class(InstrumentedList) -_instrument_class(InstrumentedSet) - diff --git a/libs/sqlalchemy/orm/dependency.py b/libs/sqlalchemy/orm/dependency.py deleted file mode 100644 index c46969f2..00000000 --- a/libs/sqlalchemy/orm/dependency.py +++ /dev/null @@ -1,1161 +0,0 @@ -# orm/dependency.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Relationship dependencies. - -""" - -from sqlalchemy import sql, util, exc as sa_exc -from sqlalchemy.orm import attributes, exc, sync, unitofwork, \ - util as mapperutil -from sqlalchemy.orm.interfaces import ONETOMANY, MANYTOONE, MANYTOMANY - -class DependencyProcessor(object): - def __init__(self, prop): - self.prop = prop - self.cascade = prop.cascade - self.mapper = prop.mapper - self.parent = prop.parent - self.secondary = prop.secondary - self.direction = prop.direction - self.post_update = prop.post_update - self.passive_deletes = prop.passive_deletes - self.passive_updates = prop.passive_updates - self.enable_typechecks = prop.enable_typechecks - if self.passive_deletes: - self._passive_delete_flag = attributes.PASSIVE_NO_INITIALIZE - else: - self._passive_delete_flag = attributes.PASSIVE_OFF - if self.passive_updates: - self._passive_update_flag = attributes.PASSIVE_NO_INITIALIZE - else: - self._passive_update_flag= attributes.PASSIVE_OFF - - self.key = prop.key - if not self.prop.synchronize_pairs: - raise sa_exc.ArgumentError( - "Can't build a DependencyProcessor for relationship %s. " - "No target attributes to populate between parent and " - "child are present" % - self.prop) - - @classmethod - def from_relationship(cls, prop): - return _direction_to_processor[prop.direction](prop) - - def hasparent(self, state): - """return True if the given object instance has a parent, - according to the ``InstrumentedAttribute`` handled by this - ``DependencyProcessor``. - - """ - return self.parent.class_manager.get_impl(self.key).hasparent(state) - - def per_property_preprocessors(self, uow): - """establish actions and dependencies related to a flush. - - These actions will operate on all relevant states in - the aggregate. - - """ - uow.register_preprocessor(self, True) - - - def per_property_flush_actions(self, uow): - after_save = unitofwork.ProcessAll(uow, self, False, True) - before_delete = unitofwork.ProcessAll(uow, self, True, True) - - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.primary_base_mapper - ) - child_saves = unitofwork.SaveUpdateAll( - uow, - self.mapper.primary_base_mapper - ) - - parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.primary_base_mapper - ) - child_deletes = unitofwork.DeleteAll( - uow, - self.mapper.primary_base_mapper - ) - - self.per_property_dependencies(uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ) - - - def per_state_flush_actions(self, uow, states, isdelete): - """establish actions and dependencies related to a flush. - - These actions will operate on all relevant states - individually. This occurs only if there are cycles - in the 'aggregated' version of events. 
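The per-state path described above is taken when the aggregated save/delete actions form cycles; the classic trigger is a self-referential relationship. A hypothetical mapping of that shape, declarative as in the earlier sketch:

```python
class Node(Base):
    __tablename__ = 'node'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('node.id'))
    # one-to-many onto the same mapper: the aggregate SaveUpdateAll(Node)
    # action depends on itself, so the flush breaks down to per-state actions
    children = relationship("Node")
```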
- - """ - - parent_base_mapper = self.parent.primary_base_mapper - child_base_mapper = self.mapper.primary_base_mapper - child_saves = unitofwork.SaveUpdateAll(uow, child_base_mapper) - child_deletes = unitofwork.DeleteAll(uow, child_base_mapper) - - # locate and disable the aggregate processors - # for this dependency - - if isdelete: - before_delete = unitofwork.ProcessAll(uow, self, True, True) - before_delete.disabled = True - else: - after_save = unitofwork.ProcessAll(uow, self, False, True) - after_save.disabled = True - - # check if the "child" side is part of the cycle - - if child_saves not in uow.cycles: - # based on the current dependencies we use, the saves/ - # deletes should always be in the 'cycles' collection - # together. if this changes, we will have to break up - # this method a bit more. - assert child_deletes not in uow.cycles - - # child side is not part of the cycle, so we will link per-state - # actions to the aggregate "saves", "deletes" actions - child_actions = [ - (child_saves, False), (child_deletes, True) - ] - child_in_cycles = False - else: - child_in_cycles = True - - # check if the "parent" side is part of the cycle - if not isdelete: - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) - parent_deletes = before_delete = None - if parent_saves in uow.cycles: - parent_in_cycles = True - else: - parent_deletes = unitofwork.DeleteAll( - uow, - self.parent.base_mapper) - parent_saves = after_save = None - if parent_deletes in uow.cycles: - parent_in_cycles = True - - # now create actions /dependencies for each state. - for state in states: - # detect if there's anything changed or loaded - # by a preprocessor on this state/attribute. if not, - # we should be able to skip it entirely. - sum_ = state.manager[self.key].impl.get_all_pending(state, state.dict) - - if not sum_: - continue - - if isdelete: - before_delete = unitofwork.ProcessState(uow, - self, True, state) - if parent_in_cycles: - parent_deletes = unitofwork.DeleteState( - uow, - state, - parent_base_mapper) - else: - after_save = unitofwork.ProcessState(uow, self, False, state) - if parent_in_cycles: - parent_saves = unitofwork.SaveUpdateState( - uow, - state, - parent_base_mapper) - - if child_in_cycles: - child_actions = [] - for child_state, child in sum_: - if child_state not in uow.states: - child_action = (None, None) - else: - (deleted, listonly) = uow.states[child_state] - if deleted: - child_action = ( - unitofwork.DeleteState( - uow, child_state, - child_base_mapper), - True) - else: - child_action = ( - unitofwork.SaveUpdateState( - uow, child_state, - child_base_mapper), - False) - child_actions.append(child_action) - - # establish dependencies between our possibly per-state - # parent action and our possibly per-state child action. 
- for child_action, childisdelete in child_actions: - self.per_state_dependencies(uow, parent_saves, - parent_deletes, - child_action, - after_save, before_delete, - isdelete, childisdelete) - - - def presort_deletes(self, uowcommit, states): - return False - - def presort_saves(self, uowcommit, states): - return False - - def process_deletes(self, uowcommit, states): - pass - - def process_saves(self, uowcommit, states): - pass - - def prop_has_changes(self, uowcommit, states, isdelete): - if not isdelete or self.passive_deletes: - passive = attributes.PASSIVE_NO_INITIALIZE - elif self.direction is MANYTOONE: - passive = attributes.PASSIVE_NO_FETCH_RELATED - else: - passive = attributes.PASSIVE_OFF - - for s in states: - # TODO: add a high speed method - # to InstanceState which returns: attribute - # has a non-None value, or had one - history = uowcommit.get_attribute_history( - s, - self.key, - passive) - if history and not history.empty(): - return True - else: - return states and \ - not self.prop._is_self_referential and \ - self.mapper in uowcommit.mappers - - def _verify_canload(self, state): - if state is not None and \ - not self.mapper._canload(state, - allow_subtypes=not self.enable_typechecks): - if self.mapper._canload(state, allow_subtypes=True): - raise exc.FlushError('Attempting to flush an item of type ' - '%(x)s as a member of collection ' - '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type. If %(x)s is a subclass of ' - '%(z)s, configure mapper "%(zm)s" to ' - 'load this subtype polymorphically, or ' - 'set enable_typechecks=False to allow ' - 'any subtype to be accepted for flush. ' - % { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - 'zm': self.mapper, - }) - else: - raise exc.FlushError( - 'Attempting to flush an item of type ' - '%(x)s as a member of collection ' - '"%(y)s". Expected an object of type ' - '%(z)s or a polymorphic subclass of ' - 'this type.' 
% { - 'x': state.class_, - 'y': self.prop, - 'z': self.mapper.class_, - }) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit): - raise NotImplementedError() - - def _get_reversed_processed_set(self, uow): - if not self.prop._reverse_property: - return None - - process_key = tuple(sorted( - [self.key] + - [p.key for p in self.prop._reverse_property] - )) - return uow.memo( - ('reverse_key', process_key), - set - ) - - def _post_update(self, state, uowcommit, related): - for x in related: - if x is not None: - uowcommit.issue_post_update( - state, - [r for l, r in self.prop.synchronize_pairs] - ) - break - - def _pks_changed(self, uowcommit, state): - raise NotImplementedError() - - def __repr__(self): - return "%s(%s)" % (self.__class__.__name__, self.prop) - -class OneToManyDP(DependencyProcessor): - - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete, - ): - if self.post_update: - child_post_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - False) - child_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, child_post_updates), - - (before_delete, child_pre_updates), - (child_pre_updates, parent_deletes), - (child_pre_updates, child_deletes), - - ]) - else: - uow.dependencies.update([ - (parent_saves, after_save), - (after_save, child_saves), - (after_save, child_deletes), - - (child_saves, parent_deletes), - (child_deletes, parent_deletes), - - (before_delete, child_saves), - (before_delete, child_deletes), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - - if self.post_update: - - child_post_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - False) - child_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.mapper.primary_base_mapper, - True) - - # TODO: this whole block is not covered - # by any tests - if not isdelete: - if childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, child_post_updates), - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - (after_save, child_post_updates), - ]) - else: - if childisdelete: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) - else: - uow.dependencies.update([ - (before_delete, child_pre_updates), - (child_pre_updates, delete_parent), - ]) - elif not isdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - (save_parent, child_action) - ]) - else: - uow.dependencies.update([ - (before_delete, child_action), - (child_action, delete_parent) - ]) - - def presort_deletes(self, uowcommit, states): - # head object is being deleted, and we manage its list of - # child objects the child objects have to have their - # foreign key to the parent set to NULL - should_null_fks = not self.cascade.delete and \ - not self.passive_deletes == 'all' - - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.deleted: - if child is not None and self.hasparent(child) is False: - if self.cascade.delete_orphan: - uowcommit.register_object(child, isdelete=True) - else: - 
uowcommit.register_object(child) - - if should_null_fks: - for child in history.unchanged: - if child is not None: - uowcommit.register_object(child, - operation="delete", prop=self.prop) - - - - def presort_saves(self, uowcommit, states): - children_added = uowcommit.memo(('children_added', self), set) - - for state in states: - pks_changed = self._pks_changed(uowcommit, state) - - if not pks_changed or self.passive_updates: - passive = attributes.PASSIVE_NO_INITIALIZE - else: - passive = attributes.PASSIVE_OFF - - history = uowcommit.get_attribute_history( - state, - self.key, - passive) - if history: - for child in history.added: - if child is not None: - uowcommit.register_object(child, cancel_delete=True, - operation="add", - prop=self.prop) - - children_added.update(history.added) - - for child in history.deleted: - if not self.cascade.delete_orphan: - uowcommit.register_object(child, isdelete=False, - operation='delete', - prop=self.prop) - elif self.hasparent(child) is False: - uowcommit.register_object(child, isdelete=True, - operation="delete", prop=self.prop) - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', child): - uowcommit.register_object( - st_, - isdelete=True) - - if pks_changed: - if history: - for child in history.unchanged: - if child is not None: - uowcommit.register_object( - child, - False, - self.passive_updates, - operation="pk change", - prop=self.prop) - - def process_deletes(self, uowcommit, states): - # head object is being deleted, and we manage its list of - # child objects the child objects have to have their foreign - # key to the parent set to NULL this phase can be called - # safely for any cascade but is unnecessary if delete cascade - # is on. - - if self.post_update or not self.passive_deletes == 'all': - children_added = uowcommit.memo(('children_added', self), set) - - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.deleted: - if child is not None and \ - self.hasparent(child) is False: - self._synchronize( - state, - child, - None, True, - uowcommit, False) - if self.post_update and child: - self._post_update(child, uowcommit, [state]) - - if self.post_update or not self.cascade.delete: - for child in set(history.unchanged).\ - difference(children_added): - if child is not None: - self._synchronize( - state, - child, - None, True, - uowcommit, False) - if self.post_update and child: - self._post_update(child, - uowcommit, - [state]) - - # technically, we can even remove each child from the - # collection here too. but this would be a somewhat - # inconsistent behavior since it wouldn't happen - #if the old parent wasn't deleted but child was moved. 
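The observable effect of the delete processing above for a plain one-to-many (no delete cascade, no passive deletes), continuing the hypothetical Parent/Child sketch from earlier: the child rows are updated to a NULL foreign key rather than deleted:

```python
session.delete(p)
session.commit()
# the child row survives; its parent_id was set to NULL during the flush
assert session.query(Child).filter(Child.parent_id == None).count() == 1
```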
- - def process_saves(self, uowcommit, states): - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - for child in history.added: - self._synchronize(state, child, None, - False, uowcommit, False) - if child is not None and self.post_update: - self._post_update(child, uowcommit, [state]) - - for child in history.deleted: - if not self.cascade.delete_orphan and \ - not self.hasparent(child): - self._synchronize(state, child, None, True, - uowcommit, False) - - if self._pks_changed(uowcommit, state): - for child in history.unchanged: - self._synchronize(state, child, None, - False, uowcommit, True) - - def _synchronize(self, state, child, - associationrow, clearkeys, uowcommit, - pks_changed): - source = state - dest = child - if dest is None or \ - (not self.post_update and uowcommit.is_deleted(dest)): - return - self._verify_canload(child) - if clearkeys: - sync.clear(dest, self.mapper, self.prop.synchronize_pairs) - else: - sync.populate(source, self.parent, dest, self.mapper, - self.prop.synchronize_pairs, uowcommit, - self.passive_updates and pks_changed) - - def _pks_changed(self, uowcommit, state): - return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) - -class ManyToOneDP(DependencyProcessor): - def __init__(self, prop): - DependencyProcessor.__init__(self, prop) - self.mapper._dependency_processors.append(DetectKeySwitch(prop)) - - def per_property_dependencies(self, uow, - parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete): - - if self.post_update: - parent_post_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - False) - parent_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - True) - - uow.dependencies.update([ - (child_saves, after_save), - (parent_saves, after_save), - (after_save, parent_post_updates), - - (after_save, parent_pre_updates), - (before_delete, parent_pre_updates), - - (parent_pre_updates, child_deletes), - ]) - else: - uow.dependencies.update([ - (child_saves, after_save), - (after_save, parent_saves), - (parent_saves, child_deletes), - (parent_deletes, child_deletes) - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - - if self.post_update: - - if not isdelete: - parent_post_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - False) - if childisdelete: - uow.dependencies.update([ - (after_save, parent_post_updates), - (parent_post_updates, child_action) - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - - (after_save, parent_post_updates) - ]) - else: - parent_pre_updates = unitofwork.IssuePostUpdate( - uow, - self.parent.primary_base_mapper, - True) - - uow.dependencies.update([ - (before_delete, parent_pre_updates), - (parent_pre_updates, delete_parent), - (parent_pre_updates, child_action) - ]) - - elif not isdelete: - if not childisdelete: - uow.dependencies.update([ - (child_action, after_save), - (after_save, save_parent), - ]) - else: - uow.dependencies.update([ - (after_save, save_parent), - ]) - - else: - if childisdelete: - uow.dependencies.update([ - (delete_parent, child_action) - ]) - - def presort_deletes(self, uowcommit, states): - if self.cascade.delete or self.cascade.delete_orphan: - for state in states: - history = 
uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - if self.cascade.delete_orphan: - todelete = history.sum() - else: - todelete = history.non_deleted() - for child in todelete: - if child is None: - continue - uowcommit.register_object(child, isdelete=True, - operation="delete", prop=self.prop) - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', child): - uowcommit.register_object( - st_, isdelete=True) - - def presort_saves(self, uowcommit, states): - for state in states: - uowcommit.register_object(state, operation="add", prop=self.prop) - if self.cascade.delete_orphan: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - ret = True - for child in history.deleted: - if self.hasparent(child) is False: - uowcommit.register_object(child, isdelete=True, - operation="delete", prop=self.prop) - - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', child): - uowcommit.register_object( - st_, - isdelete=True) - - def process_deletes(self, uowcommit, states): - if self.post_update and \ - not self.cascade.delete_orphan and \ - not self.passive_deletes == 'all': - - # post_update means we have to update our - # row to not reference the child object - # before we can DELETE the row - for state in states: - self._synchronize(state, None, None, True, uowcommit) - if state and self.post_update: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - self._post_update(state, uowcommit, history.sum()) - - def process_saves(self, uowcommit, states): - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - for child in history.added: - self._synchronize(state, child, None, False, - uowcommit, "add") - - if self.post_update: - self._post_update(state, uowcommit, history.sum()) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation=None): - if state is None or \ - (not self.post_update and uowcommit.is_deleted(state)): - return - - if operation is not None and \ - child is not None and \ - not uowcommit.session._contains_state(child): - util.warn( - "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) - return - - if clearkeys or child is None: - sync.clear(state, self.parent, self.prop.synchronize_pairs) - else: - self._verify_canload(child) - sync.populate(child, self.mapper, state, - self.parent, - self.prop.synchronize_pairs, - uowcommit, - False) - -class DetectKeySwitch(DependencyProcessor): - """For many-to-one relationships with no one-to-many backref, - searches for parents through the unit of work when a primary - key has changed and updates them. - - Theoretically, this approach could be expanded to support transparent - deletion of objects referenced via many-to-one as well, although - the current attribute system doesn't do enough bookkeeping for this - to be efficient. 
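A sketch of the behavior just described, assuming a hypothetical many-to-one `Child.parent = relationship(Parent, passive_updates=False)` with no backref (passive updates disabled since SQLite cannot cascade primary key changes):

```python
p = session.query(Parent).first()
p.id = 999         # primary key "switch"
session.flush()    # in-session referrers are located and their FKs rewritten
assert all(c.parent_id == 999 for c in session.query(Child))
```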
- - """ - - def per_property_preprocessors(self, uow): - if self.prop._reverse_property: - if self.passive_updates: - return - else: - if False in (prop.passive_updates for \ - prop in self.prop._reverse_property): - return - - uow.register_preprocessor(self, False) - - def per_property_flush_actions(self, uow): - parent_saves = unitofwork.SaveUpdateAll( - uow, - self.parent.base_mapper) - after_save = unitofwork.ProcessAll(uow, self, False, False) - uow.dependencies.update([ - (parent_saves, after_save) - ]) - - def per_state_flush_actions(self, uow, states, isdelete): - pass - - def presort_deletes(self, uowcommit, states): - pass - - def presort_saves(self, uow, states): - if not self.passive_updates: - # for non-passive updates, register in the preprocess stage - # so that mapper save_obj() gets a hold of changes - self._process_key_switches(states, uow) - - def prop_has_changes(self, uow, states, isdelete): - if not isdelete and self.passive_updates: - d = self._key_switchers(uow, states) - return bool(d) - - return False - - def process_deletes(self, uowcommit, states): - assert False - - def process_saves(self, uowcommit, states): - # for passive updates, register objects in the process stage - # so that we avoid ManyToOneDP's registering the object without - # the listonly flag in its own preprocess stage (results in UPDATE) - # statements being emitted - assert self.passive_updates - self._process_key_switches(states, uowcommit) - - def _key_switchers(self, uow, states): - switched, notswitched = uow.memo( - ('pk_switchers', self), - lambda: (set(), set()) - ) - - allstates = switched.union(notswitched) - for s in states: - if s not in allstates: - if self._pks_changed(uow, s): - switched.add(s) - else: - notswitched.add(s) - return switched - - def _process_key_switches(self, deplist, uowcommit): - switchers = self._key_switchers(uowcommit, deplist) - if switchers: - # if primary key values have actually changed somewhere, perform - # a linear search through the UOW in search of a parent. 
- for state in uowcommit.session.identity_map.all_states(): - if not issubclass(state.class_, self.parent.class_): - continue - dict_ = state.dict - related = state.get_impl(self.key).get(state, dict_, - passive=self._passive_update_flag) - if related is not attributes.PASSIVE_NO_RESULT and \ - related is not None: - related_state = attributes.instance_state(dict_[self.key]) - if related_state in switchers: - uowcommit.register_object(state, - False, - self.passive_updates) - sync.populate( - related_state, - self.mapper, state, - self.parent, self.prop.synchronize_pairs, - uowcommit, self.passive_updates) - - def _pks_changed(self, uowcommit, state): - return bool(state.key) and sync.source_modified(uowcommit, - state, - self.mapper, - self.prop.synchronize_pairs) - - -class ManyToManyDP(DependencyProcessor): - - def per_property_dependencies(self, uow, parent_saves, - child_saves, - parent_deletes, - child_deletes, - after_save, - before_delete - ): - - uow.dependencies.update([ - (parent_saves, after_save), - (child_saves, after_save), - (after_save, child_deletes), - - # a rowswitch on the parent from deleted to saved - # can make this one occur, as the "save" may remove - # an element from the - # "deleted" list before we have a chance to - # process its child rows - (before_delete, parent_saves), - - (before_delete, parent_deletes), - (before_delete, child_deletes), - (before_delete, child_saves), - ]) - - def per_state_dependencies(self, uow, - save_parent, - delete_parent, - child_action, - after_save, before_delete, - isdelete, childisdelete): - if not isdelete: - if childisdelete: - uow.dependencies.update([ - (save_parent, after_save), - (after_save, child_action), - ]) - else: - uow.dependencies.update([ - (save_parent, after_save), - (child_action, after_save), - ]) - else: - uow.dependencies.update([ - (before_delete, child_action), - (before_delete, delete_parent) - ]) - - def presort_deletes(self, uowcommit, states): - if not self.passive_deletes: - # if no passive deletes, load history on - # the collection, so that prop_has_changes() - # returns True - for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - - def presort_saves(self, uowcommit, states): - if not self.passive_updates: - # if no passive updates, load history on - # each collection where parent has changed PK, - # so that prop_has_changes() returns True - for state in states: - if self._pks_changed(uowcommit, state): - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_OFF) - - if not self.cascade.delete_orphan: - return - - # check for child items removed from the collection - # if delete_orphan check is turned on. 
- for state in states: - history = uowcommit.get_attribute_history( - state, - self.key, - attributes.PASSIVE_NO_INITIALIZE) - if history: - for child in history.deleted: - if self.hasparent(child) is False: - uowcommit.register_object(child, isdelete=True, - operation="delete", prop=self.prop) - for c, m, st_, dct_ in self.mapper.cascade_iterator( - 'delete', - child): - uowcommit.register_object( - st_, isdelete=True) - - def process_deletes(self, uowcommit, states): - secondary_delete = [] - secondary_insert = [] - secondary_update = [] - - processed = self._get_reversed_processed_set(uowcommit) - tmp = set() - for state in states: - # this history should be cached already, as - # we loaded it in preprocess_deletes - history = uowcommit.get_attribute_history( - state, - self.key, - self._passive_delete_flag) - if history: - for child in history.non_added(): - if child is None or \ - (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize( - state, - child, - associationrow, - False, uowcommit, "delete"): - continue - secondary_delete.append(associationrow) - - tmp.update((c, state) for c in history.non_added()) - - if processed is not None: - processed.update(tmp) - - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) - - def process_saves(self, uowcommit, states): - secondary_delete = [] - secondary_insert = [] - secondary_update = [] - - processed = self._get_reversed_processed_set(uowcommit) - tmp = set() - - for state in states: - need_cascade_pks = not self.passive_updates and \ - self._pks_changed(uowcommit, state) - if need_cascade_pks: - passive = attributes.PASSIVE_OFF - else: - passive = attributes.PASSIVE_NO_INITIALIZE - history = uowcommit.get_attribute_history(state, self.key, - passive) - if history: - for child in history.added: - if child is None or \ - (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "add"): - continue - secondary_insert.append(associationrow) - for child in history.deleted: - if child is None or \ - (processed is not None and - (state, child) in processed): - continue - associationrow = {} - if not self._synchronize(state, - child, - associationrow, - False, uowcommit, "delete"): - continue - secondary_delete.append(associationrow) - - tmp.update((c, state) - for c in history.added + history.deleted) - - if need_cascade_pks: - - for child in history.unchanged: - associationrow = {} - sync.update(state, - self.parent, - associationrow, - "old_", - self.prop.synchronize_pairs) - sync.update(child, - self.mapper, - associationrow, - "old_", - self.prop.secondary_synchronize_pairs) - - secondary_update.append(associationrow) - - if processed is not None: - processed.update(tmp) - - self._run_crud(uowcommit, secondary_insert, - secondary_update, secondary_delete) - - def _run_crud(self, uowcommit, secondary_insert, - secondary_update, secondary_delete): - connection = uowcommit.transaction.connection(self.mapper) - - if secondary_delete: - associationrow = secondary_delete[0] - statement = self.secondary.delete(sql.and_(*[ - c == sql.bindparam(c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) - result = connection.execute(statement, secondary_delete) - - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_delete): - raise exc.StaleDataError( - "DELETE statement on table '%s' 
expected to delete %d row(s); " - "Only %d were matched." % - (self.secondary.description, len(secondary_delete), - result.rowcount) - ) - - if secondary_update: - associationrow = secondary_update[0] - statement = self.secondary.update(sql.and_(*[ - c == sql.bindparam("old_" + c.key, type_=c.type) - for c in self.secondary.c - if c.key in associationrow - ])) - result = connection.execute(statement, secondary_update) - if result.supports_sane_multi_rowcount() and \ - result.rowcount != len(secondary_update): - raise exc.StaleDataError( - "UPDATE statement on table '%s' expected to update %d row(s); " - "Only %d were matched." % - (self.secondary.description, len(secondary_update), - result.rowcount) - ) - - if secondary_insert: - statement = self.secondary.insert() - connection.execute(statement, secondary_insert) - - def _synchronize(self, state, child, associationrow, - clearkeys, uowcommit, operation): - if associationrow is None: - return - - if child is not None and not uowcommit.session._contains_state(child): - if not child.deleted: - util.warn( - "Object of type %s not in session, %s " - "operation along '%s' won't proceed" % - (mapperutil.state_class_str(child), operation, self.prop)) - return False - - self._verify_canload(child) - - sync.populate_dict(state, self.parent, associationrow, - self.prop.synchronize_pairs) - sync.populate_dict(child, self.mapper, associationrow, - self.prop.secondary_synchronize_pairs) - - return True - - def _pks_changed(self, uowcommit, state): - return sync.source_modified( - uowcommit, - state, - self.parent, - self.prop.synchronize_pairs) - -_direction_to_processor = { - ONETOMANY : OneToManyDP, - MANYTOONE: ManyToOneDP, - MANYTOMANY : ManyToManyDP, -} - diff --git a/libs/sqlalchemy/orm/deprecated_interfaces.py b/libs/sqlalchemy/orm/deprecated_interfaces.py deleted file mode 100644 index d251f52e..00000000 --- a/libs/sqlalchemy/orm/deprecated_interfaces.py +++ /dev/null @@ -1,590 +0,0 @@ -# orm/deprecated_interfaces.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy import event, util -from interfaces import EXT_CONTINUE - - -class MapperExtension(object): - """Base implementation for :class:`.Mapper` event hooks. - - .. note:: - - :class:`.MapperExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.MapperEvents`. - - New extension classes subclass :class:`.MapperExtension` and are specified - using the ``extension`` mapper() argument, which is a single - :class:`.MapperExtension` or a list of such:: - - from sqlalchemy.orm.interfaces import MapperExtension - - class MyExtension(MapperExtension): - def before_insert(self, mapper, connection, instance): - print "instance %s before insert !" % instance - - m = mapper(User, users_table, extension=MyExtension()) - - A single mapper can maintain a chain of ``MapperExtension`` - objects. When a particular mapping event occurs, the - corresponding method on each ``MapperExtension`` is invoked - serially, and each method has the ability to halt the chain - from proceeding further:: - - m = mapper(User, users_table, extension=[ext1, ext2, ext3]) - - Each ``MapperExtension`` method returns the symbol - EXT_CONTINUE by default. This symbol generally means "move - to the next ``MapperExtension`` for processing". 
For methods - that return objects like translated rows or new object - instances, EXT_CONTINUE means the result of the method - should be ignored. In some cases it's required for a - default mapper activity to be performed, such as adding a - new instance to a result list. - - The symbol EXT_STOP has significance within a chain - of ``MapperExtension`` objects that the chain will be stopped - when this symbol is returned. Like EXT_CONTINUE, it also - has additional significance in some cases that a default - mapper activity will not be performed. - - """ - - @classmethod - def _adapt_instrument_class(cls, self, listener): - cls._adapt_listener_methods(self, listener, ('instrument_class',)) - - @classmethod - def _adapt_listener(cls, self, listener): - cls._adapt_listener_methods( - self, listener, - ( - 'init_instance', - 'init_failed', - 'translate_row', - 'create_instance', - 'append_result', - 'populate_instance', - 'reconstruct_instance', - 'before_insert', - 'after_insert', - 'before_update', - 'after_update', - 'before_delete', - 'after_delete' - )) - - @classmethod - def _adapt_listener_methods(cls, self, listener, methods): - - for meth in methods: - me_meth = getattr(MapperExtension, meth) - ls_meth = getattr(listener, meth) - - if not util.methods_equivalent(me_meth, ls_meth): - if meth == 'reconstruct_instance': - def go(ls_meth): - def reconstruct(instance, ctx): - ls_meth(self, instance) - return reconstruct - event.listen(self.class_manager, 'load', - go(ls_meth), raw=False, propagate=True) - elif meth == 'init_instance': - def go(ls_meth): - def init_instance(instance, args, kwargs): - ls_meth(self, self.class_, - self.class_manager.original_init, - instance, args, kwargs) - return init_instance - event.listen(self.class_manager, 'init', - go(ls_meth), raw=False, propagate=True) - elif meth == 'init_failed': - def go(ls_meth): - def init_failed(instance, args, kwargs): - util.warn_exception(ls_meth, self, self.class_, - self.class_manager.original_init, - instance, args, kwargs) - - return init_failed - event.listen(self.class_manager, 'init_failure', - go(ls_meth), raw=False, propagate=True) - else: - event.listen(self, "%s" % meth, ls_meth, - raw=False, retval=True, propagate=True) - - - def instrument_class(self, mapper, class_): - """Receive a class when the mapper is first constructed, and has - applied instrumentation to the mapped class. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def init_instance(self, mapper, class_, oldinit, instance, args, kwargs): - """Receive an instance when it's constructor is called. - - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def init_failed(self, mapper, class_, oldinit, instance, args, kwargs): - """Receive an instance when it's constructor has been called, - and raised an exception. - - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. 
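As the deprecation note above says, new code should use event.listen() with MapperEvents instead. The MyExtension example translates roughly as follows; SomeMappedClass stands in for any mapped class:

    from sqlalchemy import event

    def before_insert(mapper, connection, target):
        # same hook point as MapperExtension.before_insert;
        # 'target' is the instance about to be inserted
        print("instance %s before insert !" % target)

    event.listen(SomeMappedClass, 'before_insert', before_insert)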
- - """ - return EXT_CONTINUE - - def translate_row(self, mapper, context, row): - """Perform pre-processing on the given result row and return a - new row instance. - - This is called when the mapper first receives a row, before - the object identity or the instance itself has been derived - from that row. The given row may or may not be a - ``RowProxy`` object - it will always be a dictionary-like - object which contains mapped columns as keys. The - returned object should also be a dictionary-like object - which recognizes mapped columns as keys. - - If the ultimate return value is EXT_CONTINUE, the row - is not translated. - - """ - return EXT_CONTINUE - - def create_instance(self, mapper, selectcontext, row, class_): - """Receive a row when a new object instance is about to be - created from that row. - - The method can choose to create the instance itself, or it can return - EXT_CONTINUE to indicate normal object creation should take place. - - mapper - The mapper doing the operation - - selectcontext - The QueryContext generated from the Query. - - row - The result row from the database - - class\_ - The class we are mapping. - - return value - A new object instance, or EXT_CONTINUE - - """ - return EXT_CONTINUE - - def append_result(self, mapper, selectcontext, row, instance, - result, **flags): - """Receive an object instance before that instance is appended - to a result list. - - If this method returns EXT_CONTINUE, result appending will proceed - normally. if this method returns any other value or None, - result appending will not proceed for this instance, giving - this extension an opportunity to do the appending itself, if - desired. - - mapper - The mapper doing the operation. - - selectcontext - The QueryContext generated from the Query. - - row - The result row from the database. - - instance - The object instance to be appended to the result. - - result - List to which results are being appended. - - \**flags - extra information about the row, same as criterion in - ``create_row_processor()`` method of - :class:`~sqlalchemy.orm.interfaces.MapperProperty` - """ - - return EXT_CONTINUE - - def populate_instance(self, mapper, selectcontext, row, - instance, **flags): - """Receive an instance before that instance has - its attributes populated. - - This usually corresponds to a newly loaded instance but may - also correspond to an already-loaded instance which has - unloaded attributes to be populated. The method may be called - many times for a single instance, as multiple result rows are - used to populate eagerly loaded collections. - - If this method returns EXT_CONTINUE, instance population will - proceed normally. If any other value or None is returned, - instance population will not proceed, giving this extension an - opportunity to populate the instance itself, if desired. - - .. deprecated:: 0.5 - Most usages of this hook are obsolete. For a - generic "object has been newly created from a row" hook, use - ``reconstruct_instance()``, or the ``@orm.reconstructor`` - decorator. - - """ - return EXT_CONTINUE - - def reconstruct_instance(self, mapper, instance): - """Receive an object instance after it has been created via - ``__new__``, and after initial attribute population has - occurred. - - This typically occurs when the instance is created based on - incoming result rows, and is only called once for that - instance's lifetime. - - Note that during a result-row load, this method is called upon - the first row received for this instance. 
Note that some - attributes and collections may or may not be loaded or even - initialized, depending on what's present in the result rows. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - return EXT_CONTINUE - - def before_insert(self, mapper, connection, instance): - """Receive an object instance before that instance is inserted - into its table. - - This is a good place to set up primary key values and such - that aren't handled otherwise. - - Column-based attributes can be modified within this method - which will result in the new value being inserted. However - *no* changes to the overall flush plan can be made, and - manipulation of the ``Session`` will not have the desired effect. - To manipulate the ``Session`` within an extension, use - ``SessionExtension``. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_insert(self, mapper, connection, instance): - """Receive an object instance after that instance is inserted. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def before_update(self, mapper, connection, instance): - """Receive an object instance before that instance is updated. - - Note that this method is called for all instances that are marked as - "dirty", even those which have no net changes to their column-based - attributes. An object is marked as dirty when any of its column-based - attributes have a "set attribute" operation called or when any of its - collections are modified. If, at update time, no column-based - attributes have any net changes, no UPDATE statement will be issued. - This means that an instance being sent to before_update is *not* a - guarantee that an UPDATE statement will be issued (although you can - affect the outcome here). - - To detect if the column-based attributes on the object have net - changes, and will therefore generate an UPDATE statement, use - ``object_session(instance).is_modified(instance, - include_collections=False)``. - - Column-based attributes can be modified within this method - which will result in the new value being updated. However - *no* changes to the overall flush plan can be made, and - manipulation of the ``Session`` will not have the desired effect. - To manipulate the ``Session`` within an extension, use - ``SessionExtension``. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_update(self, mapper, connection, instance): - """Receive an object instance after that instance is updated. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def before_delete(self, mapper, connection, instance): - """Receive an object instance before that instance is deleted. - - Note that *no* changes to the overall flush plan can be made - here; and manipulation of the ``Session`` will not have the - desired effect. To manipulate the ``Session`` within an - extension, use ``SessionExtension``. 
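A sketch of the is_modified() check suggested above, written against the event API; the mapped class and the needs_audit column are hypothetical:

    from sqlalchemy import event
    from sqlalchemy.orm import object_session

    def before_update(mapper, connection, target):
        # "dirty" does not guarantee an UPDATE; test net column changes
        if object_session(target).is_modified(
                target, include_collections=False):
            target.needs_audit = True    # hypothetical audit column

    event.listen(SomeMappedClass, 'before_update', before_update)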
- - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - - def after_delete(self, mapper, connection, instance): - """Receive an object instance after that instance is deleted. - - The return value is only significant within the ``MapperExtension`` - chain; the parent mapper's behavior isn't modified by this method. - - """ - - return EXT_CONTINUE - -class SessionExtension(object): - - """Base implementation for :class:`.Session` event hooks. - - .. note:: - - :class:`.SessionExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.SessionEvents`. - - Subclasses may be installed into a :class:`.Session` (or - :func:`.sessionmaker`) using the ``extension`` keyword - argument:: - - from sqlalchemy.orm.interfaces import SessionExtension - - class MySessionExtension(SessionExtension): - def before_commit(self, session): - print "before commit!" - - Session = sessionmaker(extension=MySessionExtension()) - - The same :class:`.SessionExtension` instance can be used - with any number of sessions. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - for meth in [ - 'before_commit', - 'after_commit', - 'after_rollback', - 'before_flush', - 'after_flush', - 'after_flush_postexec', - 'after_begin', - 'after_attach', - 'after_bulk_update', - 'after_bulk_delete', - ]: - me_meth = getattr(SessionExtension, meth) - ls_meth = getattr(listener, meth) - - if not util.methods_equivalent(me_meth, ls_meth): - event.listen(self, meth, getattr(listener, meth)) - - def before_commit(self, session): - """Execute right before commit is called. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def after_commit(self, session): - """Execute after a commit has occurred. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def after_rollback(self, session): - """Execute after a rollback has occurred. - - Note that this may not be per-flush if a longer running - transaction is ongoing.""" - - def before_flush( self, session, flush_context, instances): - """Execute before flush process has started. - - `instances` is an optional list of objects which were passed to - the ``flush()`` method. """ - - def after_flush(self, session, flush_context): - """Execute after flush has completed, but before commit has been - called. - - Note that the session's state is still in pre-flush, i.e. 'new', - 'dirty', and 'deleted' lists still show pre-flush state as well - as the history settings on instance attributes.""" - - def after_flush_postexec(self, session, flush_context): - """Execute after flush has completed, and after the post-exec - state occurs. - - This will be when the 'new', 'dirty', and 'deleted' lists are in - their final state. An actual commit() may or may not have - occurred, depending on whether or not the flush started its own - transaction or participated in a larger transaction. """ - - def after_begin( self, session, transaction, connection): - """Execute after a transaction is begun on a connection - - `transaction` is the SessionTransaction. This method is called - after an engine level transaction is begun on a connection. """ - - def after_attach(self, session, instance): - """Execute after an instance is attached to a session. - - This is called after an add, delete or merge. 
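As with MapperExtension, the note above points to event.listen() plus SessionEvents as the replacement. The MySessionExtension example translates roughly to:

    from sqlalchemy import event
    from sqlalchemy.orm import sessionmaker

    Session = sessionmaker()

    def before_commit(session):
        print("before commit!")

    # a sessionmaker is an accepted event target; every Session it
    # produces inherits the hook
    event.listen(Session, 'before_commit', before_commit)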
""" - - def after_bulk_update( self, session, query, query_context, result): - """Execute after a bulk update operation to the session. - - This is called after a session.query(...).update() - - `query` is the query object that this update operation was - called on. `query_context` was the query context object. - `result` is the result object returned from the bulk operation. - """ - - def after_bulk_delete( self, session, query, query_context, result): - """Execute after a bulk delete operation to the session. - - This is called after a session.query(...).delete() - - `query` is the query object that this delete operation was - called on. `query_context` was the query context object. - `result` is the result object returned from the bulk operation. - """ - - -class AttributeExtension(object): - """Base implementation for :class:`.AttributeImpl` event hooks, events - that fire upon attribute mutations in user code. - - .. note:: - - :class:`.AttributeExtension` is deprecated. Please - refer to :func:`.event.listen` as well as - :class:`.AttributeEvents`. - - :class:`.AttributeExtension` is used to listen for set, - remove, and append events on individual mapped attributes. - It is established on an individual mapped attribute using - the `extension` argument, available on - :func:`.column_property`, :func:`.relationship`, and - others:: - - from sqlalchemy.orm.interfaces import AttributeExtension - from sqlalchemy.orm import mapper, relationship, column_property - - class MyAttrExt(AttributeExtension): - def append(self, state, value, initiator): - print "append event !" - return value - - def set(self, state, value, oldvalue, initiator): - print "set event !" - return value - - mapper(SomeClass, sometable, properties={ - 'foo':column_property(sometable.c.foo, extension=MyAttrExt()), - 'bar':relationship(Bar, extension=MyAttrExt()) - }) - - Note that the :class:`.AttributeExtension` methods - :meth:`~.AttributeExtension.append` and - :meth:`~.AttributeExtension.set` need to return the - ``value`` parameter. The returned value is used as the - effective value, and allows the extension to change what is - ultimately persisted. - - AttributeExtension is assembled within the descriptors associated - with a mapped class. - - """ - - active_history = True - """indicates that the set() method would like to receive the 'old' value, - even if it means firing lazy callables. - - Note that ``active_history`` can also be set directly via - :func:`.column_property` and :func:`.relationship`. - - """ - - @classmethod - def _adapt_listener(cls, self, listener): - event.listen(self, 'append', listener.append, - active_history=listener.active_history, - raw=True, retval=True) - event.listen(self, 'remove', listener.remove, - active_history=listener.active_history, - raw=True, retval=True) - event.listen(self, 'set', listener.set, - active_history=listener.active_history, - raw=True, retval=True) - - def append(self, state, value, initiator): - """Receive a collection append event. - - The returned value will be used as the actual value to be - appended. - - """ - return value - - def remove(self, state, value, initiator): - """Receive a remove event. - - No return value is defined. - - """ - pass - - def set(self, state, value, oldvalue, initiator): - """Receive a set event. - - The returned value will be used as the actual value to be - set. 
- - """ - return value - - diff --git a/libs/sqlalchemy/orm/descriptor_props.py b/libs/sqlalchemy/orm/descriptor_props.py deleted file mode 100644 index 70cf0e7c..00000000 --- a/libs/sqlalchemy/orm/descriptor_props.py +++ /dev/null @@ -1,422 +0,0 @@ -# orm/descriptor_props.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Descriptor properties are more "auxiliary" properties -that exist as configurational elements, but don't participate -as actively in the load/persist ORM loop. - -""" - -from sqlalchemy.orm.interfaces import \ - MapperProperty, PropComparator, StrategizedProperty -from sqlalchemy.orm.mapper import _none_set -from sqlalchemy.orm import attributes, strategies -from sqlalchemy import util, sql, exc as sa_exc, event, schema -from sqlalchemy.sql import expression -properties = util.importlater('sqlalchemy.orm', 'properties') - -class DescriptorProperty(MapperProperty): - """:class:`.MapperProperty` which proxies access to a - user-defined descriptor.""" - - doc = None - - def instrument_class(self, mapper): - prop = self - - class _ProxyImpl(object): - accepts_scalar_loader = False - expire_missing = True - - def __init__(self, key): - self.key = key - - if hasattr(prop, 'get_history'): - def get_history(self, state, dict_, - passive=attributes.PASSIVE_OFF): - return prop.get_history(state, dict_, passive) - - if self.descriptor is None: - desc = getattr(mapper.class_, self.key, None) - if mapper._is_userland_descriptor(desc): - self.descriptor = desc - - if self.descriptor is None: - def fset(obj, value): - setattr(obj, self.name, value) - def fdel(obj): - delattr(obj, self.name) - def fget(obj): - return getattr(obj, self.name) - - self.descriptor = property( - fget=fget, - fset=fset, - fdel=fdel, - ) - - proxy_attr = attributes.\ - create_proxied_attribute(self.descriptor)\ - ( - self.parent.class_, - self.key, - self.descriptor, - lambda: self._comparator_factory(mapper), - doc=self.doc - ) - proxy_attr.impl = _ProxyImpl(self.key) - mapper.class_manager.instrument_attribute(self.key, proxy_attr) - - -class CompositeProperty(DescriptorProperty): - - def __init__(self, class_, *attrs, **kwargs): - self.attrs = attrs - self.composite_class = class_ - self.active_history = kwargs.get('active_history', False) - self.deferred = kwargs.get('deferred', False) - self.group = kwargs.get('group', None) - self.comparator_factory = kwargs.pop('comparator_factory', - self.__class__.Comparator) - util.set_creation_order(self) - self._create_descriptor() - - def instrument_class(self, mapper): - super(CompositeProperty, self).instrument_class(mapper) - self._setup_event_handlers() - - def do_init(self): - """Initialization which occurs after the :class:`.CompositeProperty` - has been associated with its parent mapper. - - """ - self._init_props() - self._setup_arguments_on_columns() - - def _create_descriptor(self): - """Create the Python descriptor that will serve as - the access point on instances of the mapped class. - - """ - - def fget(instance): - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - - if self.key not in dict_: - # key not present. Iterate through related - # attributes, retrieve their values. This - # ensures they all load. 
- values = [getattr(instance, key) for key in self._attribute_keys] - - # current expected behavior here is that the composite is - # created on access if the object is persistent or if - # col attributes have non-None. This would be better - # if the composite were created unconditionally, - # but that would be a behavioral change. - if self.key not in dict_ and ( - state.key is not None or - not _none_set.issuperset(values) - ): - dict_[self.key] = self.composite_class(*values) - state.manager.dispatch.refresh(state, None, [self.key]) - - return dict_.get(self.key, None) - - def fset(instance, value): - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - attr = state.manager[self.key] - previous = dict_.get(self.key, attributes.NO_VALUE) - for fn in attr.dispatch.set: - value = fn(state, value, previous, attr.impl) - dict_[self.key] = value - if value is None: - for key in self._attribute_keys: - setattr(instance, key, None) - else: - for key, value in zip( - self._attribute_keys, - value.__composite_values__()): - setattr(instance, key, value) - - def fdel(instance): - state = attributes.instance_state(instance) - dict_ = attributes.instance_dict(instance) - previous = dict_.pop(self.key, attributes.NO_VALUE) - attr = state.manager[self.key] - attr.dispatch.remove(state, previous, attr.impl) - for key in self._attribute_keys: - setattr(instance, key, None) - - self.descriptor = property(fget, fset, fdel) - - @util.memoized_property - def _comparable_elements(self): - return [ - getattr(self.parent.class_, prop.key) - for prop in self.props - ] - - def _init_props(self): - self.props = props = [] - for attr in self.attrs: - if isinstance(attr, basestring): - prop = self.parent.get_property(attr) - elif isinstance(attr, schema.Column): - prop = self.parent._columntoproperty[attr] - elif isinstance(attr, attributes.InstrumentedAttribute): - prop = attr.property - props.append(prop) - - @property - def columns(self): - return [a for a in self.attrs if isinstance(a, schema.Column)] - - def _setup_arguments_on_columns(self): - """Propagate configuration arguments made on this composite - to the target columns, for those that apply. - - """ - for prop in self.props: - prop.active_history = self.active_history - if self.deferred: - prop.deferred = self.deferred - prop.strategy_class = strategies.DeferredColumnLoader - prop.group = self.group - - def _setup_event_handlers(self): - """Establish events that populate/expire the composite attribute.""" - - def load_handler(state, *args): - dict_ = state.dict - - if self.key in dict_: - return - - # if column elements aren't loaded, skip. - # __get__() will initiate a load for those - # columns - for k in self._attribute_keys: - if k not in dict_: - return - - #assert self.key not in dict_ - dict_[self.key] = self.composite_class( - *[state.dict[key] for key in - self._attribute_keys] - ) - - def expire_handler(state, keys): - if keys is None or set(self._attribute_keys).intersection(keys): - state.dict.pop(self.key, None) - - def insert_update_handler(mapper, connection, state): - """After an insert or update, some columns may be expired due - to server side defaults, or re-populated due to client side - defaults. Pop out the composite value here so that it - recreates. 
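From userland, this descriptor machinery is reached through orm.composite() and a value class exposing __composite_values__(). The standard pattern, sketched with illustrative names:

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import composite

    Base = declarative_base()

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

        def __composite_values__(self):
            # order must line up with the mapped columns below
            return self.x, self.y

        def __eq__(self, other):
            return isinstance(other, Point) and \
                (other.x, other.y) == (self.x, self.y)

    class Vertex(Base):
        __tablename__ = 'vertex'
        id = Column(Integer, primary_key=True)
        x1 = Column(Integer)
        y1 = Column(Integer)
        # the fget/fset shown above assemble and tear apart Point via x1/y1
        start = composite(Point, x1, y1)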
- - """ - - state.dict.pop(self.key, None) - - event.listen(self.parent, 'after_insert', - insert_update_handler, raw=True) - event.listen(self.parent, 'after_update', - insert_update_handler, raw=True) - event.listen(self.parent, 'load', load_handler, raw=True, propagate=True) - event.listen(self.parent, 'refresh', load_handler, raw=True, propagate=True) - event.listen(self.parent, "expire", expire_handler, raw=True, propagate=True) - - # TODO: need a deserialize hook here - - @util.memoized_property - def _attribute_keys(self): - return [ - prop.key for prop in self.props - ] - - def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): - """Provided for userland code that uses attributes.get_history().""" - - added = [] - deleted = [] - - has_history = False - for prop in self.props: - key = prop.key - hist = state.manager[key].impl.get_history(state, dict_) - if hist.has_changes(): - has_history = True - - non_deleted = hist.non_deleted() - if non_deleted: - added.extend(non_deleted) - else: - added.append(None) - if hist.deleted: - deleted.extend(hist.deleted) - else: - deleted.append(None) - - if has_history: - return attributes.History( - [self.composite_class(*added)], - (), - [self.composite_class(*deleted)] - ) - else: - return attributes.History( - (),[self.composite_class(*added)], () - ) - - def _comparator_factory(self, mapper): - return self.comparator_factory(self) - - class Comparator(PropComparator): - def __init__(self, prop, adapter=None): - self.prop = self.property = prop - self.adapter = adapter - - def __clause_element__(self): - if self.adapter: - # TODO: test coverage for adapted composite comparison - return expression.ClauseList( - *[self.adapter(x) for x in self.prop._comparable_elements]) - else: - return expression.ClauseList(*self.prop._comparable_elements) - - __hash__ = None - - def __eq__(self, other): - if other is None: - values = [None] * len(self.prop._comparable_elements) - else: - values = other.__composite_values__() - return sql.and_( - *[a==b for a, b in zip(self.prop._comparable_elements, values)]) - - def __ne__(self, other): - return sql.not_(self.__eq__(other)) - - def __str__(self): - return str(self.parent.class_.__name__) + "." + self.key - -class ConcreteInheritedProperty(DescriptorProperty): - """A 'do nothing' :class:`.MapperProperty` that disables - an attribute on a concrete subclass that is only present - on the inherited mapper, not the concrete classes' mapper. - - Cases where this occurs include: - - * When the superclass mapper is mapped against a - "polymorphic union", which includes all attributes from - all subclasses. - * When a relationship() is configured on an inherited mapper, - but not on the subclass mapper. Concrete mappers require - that relationship() is configured explicitly on each - subclass. - - """ - - def _comparator_factory(self, mapper): - comparator_callable = None - - for m in self.parent.iterate_to_root(): - p = m._props[self.key] - if not isinstance(p, ConcreteInheritedProperty): - comparator_callable = p.comparator_factory - break - return comparator_callable - - def __init__(self): - def warn(): - raise AttributeError("Concrete %s does not implement " - "attribute %r at the instance level. Add this " - "property explicitly to %s." 
% - (self.parent, self.key, self.parent)) - - class NoninheritedConcreteProp(object): - def __set__(s, obj, value): - warn() - def __delete__(s, obj): - warn() - def __get__(s, obj, owner): - if obj is None: - return self.descriptor - warn() - self.descriptor = NoninheritedConcreteProp() - - -class SynonymProperty(DescriptorProperty): - - def __init__(self, name, map_column=None, - descriptor=None, comparator_factory=None, - doc=None): - self.name = name - self.map_column = map_column - self.descriptor = descriptor - self.comparator_factory = comparator_factory - self.doc = doc or (descriptor and descriptor.__doc__) or None - - util.set_creation_order(self) - - # TODO: when initialized, check _proxied_property, - # emit a warning if its not a column-based property - - @util.memoized_property - def _proxied_property(self): - return getattr(self.parent.class_, self.name).property - - def _comparator_factory(self, mapper): - prop = self._proxied_property - - if self.comparator_factory: - comp = self.comparator_factory(prop, mapper) - else: - comp = prop.comparator_factory(prop, mapper) - return comp - - def set_parent(self, parent, init): - if self.map_column: - # implement the 'map_column' option. - if self.key not in parent.mapped_table.c: - raise sa_exc.ArgumentError( - "Can't compile synonym '%s': no column on table " - "'%s' named '%s'" - % (self.name, parent.mapped_table.description, self.key)) - elif parent.mapped_table.c[self.key] in \ - parent._columntoproperty and \ - parent._columntoproperty[ - parent.mapped_table.c[self.key] - ].key == self.name: - raise sa_exc.ArgumentError( - "Can't call map_column=True for synonym %r=%r, " - "a ColumnProperty already exists keyed to the name " - "%r for column %r" % - (self.key, self.name, self.name, self.key) - ) - p = properties.ColumnProperty(parent.mapped_table.c[self.key]) - parent._configure_property( - self.name, p, - init=init, - setparent=True) - p._mapped_by_synonym = self.key - - self.parent = parent - -class ComparableProperty(DescriptorProperty): - """Instruments a Python property for use in query expressions.""" - - def __init__(self, comparator_factory, descriptor=None, doc=None): - self.descriptor = descriptor - self.comparator_factory = comparator_factory - self.doc = doc or (descriptor and descriptor.__doc__) or None - util.set_creation_order(self) - - def _comparator_factory(self, mapper): - return self.comparator_factory(self, mapper) diff --git a/libs/sqlalchemy/orm/dynamic.py b/libs/sqlalchemy/orm/dynamic.py deleted file mode 100644 index e3773659..00000000 --- a/libs/sqlalchemy/orm/dynamic.py +++ /dev/null @@ -1,320 +0,0 @@ -# orm/dynamic.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Dynamic collection API. - -Dynamic collections act like Query() objects for read operations and support -basic add/delete mutation. 
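In userland this module is reached through relationship(..., lazy='dynamic'); the attribute then acts as a Query for reads while still supporting append()/remove(). A sketch with illustrative names:

    from sqlalchemy import (create_engine, Column, Integer, String,
                            ForeignKey)
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import relationship, sessionmaker

    Base = declarative_base()

    class User(Base):
        __tablename__ = 'users'
        id = Column(Integer, primary_key=True)
        addresses = relationship('Address', lazy='dynamic')

    class Address(Base):
        __tablename__ = 'addresses'
        id = Column(Integer, primary_key=True)
        user_id = Column(Integer, ForeignKey('users.id'))
        email = Column(String(100))

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    u = User()
    session.add(u)
    u.addresses.append(Address(email='a@example.com'))  # basic mutation
    session.commit()
    # the read side is a real Query: filter/order_by/count all work
    n = u.addresses.filter(Address.email.like('%@example.com')).count()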
- -""" - -from sqlalchemy import log, util -from sqlalchemy.orm import exc as orm_exc -from sqlalchemy.sql import operators -from sqlalchemy.orm import ( - attributes, object_session, util as mapperutil, strategies, object_mapper - ) -from sqlalchemy.orm.query import Query -from sqlalchemy.orm.util import has_identity -from sqlalchemy.orm import collections - -class DynaLoader(strategies.AbstractRelationshipLoader): - def init_class_attribute(self, mapper): - self.is_class_level = True - if not self.uselist: - util.warn( - "On relationship %s, 'dynamic' loaders cannot be used with " - "many-to-one/one-to-one relationships and/or " - "uselist=False." % self.parent_property) - strategies._register_attribute(self, - mapper, - useobject=True, - impl_class=DynamicAttributeImpl, - target_mapper=self.parent_property.mapper, - order_by=self.parent_property.order_by, - query_class=self.parent_property.query_class - ) - -log.class_logger(DynaLoader) - -class DynamicAttributeImpl(attributes.AttributeImpl): - uses_objects = True - accepts_scalar_loader = False - supports_population = False - - def __init__(self, class_, key, typecallable, - dispatch, - target_mapper, order_by, query_class=None, **kw): - super(DynamicAttributeImpl, self).\ - __init__(class_, key, typecallable, dispatch, **kw) - self.target_mapper = target_mapper - self.order_by = order_by - if not query_class: - self.query_class = AppenderQuery - elif AppenderMixin in query_class.mro(): - self.query_class = query_class - else: - self.query_class = mixin_user_query(query_class) - - def get(self, state, dict_, passive=attributes.PASSIVE_OFF): - if passive is not attributes.PASSIVE_OFF: - return self._get_collection_history(state, - attributes.PASSIVE_NO_INITIALIZE).added_items - else: - return self.query_class(self, state) - - def get_collection(self, state, dict_, user_data=None, - passive=attributes.PASSIVE_NO_INITIALIZE): - if passive is not attributes.PASSIVE_OFF: - return self._get_collection_history(state, - passive).added_items - else: - history = self._get_collection_history(state, passive) - return history.added_items + history.unchanged_items - - def fire_append_event(self, state, dict_, value, initiator): - collection_history = self._modified_event(state, dict_) - collection_history.added_items.append(value) - - for fn in self.dispatch.append: - value = fn(state, value, initiator or self) - - if self.trackparent and value is not None: - self.sethasparent(attributes.instance_state(value), state, True) - - def fire_remove_event(self, state, dict_, value, initiator): - collection_history = self._modified_event(state, dict_) - collection_history.deleted_items.append(value) - - if self.trackparent and value is not None: - self.sethasparent(attributes.instance_state(value), state, False) - - for fn in self.dispatch.remove: - fn(state, value, initiator or self) - - def _modified_event(self, state, dict_): - - if self.key not in state.committed_state: - state.committed_state[self.key] = CollectionHistory(self, state) - - state.modified_event(dict_, - self, - attributes.NEVER_SET) - - # this is a hack to allow the fixtures.ComparableEntity fixture - # to work - dict_[self.key] = True - return state.committed_state[self.key] - - def set(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF, - check_old=None, pop=False): - if initiator and initiator.parent_token is self.parent_token: - return - - if pop and value is None: - return - self._set_iterable(state, dict_, value) - - - def _set_iterable(self, state, dict_, 
iterable, adapter=None): - collection_history = self._modified_event(state, dict_) - new_values = list(iterable) - if state.has_identity: - old_collection = list(self.get(state, dict_)) - else: - old_collection = [] - collections.bulk_replace(new_values, DynCollectionAdapter(self, - state, old_collection), - DynCollectionAdapter(self, state, - new_values)) - - def delete(self, *args, **kwargs): - raise NotImplementedError() - - def set_committed_value(self, state, dict_, value): - raise NotImplementedError("Dynamic attributes don't support " - "collection population.") - - def get_history(self, state, dict_, passive=attributes.PASSIVE_OFF): - c = self._get_collection_history(state, passive) - return attributes.History(c.added_items, c.unchanged_items, - c.deleted_items) - - def get_all_pending(self, state, dict_): - c = self._get_collection_history(state, True) - return [ - (attributes.instance_state(x), x) - for x in - c.added_items + c.unchanged_items + c.deleted_items - ] - - def _get_collection_history(self, state, passive=attributes.PASSIVE_OFF): - if self.key in state.committed_state: - c = state.committed_state[self.key] - else: - c = CollectionHistory(self, state) - - if passive is attributes.PASSIVE_OFF: - return CollectionHistory(self, state, apply_to=c) - else: - return c - - def append(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF): - if initiator is not self: - self.fire_append_event(state, dict_, value, initiator) - - def remove(self, state, dict_, value, initiator, - passive=attributes.PASSIVE_OFF): - if initiator is not self: - self.fire_remove_event(state, dict_, value, initiator) - -class DynCollectionAdapter(object): - """the dynamic analogue to orm.collections.CollectionAdapter""" - - def __init__(self, attr, owner_state, data): - self.attr = attr - self.state = owner_state - self.data = data - - def __iter__(self): - return iter(self.data) - - def append_with_event(self, item, initiator=None): - self.attr.append(self.state, self.state.dict, item, initiator) - - def remove_with_event(self, item, initiator=None): - self.attr.remove(self.state, self.state.dict, item, initiator) - - def append_without_event(self, item): - pass - - def remove_without_event(self, item): - pass - -class AppenderMixin(object): - query_class = None - - def __init__(self, attr, state): - Query.__init__(self, attr.target_mapper, None) - self.instance = instance = state.obj() - self.attr = attr - - mapper = object_mapper(instance) - prop = mapper._props[self.attr.key] - self._criterion = prop.compare( - operators.eq, - instance, - value_is_parent=True, - alias_secondary=False) - - if self.attr.order_by: - self._order_by = self.attr.order_by - - def __session(self): - sess = object_session(self.instance) - if sess is not None and self.autoflush and sess.autoflush \ - and self.instance in sess: - sess.flush() - if not has_identity(self.instance): - return None - else: - return sess - - def session(self): - return self.__session() - session = property(session, lambda s, x:None) - - def __iter__(self): - sess = self.__session() - if sess is None: - return iter(self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).added_items) - else: - return iter(self._clone(sess)) - - def __getitem__(self, index): - sess = self.__session() - if sess is None: - return self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).added_items.\ - __getitem__(index) - else: - return 
self._clone(sess).__getitem__(index) - - def count(self): - sess = self.__session() - if sess is None: - return len(self.attr._get_collection_history( - attributes.instance_state(self.instance), - attributes.PASSIVE_NO_INITIALIZE).added_items) - else: - return self._clone(sess).count() - - def _clone(self, sess=None): - # note we're returning an entirely new Query class instance - # here without any assignment capabilities; the class of this - # query is determined by the session. - instance = self.instance - if sess is None: - sess = object_session(instance) - if sess is None: - raise orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session, and no " - "contextual session is established; lazy load operation " - "of attribute '%s' cannot proceed" % ( - mapperutil.instance_str(instance), self.attr.key)) - - if self.query_class: - query = self.query_class(self.attr.target_mapper, session=sess) - else: - query = sess.query(self.attr.target_mapper) - - query._criterion = self._criterion - query._order_by = self._order_by - - return query - - def append(self, item): - self.attr.append( - attributes.instance_state(self.instance), - attributes.instance_dict(self.instance), item, None) - - def remove(self, item): - self.attr.remove( - attributes.instance_state(self.instance), - attributes.instance_dict(self.instance), item, None) - - -class AppenderQuery(AppenderMixin, Query): - """A dynamic query that supports basic collection storage operations.""" - - -def mixin_user_query(cls): - """Return a new class with AppenderQuery functionality layered over.""" - name = 'Appender' + cls.__name__ - return type(name, (AppenderMixin, cls), {'query_class': cls}) - -class CollectionHistory(object): - """Overrides AttributeHistory to receive append/remove events directly.""" - - def __init__(self, attr, state, apply_to=None): - if apply_to: - deleted = util.IdentitySet(apply_to.deleted_items) - added = apply_to.added_items - coll = AppenderQuery(attr, state).autoflush(False) - self.unchanged_items = [o for o in util.IdentitySet(coll) - if o not in deleted] - self.added_items = apply_to.added_items - self.deleted_items = apply_to.deleted_items - else: - self.deleted_items = [] - self.added_items = [] - self.unchanged_items = [] - diff --git a/libs/sqlalchemy/orm/evaluator.py b/libs/sqlalchemy/orm/evaluator.py deleted file mode 100644 index 5eaac6c3..00000000 --- a/libs/sqlalchemy/orm/evaluator.py +++ /dev/null @@ -1,111 +0,0 @@ -# orm/evaluator.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -import operator -from sqlalchemy.sql import operators, functions -from sqlalchemy.sql import expression as sql - - -class UnevaluatableError(Exception): - pass - -_straight_ops = set(getattr(operators, op) - for op in ('add', 'mul', 'sub', - # Py2K - 'div', - # end Py2K - 'mod', 'truediv', - 'lt', 'le', 'ne', 'gt', 'ge', 'eq')) - - -_notimplemented_ops = set(getattr(operators, op) - for op in ('like_op', 'notlike_op', 'ilike_op', - 'notilike_op', 'between_op', 'in_op', - 'notin_op', 'endswith_op', 'concat_op')) - -class EvaluatorCompiler(object): - def process(self, clause): - meth = getattr(self, "visit_%s" % clause.__visit_name__, None) - if not meth: - raise UnevaluatableError("Cannot evaluate %s" % type(clause).__name__) - return meth(clause) - - def visit_grouping(self, clause): - return self.process(clause.element) - - def visit_null(self, 
clause): - return lambda obj: None - - def visit_column(self, clause): - if 'parentmapper' in clause._annotations: - key = clause._annotations['parentmapper'].\ - _columntoproperty[clause].key - else: - key = clause.key - get_corresponding_attr = operator.attrgetter(key) - return lambda obj: get_corresponding_attr(obj) - - def visit_clauselist(self, clause): - evaluators = map(self.process, clause.clauses) - if clause.operator is operators.or_: - def evaluate(obj): - has_null = False - for sub_evaluate in evaluators: - value = sub_evaluate(obj) - if value: - return True - has_null = has_null or value is None - if has_null: - return None - return False - elif clause.operator is operators.and_: - def evaluate(obj): - for sub_evaluate in evaluators: - value = sub_evaluate(obj) - if not value: - if value is None: - return None - return False - return True - else: - raise UnevaluatableError("Cannot evaluate clauselist with operator %s" % clause.operator) - - return evaluate - - def visit_binary(self, clause): - eval_left,eval_right = map(self.process, [clause.left, clause.right]) - operator = clause.operator - if operator is operators.is_: - def evaluate(obj): - return eval_left(obj) == eval_right(obj) - elif operator is operators.isnot: - def evaluate(obj): - return eval_left(obj) != eval_right(obj) - elif operator in _straight_ops: - def evaluate(obj): - left_val = eval_left(obj) - right_val = eval_right(obj) - if left_val is None or right_val is None: - return None - return operator(eval_left(obj), eval_right(obj)) - else: - raise UnevaluatableError("Cannot evaluate %s with operator %s" % (type(clause).__name__, clause.operator)) - return evaluate - - def visit_unary(self, clause): - eval_inner = self.process(clause.element) - if clause.operator is operators.inv: - def evaluate(obj): - value = eval_inner(obj) - if value is None: - return None - return not value - return evaluate - raise UnevaluatableError("Cannot evaluate %s with operator %s" % (type(clause).__name__, clause.operator)) - - def visit_bindparam(self, clause): - val = clause.value - return lambda obj: val diff --git a/libs/sqlalchemy/orm/events.py b/libs/sqlalchemy/orm/events.py deleted file mode 100644 index 3c868d14..00000000 --- a/libs/sqlalchemy/orm/events.py +++ /dev/null @@ -1,1256 +0,0 @@ -# orm/events.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""ORM event interfaces. - -""" -from sqlalchemy import event, exc, util -orm = util.importlater("sqlalchemy", "orm") -import inspect - -class InstrumentationEvents(event.Events): - """Events related to class instrumentation events. - - The listeners here support being established against - any new style class, that is any object that is a subclass - of 'type'. Events will then be fired off for events - against that class as well as all subclasses. - 'type' itself is also accepted as a target - in which case the events fire for all classes. 
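For context on the evaluator module just above: EvaluatorCompiler is what lets bulk Query.update()/Query.delete() keep already-loaded objects in sync without a round trip, via synchronize_session='evaluate'. A hedged sketch, reusing the Address and session names from the earlier sketch and sticking to an equality criterion (operators such as LIKE sit in _notimplemented_ops and would raise UnevaluatableError):

    # the filter is evaluated twice: as SQL against the table, and in
    # Python (via EvaluatorCompiler) against instances in the Session
    session.query(Address).\
        filter(Address.email == 'a@example.com').\
        update({'email': 'b@example.com'}, synchronize_session='evaluate')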
- - """ - - @classmethod - def _accept_with(cls, target): - if isinstance(target, type): - return orm.instrumentation.instrumentation_registry - else: - return None - - @classmethod - def _listen(cls, target, identifier, fn, propagate=False): - event.Events._listen(target, identifier, fn, propagate=propagate) - - @classmethod - def _remove(cls, identifier, target, fn): - raise NotImplementedError("Removal of instrumentation events not yet implemented") - - def class_instrument(self, cls): - """Called after the given class is instrumented. - - To get at the :class:`.ClassManager`, use - :func:`.manager_of_class`. - - """ - - def class_uninstrument(self, cls): - """Called before the given class is uninstrumented. - - To get at the :class:`.ClassManager`, use - :func:`.manager_of_class`. - - """ - - - def attribute_instrument(self, cls, key, inst): - """Called when an attribute is instrumented.""" - -class InstanceEvents(event.Events): - """Define events specific to object lifecycle. - - e.g.:: - - from sqlalchemy import event - - def my_load_listener(target, context): - print "on load!" - - event.listen(SomeMappedClass, 'load', my_load_listener) - - Available targets include mapped classes, instances of - :class:`.Mapper` (i.e. returned by :func:`.mapper`, - :func:`.class_mapper` and similar), as well as the - :class:`.Mapper` class and :func:`.mapper` function itself - for global event reception:: - - from sqlalchemy.orm import mapper - - def some_listener(target, context): - log.debug("Instance %s being loaded" % target) - - # attach to all mappers - event.listen(mapper, 'load', some_listener) - - Instance events are closely related to mapper events, but - are more specific to the instance and its instrumentation, - rather than its system of persistence. - - When using :class:`.InstanceEvents`, several modifiers are - available to the :func:`.event.listen` function. - - :param propagate=False: When True, the event listener should - be applied to all inheriting mappers as well as the - mapper which is the target of this listener. - :param raw=False: When True, the "target" argument passed - to applicable event listener functions will be the - instance's :class:`.InstanceState` management - object, rather than the mapped instance itself. - - """ - @classmethod - def _accept_with(cls, target): - if isinstance(target, orm.instrumentation.ClassManager): - return target - elif isinstance(target, orm.Mapper): - return target.class_manager - elif target is orm.mapper: - return orm.instrumentation.ClassManager - elif isinstance(target, type): - if issubclass(target, orm.Mapper): - return orm.instrumentation.ClassManager - else: - manager = orm.instrumentation.manager_of_class(target) - if manager: - return manager - return None - - @classmethod - def _listen(cls, target, identifier, fn, raw=False, propagate=False): - if not raw: - orig_fn = fn - def wrap(state, *arg, **kw): - return orig_fn(state.obj(), *arg, **kw) - fn = wrap - - event.Events._listen(target, identifier, fn, propagate=propagate) - if propagate: - for mgr in target.subclass_managers(True): - event.Events._listen(mgr, identifier, fn, True) - - @classmethod - def _remove(cls, identifier, target, fn): - raise NotImplementedError("Removal of instance events not yet implemented") - - def first_init(self, manager, cls): - """Called when the first instance of a particular mapping is called. - - """ - - def init(self, target, args, kwargs): - """Receive an instance when it's constructor is called. 
- - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - """ - - def init_failure(self, target, args, kwargs): - """Receive an instance when it's constructor has been called, - and raised an exception. - - This method is only called during a userland construction of - an object. It is not called when an object is loaded from the - database. - - """ - - def load(self, target, context): - """Receive an object instance after it has been created via - ``__new__``, and after initial attribute population has - occurred. - - This typically occurs when the instance is created based on - incoming result rows, and is only called once for that - instance's lifetime. - - Note that during a result-row load, this method is called upon - the first row received for this instance. Note that some - attributes and collections may or may not be loaded or even - initialized, depending on what's present in the result rows. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param context: the :class:`.QueryContext` corresponding to the - current :class:`.Query` in progress. This argument may be - ``None`` if the load does not correspond to a :class:`.Query`, - such as during :meth:`.Session.merge`. - - """ - - def refresh(self, target, context, attrs): - """Receive an object instance after one or more attributes have - been refreshed from a query. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param context: the :class:`.QueryContext` corresponding to the - current :class:`.Query` in progress. - :param attrs: iterable collection of attribute names which - were populated, or None if all column-mapped, non-deferred - attributes were populated. - - """ - - def expire(self, target, attrs): - """Receive an object instance after its attributes or some subset - have been expired. - - 'keys' is a list of attribute names. If None, the entire - state was expired. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param attrs: iterable collection of attribute - names which were expired, or None if all attributes were - expired. - - """ - - def resurrect(self, target): - """Receive an object instance as it is 'resurrected' from - garbage collection, which occurs when a "dirty" state falls - out of scope. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - - """ - - def pickle(self, target, state_dict): - """Receive an object instance when its associated state is - being pickled. - - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param state_dict: the dictionary returned by - :class:`.InstanceState.__getstate__`, containing the state - to be pickled. - - """ - - def unpickle(self, target, state_dict): - """Receive an object instance after it's associated state has - been unpickled. 
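These lifecycle hooks all attach the same way; for instance, watching expiration (SomeMappedClass again hypothetical):

    from sqlalchemy import event

    def on_expire(target, attrs):
        # attrs is None when the entire instance was expired
        print("expired %r on %s" % (attrs, target))

    event.listen(SomeMappedClass, 'expire', on_expire)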
- - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param state_dict: the dictionary sent to - :class:`.InstanceState.__setstate__`, containing the state - dictionary which was pickled. - - """ - -class MapperEvents(event.Events): - """Define events specific to mappings. - - e.g.:: - - from sqlalchemy import event - - def my_before_insert_listener(mapper, connection, target): - # execute a stored procedure upon INSERT, - # apply the value to the row to be inserted - target.calculated_value = connection.scalar( - "select my_special_function(%d)" - % target.special_number) - - # associate the listener function with SomeMappedClass, - # to execute during the "before_insert" hook - event.listen(SomeMappedClass, 'before_insert', my_before_insert_listener) - - Available targets include mapped classes, instances of - :class:`.Mapper` (i.e. returned by :func:`.mapper`, - :func:`.class_mapper` and similar), as well as the - :class:`.Mapper` class and :func:`.mapper` function itself - for global event reception:: - - from sqlalchemy.orm import mapper - - def some_listener(mapper, connection, target): - log.debug("Instance %s being inserted" % target) - - # attach to all mappers - event.listen(mapper, 'before_insert', some_listener) - - Mapper events provide hooks into critical sections of the - mapper, including those related to object instrumentation, - object loading, and object persistence. In particular, the - persistence methods :meth:`~.MapperEvents.before_insert`, - and :meth:`~.MapperEvents.before_update` are popular - places to augment the state being persisted - however, these - methods operate with several significant restrictions. The - user is encouraged to evaluate the - :meth:`.SessionEvents.before_flush` and - :meth:`.SessionEvents.after_flush` methods as more - flexible and user-friendly hooks in which to apply - additional database state during a flush. - - When using :class:`.MapperEvents`, several modifiers are - available to the :func:`.event.listen` function. - - :param propagate=False: When True, the event listener should - be applied to all inheriting mappers as well as the - mapper which is the target of this listener. - :param raw=False: When True, the "target" argument passed - to applicable event listener functions will be the - instance's :class:`.InstanceState` management - object, rather than the mapped instance itself. - :param retval=False: when True, the user-defined event function - must have a return value, the purpose of which is either to - control subsequent event propagation, or to otherwise alter - the operation in progress by the mapper. Possible return - values are: - - * ``sqlalchemy.orm.interfaces.EXT_CONTINUE`` - continue event - processing normally. - * ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent - event handlers in the chain. - * other values - the return value specified by specific listeners, - such as :meth:`~.MapperEvents.translate_row` or - :meth:`~.MapperEvents.create_instance`. 
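    As a hedged sketch of the ``retval=True`` contract, reusing
    ``SomeMappedClass`` from the example above; the ``archived``
    attribute is invented purely for illustration::

        from sqlalchemy import event
        from sqlalchemy.orm.interfaces import EXT_CONTINUE, EXT_STOP

        def before_insert_guard(mapper, connection, target):
            # cancel subsequent 'before_insert' listeners for flagged objects
            if getattr(target, 'archived', False):
                return EXT_STOP
            return EXT_CONTINUE

        event.listen(SomeMappedClass, 'before_insert',
                     before_insert_guard, retval=True)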
- - """ - - @classmethod - def _accept_with(cls, target): - if target is orm.mapper: - return orm.Mapper - elif isinstance(target, type): - if issubclass(target, orm.Mapper): - return target - else: - return orm.class_mapper(target, compile=False) - else: - return target - - @classmethod - def _listen(cls, target, identifier, fn, - raw=False, retval=False, propagate=False): - - if not raw or not retval: - if not raw: - meth = getattr(cls, identifier) - try: - target_index = inspect.getargspec(meth)[0].index('target') - 1 - except ValueError: - target_index = None - - wrapped_fn = fn - def wrap(*arg, **kw): - if not raw and target_index is not None: - arg = list(arg) - arg[target_index] = arg[target_index].obj() - if not retval: - wrapped_fn(*arg, **kw) - return orm.interfaces.EXT_CONTINUE - else: - return wrapped_fn(*arg, **kw) - fn = wrap - - if propagate: - for mapper in target.self_and_descendants: - event.Events._listen(mapper, identifier, fn, propagate=True) - else: - event.Events._listen(target, identifier, fn) - - def instrument_class(self, mapper, class_): - """Receive a class when the mapper is first constructed, - before instrumentation is applied to the mapped class. - - This event is the earliest phase of mapper construction. - Most attributes of the mapper are not yet initialized. - - This listener can generally only be applied to the :class:`.Mapper` - class overall. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param class\_: the mapped class. - - """ - - def mapper_configured(self, mapper, class_): - """Called when the mapper for the class is fully configured. - - This event is the latest phase of mapper construction, and - is invoked when the mapped classes are first used, so that relationships - between mappers can be resolved. When the event is called, - the mapper should be in its final state. - - While the configuration event normally occurs automatically, - it can be forced to occur ahead of time, in the case where the event - is needed before any actual mapper usage, by using the - :func:`.configure_mappers` function. - - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param class\_: the mapped class. - - """ - # TODO: need coverage for this event - - def after_configured(self): - """Called after a series of mappers have been configured. - - This corresponds to the :func:`.orm.configure_mappers` call, which - note is usually called automatically as mappings are first - used. - - Theoretically this event is called once per - application, but is actually called any time new mappers - have been affected by a :func:`.orm.configure_mappers` call. If new mappings - are constructed after existing ones have already been used, - this event can be called again. - - """ - - def translate_row(self, mapper, context, row): - """Perform pre-processing on the given result row and return a - new row instance. - - This listener is typically registered with ``retval=True``. - It is called when the mapper first receives a row, before - the object identity or the instance itself has been derived - from that row. The given row may or may not be a - :class:`.RowProxy` object - it will always be a dictionary-like - object which contains mapped columns as keys. The - returned object should also be a dictionary-like object - which recognizes mapped columns as keys. - - :param mapper: the :class:`.Mapper` which is the target - of this event. 
- :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :return: When configured with ``retval=True``, the function - should return a dictionary-like row object, or ``EXT_CONTINUE``, - indicating the original row should be used. - - - """ - - def create_instance(self, mapper, context, row, class_): - """Receive a row when a new object instance is about to be - created from that row. - - The method can choose to create the instance itself, or it can return - EXT_CONTINUE to indicate normal object creation should take place. - This listener is typically registered with ``retval=True``. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param class\_: the mapped class. - :return: When configured with ``retval=True``, the return value - should be a newly created instance of the mapped class, - or ``EXT_CONTINUE`` indicating that default object construction - should take place. - - """ - - def append_result(self, mapper, context, row, target, - result, **flags): - """Receive an object instance before that instance is appended - to a result list. - - This is a rarely used hook which can be used to alter - the construction of a result list returned by :class:`.Query`. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param target: the mapped instance being populated. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :param result: a list-like object where results are being - appended. - :param \**flags: Additional state information about the - current handling of the row. - :return: If this method is registered with ``retval=True``, - a return value of ``EXT_STOP`` will prevent the instance - from being appended to the given result list, whereas a - return value of ``EXT_CONTINUE`` will result in the default - behavior of appending the value to the result list. - - """ - - - def populate_instance(self, mapper, context, row, - target, **flags): - """Receive an instance before that instance has - its attributes populated. - - This usually corresponds to a newly loaded instance but may - also correspond to an already-loaded instance which has - unloaded attributes to be populated. The method may be called - many times for a single instance, as multiple result rows are - used to populate eagerly loaded collections. - - Most usages of this hook are obsolete. For a - generic "object has been newly created from a row" hook, use - :meth:`.InstanceEvents.load`. - - :param mapper: the :class:`.Mapper` which is the target - of this event. 
- :param context: the :class:`.QueryContext`, which includes - a handle to the current :class:`.Query` in progress as well - as additional state information. - :param row: the result row being handled. This may be - an actual :class:`.RowProxy` or may be a dictionary containing - :class:`.Column` objects as keys. - :param target: the mapped instance. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: When configured with ``retval=True``, a return - value of ``EXT_STOP`` will bypass instance population by - the mapper. A value of ``EXT_CONTINUE`` indicates that - default instance population should take place. - - """ - - def before_insert(self, mapper, connection, target): - """Receive an object instance before an INSERT statement - is emitted corresponding to that instance. - - This event is used to modify local, non-object related - attributes on the instance before an INSERT occurs, as well - as to emit additional SQL statements on the given - connection. - - The event is often called for a batch of objects of the - same class before their INSERT statements are emitted at - once in a later step. In the extremely rare case that - this is not desirable, the :func:`.mapper` can be - configured with ``batch=False``, which will cause - batches of instances to be broken up into individual - (and more poorly performing) event->persist->event - steps. - - .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` **only.** - Handlers here should **not** make alterations to the state of - the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, or another method - designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit INSERT statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - """ - - def after_insert(self, mapper, connection, target): - """Receive an object instance after an INSERT statement - is emitted corresponding to that instance. - - This event is used to modify in-Python-only - state on the instance after an INSERT occurs, as well - as to emit additional SQL statements on the given - connection. 
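        For example, sketched with a hypothetical ``audit_log`` table,
        extra SQL emitted here rides on the flush's own transaction::

            from sqlalchemy import event

            def audit_insert(mapper, connection, target):
                # same connection, same transaction as the flush itself
                connection.execute(
                    "INSERT INTO audit_log (note) VALUES ('%s inserted')"
                    % type(target).__name__)

            event.listen(SomeMappedClass, 'after_insert', audit_insert)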
- - The event is often called for a batch of objects of the - same class after their INSERT statements have been - emitted at once in a previous step. In the extremely - rare case that this is not desirable, the - :func:`.mapper` can be configured with ``batch=False``, - which will cause batches of instances to be broken up - into individual (and more poorly performing) - event->persist->event steps. - - .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` **only.** - Handlers here should **not** make alterations to the state of - the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, or another method - designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit INSERT statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being persisted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - """ - - def before_update(self, mapper, connection, target): - """Receive an object instance before an UPDATE statement - is emitted corresponding to that instance. - - This event is used to modify local, non-object related - attributes on the instance before an UPDATE occurs, as well - as to emit additional SQL statements on the given - connection. - - This method is called for all instances that are - marked as "dirty", *even those which have no net changes - to their column-based attributes*. An object is marked - as dirty when any of its column-based attributes have a - "set attribute" operation called or when any of its - collections are modified. If, at update time, no - column-based attributes have any net changes, no UPDATE - statement will be issued. This means that an instance - being sent to :meth:`~.MapperEvents.before_update` is - *not* a guarantee that an UPDATE statement will be - issued, although you can affect the outcome here by - modifying attributes so that a net change in value does - exist. - - To detect if the column-based attributes on the object have net - changes, and will therefore generate an UPDATE statement, use - ``object_session(instance).is_modified(instance, - include_collections=False)``. - - The event is often called for a batch of objects of the - same class before their UPDATE statements are emitted at - once in a later step. 
-        In the extremely rare case that
-        this is not desirable, the :func:`.mapper` can be
-        configured with ``batch=False``, which will cause
-        batches of instances to be broken up into individual
-        (and more poorly performing) event->persist->event
-        steps.
-
-        .. warning::
-            Mapper-level flush events are designed to operate **on attributes
-            local to the immediate object being handled
-            and via SQL operations with the given** :class:`.Connection` **only.**
-            Handlers here should **not** make alterations to the state of
-            the :class:`.Session` overall, and in general should not
-            affect any :func:`.relationship` -mapped attributes, as
-            session cascade rules will not function properly, nor is it
-            always known if the related class has already been handled.
-            Operations that **are not supported in mapper events** include:
-
-            * :meth:`.Session.add`
-            * :meth:`.Session.delete`
-            * Mapped collection append, add, remove, delete, discard, etc.
-            * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
-
-            Operations which manipulate the state of the object
-            relative to other objects are better handled:
-
-            * In the ``__init__()`` method of the mapped object itself, or another method
-              designed to establish some particular state.
-            * In a ``@validates`` handler, see :ref:`simple_validators`
-            * Within the :meth:`.SessionEvents.before_flush` event.
-
-        :param mapper: the :class:`.Mapper` which is the target
-         of this event.
-        :param connection: the :class:`.Connection` being used to
-         emit UPDATE statements for this instance.  This
-         provides a handle into the current transaction on the
-         target database specific to this instance.
-        :param target: the mapped instance being persisted.  If
-         the event is configured with ``raw=True``, this will
-         instead be the :class:`.InstanceState` state-management
-         object associated with the instance.
-        :return: No return value is supported by this event.
-        """
-
-    def after_update(self, mapper, connection, target):
-        """Receive an object instance after an UPDATE statement
-        is emitted corresponding to that instance.
-
-        This event is used to modify in-Python-only
-        state on the instance after an UPDATE occurs, as well
-        as to emit additional SQL statements on the given
-        connection.
-
-        This method is called for all instances that are
-        marked as "dirty", *even those which have no net changes
-        to their column-based attributes*, and for which
-        no UPDATE statement has proceeded.  An object is marked
-        as dirty when any of its column-based attributes have a
-        "set attribute" operation called or when any of its
-        collections are modified.  If, at update time, no
-        column-based attributes have any net changes, no UPDATE
-        statement will be issued.  This means that an instance
-        being sent to :meth:`~.MapperEvents.after_update` is
-        *not* a guarantee that an UPDATE statement has been
-        issued.
-
-        To detect if the column-based attributes on the object have net
-        changes, and therefore resulted in an UPDATE statement, use
-        ``object_session(instance).is_modified(instance,
-        include_collections=False)``.
-
-        The event is often called for a batch of objects of the
-        same class after their UPDATE statements have been emitted at
-        once in a previous step.  In the extremely rare case that
-        this is not desirable, the :func:`.mapper` can be
-        configured with ``batch=False``, which will cause
-        batches of instances to be broken up into individual
-        (and more poorly performing) event->persist->event
-        steps.
-
-        .. warning::
-            Mapper-level flush events are designed to operate **on attributes
-            local to the immediate object being handled
-            and via SQL operations with the given** :class:`.Connection` **only.**
-            Handlers here should **not** make alterations to the state of
-            the :class:`.Session` overall, and in general should not
-            affect any :func:`.relationship` -mapped attributes, as
-            session cascade rules will not function properly, nor is it
-            always known if the related class has already been handled.
-            Operations that **are not supported in mapper events** include:
-
-            * :meth:`.Session.add`
-            * :meth:`.Session.delete`
-            * Mapped collection append, add, remove, delete, discard, etc.
-            * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
-
-            Operations which manipulate the state of the object
-            relative to other objects are better handled:
-
-            * In the ``__init__()`` method of the mapped object itself, or another method
-              designed to establish some particular state.
-            * In a ``@validates`` handler, see :ref:`simple_validators`
-            * Within the :meth:`.SessionEvents.before_flush` event.
-
-        :param mapper: the :class:`.Mapper` which is the target
-         of this event.
-        :param connection: the :class:`.Connection` being used to
-         emit UPDATE statements for this instance.  This
-         provides a handle into the current transaction on the
-         target database specific to this instance.
-        :param target: the mapped instance being persisted.  If
-         the event is configured with ``raw=True``, this will
-         instead be the :class:`.InstanceState` state-management
-         object associated with the instance.
-        :return: No return value is supported by this event.
-
-        """
-
-    def before_delete(self, mapper, connection, target):
-        """Receive an object instance before a DELETE statement
-        is emitted corresponding to that instance.
-
-        This event is used to emit additional SQL statements on
-        the given connection as well as to perform application
-        specific bookkeeping related to a deletion event.
-
-        The event is often called for a batch of objects of the
-        same class before their DELETE statements are emitted at
-        once in a later step.
-
-        .. warning::
-            Mapper-level flush events are designed to operate **on attributes
-            local to the immediate object being handled
-            and via SQL operations with the given** :class:`.Connection` **only.**
-            Handlers here should **not** make alterations to the state of
-            the :class:`.Session` overall, and in general should not
-            affect any :func:`.relationship` -mapped attributes, as
-            session cascade rules will not function properly, nor is it
-            always known if the related class has already been handled.
-            Operations that **are not supported in mapper events** include:
-
-            * :meth:`.Session.add`
-            * :meth:`.Session.delete`
-            * Mapped collection append, add, remove, delete, discard, etc.
-            * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
-
-            Operations which manipulate the state of the object
-            relative to other objects are better handled:
-
-            * In the ``__init__()`` method of the mapped object itself, or another method
-              designed to establish some particular state.
-            * In a ``@validates`` handler, see :ref:`simple_validators`
-            * Within the :meth:`.SessionEvents.before_flush` event.
-
-        :param mapper: the :class:`.Mapper` which is the target
-         of this event.
-        :param connection: the :class:`.Connection` being used to
-         emit DELETE statements for this instance.
This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being deleted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - """ - - def after_delete(self, mapper, connection, target): - """Receive an object instance after a DELETE statement - has been emitted corresponding to that instance. - - This event is used to emit additional SQL statements on - the given connection as well as to perform application - specific bookkeeping related to a deletion event. - - The event is often called for a batch of objects of the - same class after their DELETE statements have been emitted at - once in a previous step. - - .. warning:: - Mapper-level flush events are designed to operate **on attributes - local to the immediate object being handled - and via SQL operations with the given** :class:`.Connection` **only.** - Handlers here should **not** make alterations to the state of - the :class:`.Session` overall, and in general should not - affect any :func:`.relationship` -mapped attributes, as - session cascade rules will not function properly, nor is it - always known if the related class has already been handled. - Operations that **are not supported in mapper events** include: - - * :meth:`.Session.add` - * :meth:`.Session.delete` - * Mapped collection append, add, remove, delete, discard, etc. - * Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject`` - - Operations which manipulate the state of the object - relative to other objects are better handled: - - * In the ``__init__()`` method of the mapped object itself, or another method - designed to establish some particular state. - * In a ``@validates`` handler, see :ref:`simple_validators` - * Within the :meth:`.SessionEvents.before_flush` event. - - :param mapper: the :class:`.Mapper` which is the target - of this event. - :param connection: the :class:`.Connection` being used to - emit DELETE statements for this instance. This - provides a handle into the current transaction on the - target database specific to this instance. - :param target: the mapped instance being deleted. If - the event is configured with ``raw=True``, this will - instead be the :class:`.InstanceState` state-management - object associated with the instance. - :return: No return value is supported by this event. - - """ - - @classmethod - def _remove(cls, identifier, target, fn): - raise NotImplementedError("Removal of mapper events not yet implemented") - -class SessionEvents(event.Events): - """Define events specific to :class:`.Session` lifecycle. - - e.g.:: - - from sqlalchemy import event - from sqlalchemy.orm import sessionmaker - - def my_before_commit(session): - print "before commit!" - - Session = sessionmaker() - - event.listen(Session, "before_commit", my_before_commit) - - The :func:`~.event.listen` function will accept - :class:`.Session` objects as well as the return result - of :func:`.sessionmaker` and :func:`.scoped_session`. - - Additionally, it accepts the :class:`.Session` class which - will apply listeners to all :class:`.Session` instances - globally. 
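    A sketch contrasting the two scopes (the names here are invented
    for illustration)::

        from sqlalchemy import event
        from sqlalchemy.orm import Session, sessionmaker

        MySession = sessionmaker()

        def local_listener(session):
            print "commit on a MySession instance"

        def global_listener(session):
            print "commit on any Session anywhere"

        event.listen(MySession, 'after_commit', local_listener)

        # the Session class itself targets all instances globally
        event.listen(Session, 'after_commit', global_listener)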
- - """ - - @classmethod - def _accept_with(cls, target): - if isinstance(target, orm.ScopedSession): - if not isinstance(target.session_factory, type) or \ - not issubclass(target.session_factory, orm.Session): - raise exc.ArgumentError( - "Session event listen on a ScopedSession " - "requires that its creation callable " - "is a Session subclass.") - return target.session_factory - elif isinstance(target, type): - if issubclass(target, orm.ScopedSession): - return orm.Session - elif issubclass(target, orm.Session): - return target - elif isinstance(target, orm.Session): - return target - else: - return None - - @classmethod - def _remove(cls, identifier, target, fn): - raise NotImplementedError("Removal of session events not yet implemented") - - def before_commit(self, session): - """Execute before commit is called. - - Note that this may not be per-flush if a longer running - transaction is ongoing. - - :param session: The target :class:`.Session`. - - """ - - def after_commit(self, session): - """Execute after a commit has occurred. - - Note that this may not be per-flush if a longer running - transaction is ongoing. - - :param session: The target :class:`.Session`. - - """ - - def after_rollback(self, session): - """Execute after a real DBAPI rollback has occurred. - - Note that this event only fires when the *actual* rollback against - the database occurs - it does *not* fire each time the - :meth:`.Session.rollback` method is called, if the underlying - DBAPI transaction has already been rolled back. In many - cases, the :class:`.Session` will not be in - an "active" state during this event, as the current - transaction is not valid. To acquire a :class:`.Session` - which is active after the outermost rollback has proceeded, - use the :meth:`.SessionEvents.after_soft_rollback` event, checking the - :attr:`.Session.is_active` flag. - - :param session: The target :class:`.Session`. - - """ - - def after_soft_rollback(self, session, previous_transaction): - """Execute after any rollback has occurred, including "soft" - rollbacks that don't actually emit at the DBAPI level. - - This corresponds to both nested and outer rollbacks, i.e. - the innermost rollback that calls the DBAPI's - rollback() method, as well as the enclosing rollback - calls that only pop themselves from the transaction stack. - - The given :class:`.Session` can be used to invoke SQL and - :meth:`.Session.query` operations after an outermost rollback - by first checking the :attr:`.Session.is_active` flag:: - - @event.listens_for(Session, "after_soft_rollback") - def do_something(session, previous_transaction): - if session.is_active: - session.execute("select * from some_table") - - :param session: The target :class:`.Session`. - :param previous_transaction: The :class:`.SessionTransaction` transactional - marker object which was just closed. The current :class:`.SessionTransaction` - for the given :class:`.Session` is available via the - :attr:`.Session.transaction` attribute. - - .. versionadded:: 0.7.3 - - """ - - def before_flush( self, session, flush_context, instances): - """Execute before flush process has started. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - :param instances: Usually ``None``, this is the collection of - objects which can be passed to the :meth:`.Session.flush` method - (note this usage is deprecated). 
- - """ - - def after_flush(self, session, flush_context): - """Execute after flush has completed, but before commit has been - called. - - Note that the session's state is still in pre-flush, i.e. 'new', - 'dirty', and 'deleted' lists still show pre-flush state as well - as the history settings on instance attributes. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - - """ - - def after_flush_postexec(self, session, flush_context): - """Execute after flush has completed, and after the post-exec - state occurs. - - This will be when the 'new', 'dirty', and 'deleted' lists are in - their final state. An actual commit() may or may not have - occurred, depending on whether or not the flush started its own - transaction or participated in a larger transaction. - - :param session: The target :class:`.Session`. - :param flush_context: Internal :class:`.UOWTransaction` object - which handles the details of the flush. - """ - - def after_begin( self, session, transaction, connection): - """Execute after a transaction is begun on a connection - - :param session: The target :class:`.Session`. - :param transaction: The :class:`.SessionTransaction`. - :param connection: The :class:`~.engine.base.Connection` object - which will be used for SQL statements. - - """ - - def after_attach(self, session, instance): - """Execute after an instance is attached to a session. - - This is called after an add, delete or merge. """ - - def after_bulk_update( self, session, query, query_context, result): - """Execute after a bulk update operation to the session. - - This is called as a result of the :meth:`.Query.update` method. - - :param query: the :class:`.Query` object that this update operation was - called upon. - :param query_context: The :class:`.QueryContext` object, corresponding - to the invocation of an ORM query. - :param result: the :class:`.ResultProxy` returned as a result of the - bulk UPDATE operation. - - """ - - def after_bulk_delete( self, session, query, query_context, result): - """Execute after a bulk delete operation to the session. - - This is called as a result of the :meth:`.Query.delete` method. - - :param query: the :class:`.Query` object that this update operation was - called upon. - :param query_context: The :class:`.QueryContext` object, corresponding - to the invocation of an ORM query. - :param result: the :class:`.ResultProxy` returned as a result of the - bulk DELETE operation. - - """ - - -class AttributeEvents(event.Events): - """Define events for object attributes. - - These are typically defined on the class-bound descriptor for the - target class. - - e.g.:: - - from sqlalchemy import event - - def my_append_listener(target, value, initiator): - print "received append event for target: %s" % target - - event.listen(MyClass.collection, 'append', my_append_listener) - - Listeners have the option to return a possibly modified version - of the value, when the ``retval=True`` flag is passed - to :func:`~.event.listen`:: - - def validate_phone(target, value, oldvalue, initiator): - "Strip non-numeric characters from a phone number" - - return re.sub(r'(?![0-9])', '', value) - - # setup listener on UserContact.phone attribute, instructing - # it to use the return value - listen(UserContact.phone, 'set', validate_phone, retval=True) - - A validation function like the above can also raise an exception - such as :class:`.ValueError` to halt the operation. 
- - Several modifiers are available to the :func:`~.event.listen` function. - - :param active_history=False: When True, indicates that the - "set" event would like to receive the "old" value being - replaced unconditionally, even if this requires firing off - database loads. Note that ``active_history`` can also be - set directly via :func:`.column_property` and - :func:`.relationship`. - - :param propagate=False: When True, the listener function will - be established not just for the class attribute given, but - for attributes of the same name on all current subclasses - of that class, as well as all future subclasses of that - class, using an additional listener that listens for - instrumentation events. - :param raw=False: When True, the "target" argument to the - event will be the :class:`.InstanceState` management - object, rather than the mapped instance itself. - :param retval=False: when True, the user-defined event - listening must return the "value" argument from the - function. This gives the listening function the opportunity - to change the value that is ultimately used for a "set" - or "append" event. - - """ - - @classmethod - def _accept_with(cls, target): - # TODO: coverage - if isinstance(target, orm.interfaces.MapperProperty): - return getattr(target.parent.class_, target.key) - else: - return target - - @classmethod - def _listen(cls, target, identifier, fn, active_history=False, - raw=False, retval=False, - propagate=False): - if active_history: - target.dispatch._active_history = True - - # TODO: for removal, need to package the identity - # of the wrapper with the original function. - - if not raw or not retval: - orig_fn = fn - def wrap(target, value, *arg): - if not raw: - target = target.obj() - if not retval: - orig_fn(target, value, *arg) - return value - else: - return orig_fn(target, value, *arg) - fn = wrap - - event.Events._listen(target, identifier, fn, propagate) - - if propagate: - manager = orm.instrumentation.manager_of_class(target.class_) - - for mgr in manager.subclass_managers(True): - event.Events._listen(mgr[target.key], identifier, fn, True) - - @classmethod - def _remove(cls, identifier, target, fn): - raise NotImplementedError("Removal of attribute events not yet implemented") - - def append(self, target, value, initiator): - """Receive a collection append event. - - :param target: the object instance receiving the event. - If the listener is registered with ``raw=True``, this will - be the :class:`.InstanceState` object. - :param value: the value being appended. If this listener - is registered with ``retval=True``, the listener - function must return this value, or a new value which - replaces it. - :param initiator: the attribute implementation object - which initiated this event. - :return: if the event was registered with ``retval=True``, - the given value, or a new effective value, should be returned. - - """ - - def remove(self, target, value, initiator): - """Receive a collection remove event. - - :param target: the object instance receiving the event. - If the listener is registered with ``raw=True``, this will - be the :class:`.InstanceState` object. - :param value: the value being removed. - :param initiator: the attribute implementation object - which initiated this event. - :return: No return value is defined for this event. - """ - - def set(self, target, value, oldvalue, initiator): - """Receive a scalar set event. - - :param target: the object instance receiving the event. 
-         If the listener is registered with ``raw=True``, this will
-         be the :class:`.InstanceState` object.
-        :param value: the value being set.  If this listener
-         is registered with ``retval=True``, the listener
-         function must return this value, or a new value which
-         replaces it.
-        :param oldvalue: the previous value being replaced.  This
-         may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
-         If the listener is registered with ``active_history=True``,
-         the previous value of the attribute will be loaded from
-         the database if the existing value is currently unloaded
-         or expired.
-        :param initiator: the attribute implementation object
-         which initiated this event.
-        :return: if the event was registered with ``retval=True``,
-         the given value, or a new effective value, should be returned.
-
-        """
-
diff --git a/libs/sqlalchemy/orm/exc.py b/libs/sqlalchemy/orm/exc.py
deleted file mode 100644
index a116d204..00000000
--- a/libs/sqlalchemy/orm/exc.py
+++ /dev/null
@@ -1,156 +0,0 @@
-# orm/exc.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""SQLAlchemy ORM exceptions."""
-
-import sqlalchemy as sa
-orm_util = sa.util.importlater('sqlalchemy.orm', 'util')
-
-NO_STATE = (AttributeError, KeyError)
-"""Exception types that may be raised by instrumentation implementations."""
-
-class StaleDataError(sa.exc.SQLAlchemyError):
-    """An operation encountered database state that is unaccounted for.
-
-    Conditions which cause this to happen include:
-
-    * A flush may have attempted to update or delete rows
-      and an unexpected number of rows were matched during
-      the UPDATE or DELETE statement.  Note that when
-      version_id_col is used, rows in UPDATE or DELETE statements
-      are also matched against the current known version
-      identifier.
-
-    * A mapped object with version_id_col was refreshed,
-      and the version number coming back from the database does
-      not match that of the object itself.
-
-    * An object is detached from its parent object, however
-      the object was previously attached to a different parent
-      identity which was garbage collected, and a decision
-      cannot be made if the new parent was really the most
-      recent "parent".
-
-    .. versionadded:: 0.7.4
-
-    """
-
-ConcurrentModificationError = StaleDataError
-
-
-class FlushError(sa.exc.SQLAlchemyError):
-    """An invalid condition was detected during flush()."""
-
-
-class UnmappedError(sa.exc.InvalidRequestError):
-    """Base for exceptions that involve expected mappings not present."""
-
-class ObjectDereferencedError(sa.exc.SQLAlchemyError):
-    """An operation cannot complete due to an object being garbage collected."""
-
-class DetachedInstanceError(sa.exc.SQLAlchemyError):
-    """An attempt to access unloaded attributes on a
-    mapped instance that is detached."""
-
-class UnmappedInstanceError(UnmappedError):
-    """A mapping operation was requested for an unknown instance."""
-
-    def __init__(self, obj, msg=None):
-        if not msg:
-            try:
-                mapper = sa.orm.class_mapper(type(obj))
-                name = _safe_cls_name(type(obj))
-                msg = ("Class %r is mapped, but this instance lacks "
-                       "instrumentation.  This occurs when the instance is created "
-                       "before sqlalchemy.orm.mapper(%s) was called." % (name, name))
-            except UnmappedClassError:
-                msg = _default_unmapped(type(obj))
-                if isinstance(obj, type):
-                    msg += (
-                        '; was a class (%s) supplied where an instance was '
-                        'required?'
-                        % _safe_cls_name(obj))
-        UnmappedError.__init__(self, msg)
-
-    def __reduce__(self):
-        return self.__class__, (None, self.args[0])
-
-class UnmappedClassError(UnmappedError):
-    """A mapping operation was requested for an unknown class."""
-
-    def __init__(self, cls, msg=None):
-        if not msg:
-            msg = _default_unmapped(cls)
-        UnmappedError.__init__(self, msg)
-
-    def __reduce__(self):
-        return self.__class__, (None, self.args[0])
-
-class ObjectDeletedError(sa.exc.InvalidRequestError):
-    """A refresh operation failed to retrieve the database
-    row corresponding to an object's known primary key identity.
-
-    A refresh operation proceeds when an expired attribute is
-    accessed on an object, or when :meth:`.Query.get` is
-    used to retrieve an object which is, upon retrieval, detected
-    as expired.  A SELECT is emitted for the target row
-    based on primary key; if no row is returned, this
-    exception is raised.
-
-    The true meaning of this exception is simply that
-    no row exists for the primary key identifier associated
-    with a persistent object.  The row may have been
-    deleted, or in some cases the primary key updated
-    to a new value, outside of the ORM's management of the target
-    object.
-
-    """
-    def __init__(self, state, msg=None):
-        if not msg:
-            msg = "Instance '%s' has been deleted, or its "\
-                  "row is otherwise not present." % orm_util.state_str(state)
-
-        sa.exc.InvalidRequestError.__init__(self, msg)
-
-    def __reduce__(self):
-        return self.__class__, (None, self.args[0])
-
-class UnmappedColumnError(sa.exc.InvalidRequestError):
-    """Mapping operation was requested on an unknown column."""
-
-
-class NoResultFound(sa.exc.InvalidRequestError):
-    """A database result was required but none was found."""
-
-
-class MultipleResultsFound(sa.exc.InvalidRequestError):
-    """A single database result was required but more than one were found."""
-
-
-# Legacy compat until 0.6.
-sa.exc.ConcurrentModificationError = ConcurrentModificationError
-sa.exc.FlushError = FlushError
-sa.exc.UnmappedColumnError = UnmappedColumnError
-
-def _safe_cls_name(cls):
-    try:
-        cls_name = '.'.join((cls.__module__, cls.__name__))
-    except AttributeError:
-        cls_name = getattr(cls, '__name__', None)
-        if cls_name is None:
-            cls_name = repr(cls)
-    return cls_name
-
-def _default_unmapped(cls):
-    try:
-        mappers = sa.orm.attributes.manager_of_class(cls).mappers
-    except NO_STATE:
-        mappers = {}
-    except TypeError:
-        mappers = {}
-    name = _safe_cls_name(cls)
-
-    if not mappers:
-        return "Class '%s' is not mapped" % name
diff --git a/libs/sqlalchemy/orm/identity.py b/libs/sqlalchemy/orm/identity.py
deleted file mode 100644
index 29e13f2c..00000000
--- a/libs/sqlalchemy/orm/identity.py
+++ /dev/null
@@ -1,254 +0,0 @@
-# orm/identity.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-import weakref
-from sqlalchemy.orm import attributes
-
-
-class IdentityMap(dict):
-    def __init__(self):
-        self._mutable_attrs = set()
-        self._modified = set()
-        self._wr = weakref.ref(self)
-
-    def replace(self, state):
-        raise NotImplementedError()
-
-    def add(self, state):
-        raise NotImplementedError()
-
-    def update(self, dict):
-        raise NotImplementedError("IdentityMap uses add() to insert data")
-
-    def clear(self):
-        raise NotImplementedError("IdentityMap uses remove() to remove data")
-
-    def _manage_incoming_state(self, state):
-        state._instance_dict = self._wr
-
-        if state.modified:
-            self._modified.add(state)
-        if state.manager.mutable_attributes:
-            self._mutable_attrs.add(state)
-
-    def _manage_removed_state(self, state):
-        del state._instance_dict
-        self._mutable_attrs.discard(state)
-        self._modified.discard(state)
-
-    def _dirty_states(self):
-        return self._modified.union(s for s in self._mutable_attrs.copy()
-                                    if s.modified)
-
-    def check_modified(self):
-        """return True if any InstanceStates present have been marked as 'modified'."""
-
-        if self._modified:
-            return True
-        else:
-            for state in self._mutable_attrs.copy():
-                if state.modified:
-                    return True
-        return False
-
-    def has_key(self, key):
-        return key in self
-
-    def popitem(self):
-        raise NotImplementedError("IdentityMap uses remove() to remove data")
-
-    def pop(self, key, *args):
-        raise NotImplementedError("IdentityMap uses remove() to remove data")
-
-    def setdefault(self, key, default=None):
-        raise NotImplementedError("IdentityMap uses add() to insert data")
-
-    def copy(self):
-        raise NotImplementedError()
-
-    def __setitem__(self, key, value):
-        raise NotImplementedError("IdentityMap uses add() to insert data")
-
-    def __delitem__(self, key):
-        raise NotImplementedError("IdentityMap uses remove() to remove data")
-
-class WeakInstanceDict(IdentityMap):
-    def __init__(self):
-        IdentityMap.__init__(self)
-
-    def __getitem__(self, key):
-        state = dict.__getitem__(self, key)
-        o = state.obj()
-        if o is None:
-            o = state._is_really_none()
-        if o is None:
-            raise KeyError(key)
-        return o
-
-    def __contains__(self, key):
-        try:
-            if dict.__contains__(self, key):
-                state = dict.__getitem__(self, key)
-                o = state.obj()
-                if o is None:
-                    o = state._is_really_none()
-            else:
-                return False
-        except KeyError:
-            return False
-        else:
-            return o is not None
-
-    def contains_state(self, state):
-        return dict.get(self, state.key) is state
-
-    def replace(self, state):
-        if dict.__contains__(self, state.key):
existing = dict.__getitem__(self, state.key) - if existing is not state: - self._manage_removed_state(existing) - else: - return - - dict.__setitem__(self, state.key, state) - self._manage_incoming_state(state) - - def add(self, state): - key = state.key - # inline of self.__contains__ - if dict.__contains__(self, key): - try: - existing_state = dict.__getitem__(self, key) - if existing_state is not state: - o = existing_state.obj() - if o is None: - o = existing_state._is_really_none() - if o is not None: - raise AssertionError("A conflicting state is already " - "present in the identity map for key %r" - % (key, )) - else: - return - except KeyError: - pass - dict.__setitem__(self, key, state) - self._manage_incoming_state(state) - - def get(self, key, default=None): - state = dict.get(self, key, default) - if state is default: - return default - o = state.obj() - if o is None: - o = state._is_really_none() - if o is None: - return default - return o - - def _items(self): - values = self.all_states() - result = [] - for state in values: - value = state.obj() - if value is not None: - result.append((state.key, value)) - return result - - def _values(self): - values = self.all_states() - result = [] - for state in values: - value = state.obj() - if value is not None: - result.append(value) - - return result - - # Py3K - #def items(self): - # return iter(self._items()) - # - #def values(self): - # return iter(self._values()) - # Py2K - items = _items - def iteritems(self): - return iter(self.items()) - - values = _values - def itervalues(self): - return iter(self.values()) - # end Py2K - - def all_states(self): - # Py3K - # return list(dict.values(self)) - # Py2K - return dict.values(self) - # end Py2K - - def discard(self, state): - st = dict.get(self, state.key, None) - if st is state: - dict.pop(self, state.key, None) - self._manage_removed_state(state) - - def prune(self): - return 0 - -class StrongInstanceDict(IdentityMap): - def all_states(self): - return [attributes.instance_state(o) for o in self.itervalues()] - - def contains_state(self, state): - return state.key in self and attributes.instance_state(self[state.key]) is state - - def replace(self, state): - if dict.__contains__(self, state.key): - existing = dict.__getitem__(self, state.key) - existing = attributes.instance_state(existing) - if existing is not state: - self._manage_removed_state(existing) - else: - return - - dict.__setitem__(self, state.key, state.obj()) - self._manage_incoming_state(state) - - def add(self, state): - if state.key in self: - if attributes.instance_state(dict.__getitem__(self, - state.key)) is not state: - raise AssertionError('A conflicting state is already ' - 'present in the identity map for key %r' - % (state.key, )) - else: - dict.__setitem__(self, state.key, state.obj()) - self._manage_incoming_state(state) - - def discard(self, state): - obj = dict.get(self, state.key, None) - if obj is not None: - st = attributes.instance_state(obj) - if st is state: - dict.pop(self, state.key, None) - self._manage_removed_state(state) - - def prune(self): - """prune unreferenced, non-dirty states.""" - - ref_count = len(self) - dirty = [s.obj() for s in self.all_states() if s.modified] - - # work around http://bugs.python.org/issue6149 - keepers = weakref.WeakValueDictionary() - keepers.update(self) - - dict.clear(self) - dict.update(self, keepers) - self.modified = bool(dirty) - return ref_count - len(self) - diff --git a/libs/sqlalchemy/orm/instrumentation.py b/libs/sqlalchemy/orm/instrumentation.py 
deleted file mode 100644 index 0006accb..00000000 --- a/libs/sqlalchemy/orm/instrumentation.py +++ /dev/null @@ -1,674 +0,0 @@ -# orm/instrumentation.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines SQLAlchemy's system of class instrumentation. - -This module is usually not directly visible to user applications, but -defines a large part of the ORM's interactivity. - -instrumentation.py deals with registration of end-user classes -for state tracking. It interacts closely with state.py -and attributes.py which establish per-instance and per-class-attribute -instrumentation, respectively. - -SQLA's instrumentation system is completely customizable, in which -case an understanding of the general mechanics of this module is helpful. -An example of full customization is in /examples/custom_attributes. - -""" - - -from sqlalchemy.orm import exc, collections, events -from operator import attrgetter, itemgetter -from sqlalchemy import event, util -import weakref -from sqlalchemy.orm import state, attributes - - -INSTRUMENTATION_MANAGER = '__sa_instrumentation_manager__' -"""Attribute, elects custom instrumentation when present on a mapped class. - -Allows a class to specify a slightly or wildly different technique for -tracking changes made to mapped attributes and collections. - -Only one instrumentation implementation is allowed in a given object -inheritance hierarchy. - -The value of this attribute must be a callable and will be passed a class -object. The callable must return one of: - - - An instance of an interfaces.InstrumentationManager or subclass - - An object implementing all or some of InstrumentationManager (TODO) - - A dictionary of callables, implementing all or some of the above (TODO) - - An instance of a ClassManager or subclass - -interfaces.InstrumentationManager is public API and will remain stable -between releases. ClassManager is not public and no guarantees are made -about stability. Caveat emptor. - -This attribute is consulted by the default SQLAlchemy instrumentation -resolution code. If custom finders are installed in the global -instrumentation_finders list, they may or may not choose to honor this -attribute. - -""" - -instrumentation_finders = [] -"""An extensible sequence of instrumentation implementation finding callables. - -Finders callables will be passed a class object. If None is returned, the -next finder in the sequence is consulted. Otherwise the return must be an -instrumentation factory that follows the same guidelines as -INSTRUMENTATION_MANAGER. - -By default, the only finder is find_native_user_instrumentation_hook, which -searches for INSTRUMENTATION_MANAGER. If all finders return None, standard -ClassManager instrumentation is used. 
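A hypothetical finder, sketched only to show the contract (it mirrors
what the default finder already does)::

    from sqlalchemy.orm.instrumentation import (
        INSTRUMENTATION_MANAGER, instrumentation_finders)

    def find_convention_hook(cls):
        # returning None defers to the next finder in the sequence
        return getattr(cls, INSTRUMENTATION_MANAGER, None)

    instrumentation_finders.append(find_convention_hook)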
- -""" - - -class ClassManager(dict): - """tracks state information at the class level.""" - - MANAGER_ATTR = '_sa_class_manager' - STATE_ATTR = '_sa_instance_state' - - deferred_scalar_loader = None - - original_init = object.__init__ - - def __init__(self, class_): - self.class_ = class_ - self.factory = None # where we came from, for inheritance bookkeeping - self.info = {} - self.new_init = None - self.mutable_attributes = set() - self.local_attrs = {} - self.originals = {} - - self._bases = [mgr for mgr in [ - manager_of_class(base) - for base in self.class_.__bases__ - if isinstance(base, type) - ] if mgr is not None] - - for base in self._bases: - self.update(base) - - self.manage() - self._instrument_init() - - dispatch = event.dispatcher(events.InstanceEvents) - - @property - def is_mapped(self): - return 'mapper' in self.__dict__ - - @util.memoized_property - def mapper(self): - # raises unless self.mapper has been assigned - raise exc.UnmappedClassError(self.class_) - - def _attr_has_impl(self, key): - """Return True if the given attribute is fully initialized. - - i.e. has an impl. - """ - - return key in self and self[key].impl is not None - - def _subclass_manager(self, cls): - """Create a new ClassManager for a subclass of this ClassManager's - class. - - This is called automatically when attributes are instrumented so that - the attributes can be propagated to subclasses against their own - class-local manager, without the need for mappers etc. to have already - pre-configured managers for the full class hierarchy. Mappers - can post-configure the auto-generated ClassManager when needed. - - """ - manager = manager_of_class(cls) - if manager is None: - manager = _create_manager_for_cls(cls, _source=self) - return manager - - def _instrument_init(self): - # TODO: self.class_.__init__ is often the already-instrumented - # __init__ from an instrumented superclass. We still need to make - # our own wrapper, but it would - # be nice to wrap the original __init__ and not our existing wrapper - # of such, since this adds method overhead. 
- self.original_init = self.class_.__init__ - self.new_init = _generate_init(self.class_, self) - self.install_member('__init__', self.new_init) - - def _uninstrument_init(self): - if self.new_init: - self.uninstall_member('__init__') - self.new_init = None - - @util.memoized_property - def _state_constructor(self): - self.dispatch.first_init(self, self.class_) - if self.mutable_attributes: - return state.MutableAttrInstanceState - else: - return state.InstanceState - - def manage(self): - """Mark this instance as the manager for its class.""" - - setattr(self.class_, self.MANAGER_ATTR, self) - - def dispose(self): - """Dissasociate this manager from its class.""" - - delattr(self.class_, self.MANAGER_ATTR) - - def manager_getter(self): - return attrgetter(self.MANAGER_ATTR) - - def instrument_attribute(self, key, inst, propagated=False): - if propagated: - if key in self.local_attrs: - return # don't override local attr with inherited attr - else: - self.local_attrs[key] = inst - self.install_descriptor(key, inst) - self[key] = inst - - for cls in self.class_.__subclasses__(): - manager = self._subclass_manager(cls) - manager.instrument_attribute(key, inst, True) - - def subclass_managers(self, recursive): - for cls in self.class_.__subclasses__(): - mgr = manager_of_class(cls) - if mgr is not None and mgr is not self: - yield mgr - if recursive: - for m in mgr.subclass_managers(True): - yield m - - def post_configure_attribute(self, key): - instrumentation_registry.dispatch.\ - attribute_instrument(self.class_, key, self[key]) - - def uninstrument_attribute(self, key, propagated=False): - if key not in self: - return - if propagated: - if key in self.local_attrs: - return # don't get rid of local attr - else: - del self.local_attrs[key] - self.uninstall_descriptor(key) - del self[key] - if key in self.mutable_attributes: - self.mutable_attributes.remove(key) - for cls in self.class_.__subclasses__(): - manager = manager_of_class(cls) - if manager: - manager.uninstrument_attribute(key, True) - - def unregister(self): - """remove all instrumentation established by this ClassManager.""" - - self._uninstrument_init() - - self.mapper = self.dispatch = None - self.info.clear() - - for key in list(self): - if key in self.local_attrs: - self.uninstrument_attribute(key) - - def install_descriptor(self, key, inst): - if key in (self.STATE_ATTR, self.MANAGER_ATTR): - raise KeyError("%r: requested attribute name conflicts with " - "instrumentation attribute of the same name." % - key) - setattr(self.class_, key, inst) - - def uninstall_descriptor(self, key): - delattr(self.class_, key) - - def install_member(self, key, implementation): - if key in (self.STATE_ATTR, self.MANAGER_ATTR): - raise KeyError("%r: requested attribute name conflicts with " - "instrumentation attribute of the same name." 
% - key) - self.originals.setdefault(key, getattr(self.class_, key, None)) - setattr(self.class_, key, implementation) - - def uninstall_member(self, key): - original = self.originals.pop(key, None) - if original is not None: - setattr(self.class_, key, original) - - def instrument_collection_class(self, key, collection_class): - return collections.prepare_instrumentation(collection_class) - - def initialize_collection(self, key, state, factory): - user_data = factory() - adapter = collections.CollectionAdapter( - self.get_impl(key), state, user_data) - return adapter, user_data - - def is_instrumented(self, key, search=False): - if search: - return key in self - else: - return key in self.local_attrs - - def get_impl(self, key): - return self[key].impl - - @property - def attributes(self): - return self.itervalues() - - ## InstanceState management - - def new_instance(self, state=None): - instance = self.class_.__new__(self.class_) - setattr(instance, self.STATE_ATTR, - state or self._state_constructor(instance, self)) - return instance - - def setup_instance(self, instance, state=None): - setattr(instance, self.STATE_ATTR, - state or self._state_constructor(instance, self)) - - def teardown_instance(self, instance): - delattr(instance, self.STATE_ATTR) - - def _new_state_if_none(self, instance): - """Install a default InstanceState if none is present. - - A private convenience method used by the __init__ decorator. - - """ - if hasattr(instance, self.STATE_ATTR): - return False - elif self.class_ is not instance.__class__ and \ - self.is_mapped: - # this will create a new ClassManager for the - # subclass, without a mapper. This is likely a - # user error situation but allow the object - # to be constructed, so that it is usable - # in a non-ORM context at least. - return self._subclass_manager(instance.__class__).\ - _new_state_if_none(instance) - else: - state = self._state_constructor(instance, self) - setattr(instance, self.STATE_ATTR, state) - return state - - def state_getter(self): - """Return a (instance) -> InstanceState callable. - - "state getter" callables should raise either KeyError or - AttributeError if no InstanceState could be found for the - instance. 
- """ - - return attrgetter(self.STATE_ATTR) - - def dict_getter(self): - return attrgetter('__dict__') - - def has_state(self, instance): - return hasattr(instance, self.STATE_ATTR) - - def has_parent(self, state, key, optimistic=False): - """TODO""" - return self.get_impl(key).hasparent(state, optimistic=optimistic) - - def __nonzero__(self): - """All ClassManagers are non-zero regardless of attribute state.""" - return True - - def __repr__(self): - return '<%s of %r at %x>' % ( - self.__class__.__name__, self.class_, id(self)) - -class _ClassInstrumentationAdapter(ClassManager): - """Adapts a user-defined InstrumentationManager to a ClassManager.""" - - def __init__(self, class_, override, **kw): - self._adapted = override - self._get_state = self._adapted.state_getter(class_) - self._get_dict = self._adapted.dict_getter(class_) - - ClassManager.__init__(self, class_, **kw) - - def manage(self): - self._adapted.manage(self.class_, self) - - def dispose(self): - self._adapted.dispose(self.class_) - - def manager_getter(self): - return self._adapted.manager_getter(self.class_) - - def instrument_attribute(self, key, inst, propagated=False): - ClassManager.instrument_attribute(self, key, inst, propagated) - if not propagated: - self._adapted.instrument_attribute(self.class_, key, inst) - - def post_configure_attribute(self, key): - super(_ClassInstrumentationAdapter, self).post_configure_attribute(key) - self._adapted.post_configure_attribute(self.class_, key, self[key]) - - def install_descriptor(self, key, inst): - self._adapted.install_descriptor(self.class_, key, inst) - - def uninstall_descriptor(self, key): - self._adapted.uninstall_descriptor(self.class_, key) - - def install_member(self, key, implementation): - self._adapted.install_member(self.class_, key, implementation) - - def uninstall_member(self, key): - self._adapted.uninstall_member(self.class_, key) - - def instrument_collection_class(self, key, collection_class): - return self._adapted.instrument_collection_class( - self.class_, key, collection_class) - - def initialize_collection(self, key, state, factory): - delegate = getattr(self._adapted, 'initialize_collection', None) - if delegate: - return delegate(key, state, factory) - else: - return ClassManager.initialize_collection(self, key, - state, factory) - - def new_instance(self, state=None): - instance = self.class_.__new__(self.class_) - self.setup_instance(instance, state) - return instance - - def _new_state_if_none(self, instance): - """Install a default InstanceState if none is present. - - A private convenience method used by the __init__ decorator. - """ - if self.has_state(instance): - return False - else: - return self.setup_instance(instance) - - def setup_instance(self, instance, state=None): - self._adapted.initialize_instance_dict(self.class_, instance) - - if state is None: - state = self._state_constructor(instance, self) - - # the given instance is assumed to have no state - self._adapted.install_state(self.class_, instance, state) - return state - - def teardown_instance(self, instance): - self._adapted.remove_state(self.class_, instance) - - def has_state(self, instance): - try: - state = self._get_state(instance) - except exc.NO_STATE: - return False - else: - return True - - def state_getter(self): - return self._get_state - - def dict_getter(self): - return self._get_dict - -def register_class(class_, **kw): - """Register class instrumentation. - - Returns the existing or newly created class manager. 
- """ - - manager = manager_of_class(class_) - if manager is None: - manager = _create_manager_for_cls(class_, **kw) - return manager - -def unregister_class(class_): - """Unregister class instrumentation.""" - - instrumentation_registry.unregister(class_) - - -def is_instrumented(instance, key): - """Return True if the given attribute on the given instance is - instrumented by the attributes package. - - This function may be used regardless of instrumentation - applied directly to the class, i.e. no descriptors are required. - - """ - return manager_of_class(instance.__class__).\ - is_instrumented(key, search=True) - -class InstrumentationRegistry(object): - """Private instrumentation registration singleton. - - All classes are routed through this registry - when first instrumented, however the InstrumentationRegistry - is not actually needed unless custom ClassManagers are in use. - - """ - - _manager_finders = weakref.WeakKeyDictionary() - _state_finders = util.WeakIdentityMapping() - _dict_finders = util.WeakIdentityMapping() - _extended = False - - dispatch = event.dispatcher(events.InstrumentationEvents) - - def create_manager_for_cls(self, class_, **kw): - assert class_ is not None - assert manager_of_class(class_) is None - - for finder in instrumentation_finders: - factory = finder(class_) - if factory is not None: - break - else: - factory = ClassManager - - existing_factories = self._collect_management_factories_for(class_).\ - difference([factory]) - if existing_factories: - raise TypeError( - "multiple instrumentation implementations specified " - "in %s inheritance hierarchy: %r" % ( - class_.__name__, list(existing_factories))) - - manager = factory(class_) - if not isinstance(manager, ClassManager): - manager = _ClassInstrumentationAdapter(class_, manager) - - if factory != ClassManager and not self._extended: - # somebody invoked a custom ClassManager. - # reinstall global "getter" functions with the more - # expensive ones. - self._extended = True - _install_lookup_strategy(self) - - manager.factory = factory - self._manager_finders[class_] = manager.manager_getter() - self._state_finders[class_] = manager.state_getter() - self._dict_finders[class_] = manager.dict_getter() - - self.dispatch.class_instrument(class_) - - return manager - - def _collect_management_factories_for(self, cls): - """Return a collection of factories in play or specified for a - hierarchy. - - Traverses the entire inheritance graph of a cls and returns a - collection of instrumentation factories for those classes. Factories - are extracted from active ClassManagers, if available, otherwise - instrumentation_finders is consulted. 
- - """ - hierarchy = util.class_hierarchy(cls) - factories = set() - for member in hierarchy: - manager = manager_of_class(member) - if manager is not None: - factories.add(manager.factory) - else: - for finder in instrumentation_finders: - factory = finder(member) - if factory is not None: - break - else: - factory = None - factories.add(factory) - factories.discard(None) - return factories - - def manager_of_class(self, cls): - # this is only called when alternate instrumentation - # has been established - if cls is None: - return None - try: - finder = self._manager_finders[cls] - except KeyError: - return None - else: - return finder(cls) - - def state_of(self, instance): - # this is only called when alternate instrumentation - # has been established - if instance is None: - raise AttributeError("None has no persistent state.") - try: - return self._state_finders[instance.__class__](instance) - except KeyError: - raise AttributeError("%r is not instrumented" % - instance.__class__) - - def dict_of(self, instance): - # this is only called when alternate instrumentation - # has been established - if instance is None: - raise AttributeError("None has no persistent state.") - try: - return self._dict_finders[instance.__class__](instance) - except KeyError: - raise AttributeError("%r is not instrumented" % - instance.__class__) - - def unregister(self, class_): - if class_ in self._manager_finders: - manager = self.manager_of_class(class_) - self.dispatch.class_uninstrument(class_) - manager.unregister() - manager.dispose() - del self._manager_finders[class_] - del self._state_finders[class_] - del self._dict_finders[class_] - if ClassManager.MANAGER_ATTR in class_.__dict__: - delattr(class_, ClassManager.MANAGER_ATTR) - -instrumentation_registry = InstrumentationRegistry() - - -def _install_lookup_strategy(implementation): - """Replace global class/object management functions - with either faster or more comprehensive implementations, - based on whether or not extended class instrumentation - has been detected. - - This function is called only by InstrumentationRegistry() - and unit tests specific to this behavior. - - """ - global instance_state, instance_dict, manager_of_class - if implementation is util.symbol('native'): - instance_state = attrgetter(ClassManager.STATE_ATTR) - instance_dict = attrgetter("__dict__") - def manager_of_class(cls): - return cls.__dict__.get(ClassManager.MANAGER_ATTR, None) - else: - instance_state = instrumentation_registry.state_of - instance_dict = instrumentation_registry.dict_of - manager_of_class = instrumentation_registry.manager_of_class - attributes.instance_state = instance_state - attributes.instance_dict = instance_dict - attributes.manager_of_class = manager_of_class - -_create_manager_for_cls = instrumentation_registry.create_manager_for_cls - -# Install default "lookup" strategies. These are basically -# very fast attrgetters for key attributes. -# When a custom ClassManager is installed, more expensive per-class -# strategies are copied over these. 
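The `_install_lookup_strategy` helper above, invoked with the `native` symbol just below, swaps the module-level `instance_state` / `instance_dict` / `manager_of_class` accessors between fast attrgetters and registry-backed lookups. A minimal sketch of that swap-on-demand pattern, using invented names:

```python
# Invented names throughout; sketches the native-vs-extended accessor swap.
from operator import attrgetter

STATE_ATTR = '_toy_state'

# fast default: plain attribute access
instance_state = attrgetter(STATE_ATTR)


class ToyRegistry(object):
    """Fallback that can route lookups through per-class finders."""

    def __init__(self):
        self._finders = {}

    def state_of(self, obj):
        finder = self._finders.get(type(obj), attrgetter(STATE_ATTR))
        return finder(obj)


def install_lookup_strategy(registry=None):
    """Swap the module-level accessor, as _install_lookup_strategy does."""
    global instance_state
    if registry is None:
        instance_state = attrgetter(STATE_ATTR)  # native: cheap attrgetter
    else:
        instance_state = registry.state_of       # extended: registry lookup


class Thing(object):
    pass


t = Thing()
t._toy_state = 'state'
assert instance_state(t) == 'state'

install_lookup_strategy(ToyRegistry())
assert instance_state(t) == 'state'
```

The design choice mirrored here is that the common case (no custom instrumentation) pays only for an `attrgetter`, and the more general path is installed globally the first time anyone opts in.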
-_install_lookup_strategy(util.symbol('native')) - - -def find_native_user_instrumentation_hook(cls): - """Find user-specified instrumentation management for a class.""" - return getattr(cls, INSTRUMENTATION_MANAGER, None) -instrumentation_finders.append(find_native_user_instrumentation_hook) - -def _generate_init(class_, class_manager): - """Build an __init__ decorator that triggers ClassManager events.""" - - # TODO: we should use the ClassManager's notion of the - # original '__init__' method, once ClassManager is fixed - # to always reference that. - original__init__ = class_.__init__ - assert original__init__ - - # Go through some effort here and don't change the user's __init__ - # calling signature, including the unlikely case that it has - # a return value. - # FIXME: need to juggle local names to avoid constructor argument - # clashes. - func_body = """\ -def __init__(%(apply_pos)s): - new_state = class_manager._new_state_if_none(%(self_arg)s) - if new_state: - return new_state.initialize_instance(%(apply_kw)s) - else: - return original__init__(%(apply_kw)s) -""" - func_vars = util.format_argspec_init(original__init__, grouped=False) - func_text = func_body % func_vars - - # Py3K - #func_defaults = getattr(original__init__, '__defaults__', None) - #func_kw_defaults = getattr(original__init__, '__kwdefaults__', None) - # Py2K - func = getattr(original__init__, 'im_func', original__init__) - func_defaults = getattr(func, 'func_defaults', None) - # end Py2K - - env = locals().copy() - exec func_text in env - __init__ = env['__init__'] - __init__.__doc__ = original__init__.__doc__ - if func_defaults: - __init__.func_defaults = func_defaults - # Py3K - #if func_kw_defaults: - # __init__.__kwdefaults__ = func_kw_defaults - return __init__ diff --git a/libs/sqlalchemy/orm/interfaces.py b/libs/sqlalchemy/orm/interfaces.py deleted file mode 100644 index b911ac29..00000000 --- a/libs/sqlalchemy/orm/interfaces.py +++ /dev/null @@ -1,783 +0,0 @@ -# orm/interfaces.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -""" - -Contains various base classes used throughout the ORM. - -Defines the now deprecated ORM extension classes as well -as ORM internals. - -Other than the deprecated extensions, this module and the -classes within should be considered mostly private. 
-
-"""
-
-from itertools import chain
-
-from sqlalchemy import exc as sa_exc
-from sqlalchemy import util
-from sqlalchemy.sql import operators
-deque = __import__('collections').deque
-
-mapperutil = util.importlater('sqlalchemy.orm', 'util')
-
-collections = None
-
-__all__ = (
-    'AttributeExtension',
-    'EXT_CONTINUE',
-    'EXT_STOP',
-    'ExtensionOption',
-    'InstrumentationManager',
-    'LoaderStrategy',
-    'MapperExtension',
-    'MapperOption',
-    'MapperProperty',
-    'PropComparator',
-    'PropertyOption',
-    'SessionExtension',
-    'StrategizedOption',
-    'StrategizedProperty',
-    'build_path',
-    )
-
-EXT_CONTINUE = util.symbol('EXT_CONTINUE')
-EXT_STOP = util.symbol('EXT_STOP')
-
-ONETOMANY = util.symbol('ONETOMANY')
-MANYTOONE = util.symbol('MANYTOONE')
-MANYTOMANY = util.symbol('MANYTOMANY')
-
-from deprecated_interfaces import AttributeExtension, SessionExtension, \
-    MapperExtension
-
-
-class MapperProperty(object):
-    """Manage the relationship of a ``Mapper`` to a single class
-    attribute, as well as that attribute as it appears on individual
-    instances of the class, including attribute instrumentation,
-    attribute access, loading behavior, and dependency calculations.
-
-    The most common occurrences of :class:`.MapperProperty` are the
-    mapped :class:`.Column`, which is represented in a mapping as
-    an instance of :class:`.ColumnProperty`,
-    and a reference to another class produced by :func:`.relationship`,
-    represented in the mapping as an instance of :class:`.RelationshipProperty`.
-
-    """
-
-    cascade = ()
-    """The set of 'cascade' attribute names.
-
-    This collection is checked before the 'cascade_iterator' method is called.
-
-    """
-
-    def setup(self, context, entity, path, reduced_path, adapter, **kwargs):
-        """Called by Query for the purposes of constructing a SQL statement.
-
-        Each MapperProperty associated with the target mapper processes the
-        statement referenced by the query context, adding columns and/or
-        criterion as appropriate.
-        """
-
-        pass
-
-    def create_row_processor(self, context, path, reduced_path,
-                                    mapper, row, adapter):
-        """Return a 3-tuple consisting of three row processing functions.
-
-        """
-        return None, None, None
-
-    def cascade_iterator(self, type_, state, visited_instances=None,
-                            halt_on=None):
-        """Iterate through instances related to the given instance for
-        a particular 'cascade', starting with this MapperProperty.
-
-        Return an iterator of 3-tuples (instance, mapper, state).
-
-        Note that the 'cascade' collection on this MapperProperty is
-        checked first for the given type before cascade_iterator is called.
-
-        See PropertyLoader for the related instance implementation.
-        """
-
-        return iter(())
-
-    def set_parent(self, parent, init):
-        self.parent = parent
-
-    def instrument_class(self, mapper):
-        raise NotImplementedError()
-
-    _compile_started = False
-    _compile_finished = False
-
-    def init(self):
-        """Called after all mappers are created to assemble
-        relationships between mappers and perform other post-mapper-creation
-        initialization steps.
-
-        """
-        self._compile_started = True
-        self.do_init()
-        self._compile_finished = True
-
-    @property
-    def class_attribute(self):
-        """Return the class-bound descriptor corresponding to this
-        MapperProperty."""
-
-        return getattr(self.parent.class_, self.key)
-
-    def do_init(self):
-        """Perform subclass-specific initialization post-mapper-creation
-        steps.
-
-        This is a template method called by the ``MapperProperty``
-        object's init() method.
- - """ - - pass - - def post_instrument_class(self, mapper): - """Perform instrumentation adjustments that need to occur - after init() has completed. - - """ - pass - - def per_property_preprocessors(self, uow): - pass - - def is_primary(self): - """Return True if this ``MapperProperty``'s mapper is the - primary mapper for its class. - - This flag is used to indicate that the ``MapperProperty`` can - define attribute instrumentation for the class at the class - level (as opposed to the individual instance level). - """ - - return not self.parent.non_primary - - def merge(self, session, source_state, source_dict, dest_state, - dest_dict, load, _recursive): - """Merge the attribute represented by this ``MapperProperty`` - from source to destination object""" - - pass - - def compare(self, operator, value, **kw): - """Return a compare operation for the columns represented by - this ``MapperProperty`` to the given value, which may be a - column value or an instance. 'operator' is an operator from - the operators module, or from sql.Comparator. - - By default uses the PropComparator attached to this MapperProperty - under the attribute name "comparator". - """ - - return operator(self.comparator, value) - -class PropComparator(operators.ColumnOperators): - """Defines comparison operations for MapperProperty objects. - - User-defined subclasses of :class:`.PropComparator` may be created. The - built-in Python comparison and math operator methods, such as - ``__eq__()``, ``__lt__()``, ``__add__()``, can be overridden to provide - new operator behavior. The custom :class:`.PropComparator` is passed to - the mapper property via the ``comparator_factory`` argument. In each case, - the appropriate subclass of :class:`.PropComparator` should be used:: - - from sqlalchemy.orm.properties import \\ - ColumnProperty,\\ - CompositeProperty,\\ - RelationshipProperty - - class MyColumnComparator(ColumnProperty.Comparator): - pass - - class MyCompositeComparator(CompositeProperty.Comparator): - pass - - class MyRelationshipComparator(RelationshipProperty.Comparator): - pass - - """ - - def __init__(self, prop, mapper, adapter=None): - self.prop = self.property = prop - self.mapper = mapper - self.adapter = adapter - - def __clause_element__(self): - raise NotImplementedError("%r" % self) - - def adapted(self, adapter): - """Return a copy of this PropComparator which will use the given - adaption function on the local side of generated expressions. - - """ - - return self.__class__(self.prop, self.mapper, adapter) - - @staticmethod - def any_op(a, b, **kwargs): - return a.any(b, **kwargs) - - @staticmethod - def has_op(a, b, **kwargs): - return a.has(b, **kwargs) - - @staticmethod - def of_type_op(a, class_): - return a.of_type(class_) - - def of_type(self, class_): - """Redefine this object in terms of a polymorphic subclass. - - Returns a new PropComparator from which further criterion can be - evaluated. - - e.g.:: - - query.join(Company.employees.of_type(Engineer)).\\ - filter(Engineer.name=='foo') - - :param \class_: a class or mapper indicating that criterion will be against - this specific subclass. - - - """ - - return self.operate(PropComparator.of_type_op, class_) - - def any(self, criterion=None, **kwargs): - """Return true if this collection contains any member that meets the - given criterion. - - The usual implementation of ``any()`` is - :meth:`.RelationshipProperty.Comparator.any`. - - :param criterion: an optional ClauseElement formulated against the - member class' table or attributes. 
- - :param \**kwargs: key/value pairs corresponding to member class attribute - names which will be compared via equality to the corresponding - values. - - """ - - return self.operate(PropComparator.any_op, criterion, **kwargs) - - def has(self, criterion=None, **kwargs): - """Return true if this element references a member which meets the - given criterion. - - The usual implementation of ``has()`` is - :meth:`.RelationshipProperty.Comparator.has`. - - :param criterion: an optional ClauseElement formulated against the - member class' table or attributes. - - :param \**kwargs: key/value pairs corresponding to member class attribute - names which will be compared via equality to the corresponding - values. - - """ - - return self.operate(PropComparator.has_op, criterion, **kwargs) - - -class StrategizedProperty(MapperProperty): - """A MapperProperty which uses selectable strategies to affect - loading behavior. - - There is a single strategy selected by default. Alternate - strategies can be selected at Query time through the usage of - ``StrategizedOption`` objects via the Query.options() method. - - """ - - strategy_wildcard_key = None - - def _get_context_strategy(self, context, reduced_path): - key = ('loaderstrategy', reduced_path) - cls = None - if key in context.attributes: - cls = context.attributes[key] - elif self.strategy_wildcard_key: - key = ('loaderstrategy', (self.strategy_wildcard_key,)) - if key in context.attributes: - cls = context.attributes[key] - - if cls: - try: - return self._strategies[cls] - except KeyError: - return self.__init_strategy(cls) - return self.strategy - - def _get_strategy(self, cls): - try: - return self._strategies[cls] - except KeyError: - return self.__init_strategy(cls) - - def __init_strategy(self, cls): - self._strategies[cls] = strategy = cls(self) - return strategy - - def setup(self, context, entity, path, reduced_path, adapter, **kwargs): - self._get_context_strategy(context, reduced_path + (self.key,)).\ - setup_query(context, entity, path, - reduced_path, adapter, **kwargs) - - def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): - return self._get_context_strategy(context, reduced_path + (self.key,)).\ - create_row_processor(context, path, - reduced_path, mapper, row, adapter) - - def do_init(self): - self._strategies = {} - self.strategy = self.__init_strategy(self.strategy_class) - - def post_instrument_class(self, mapper): - if self.is_primary() and \ - not mapper.class_manager._attr_has_impl(self.key): - self.strategy.init_class_attribute(mapper) - -def build_path(entity, key, prev=None): - if prev: - return prev + (entity, key) - else: - return (entity, key) - -def serialize_path(path): - if path is None: - return None - - return zip( - [m.class_ for m in [path[i] for i in range(0, len(path), 2)]], - [path[i] for i in range(1, len(path), 2)] + [None] - ) - -def deserialize_path(path): - if path is None: - return None - - p = tuple(chain(*[(mapperutil.class_mapper(cls), key) for cls, key in path])) - if p and p[-1] is None: - p = p[0:-1] - return p - -class MapperOption(object): - """Describe a modification to a Query.""" - - propagate_to_loaders = False - """if True, indicate this option should be carried along - Query object generated by scalar or object lazy loaders. - """ - - def process_query(self, query): - pass - - def process_query_conditionally(self, query): - """same as process_query(), except that this option may not - apply to the given query. 
- - Used when secondary loaders resend existing options to a new - Query.""" - - self.process_query(query) - -class PropertyOption(MapperOption): - """A MapperOption that is applied to a property off the mapper or - one of its child mappers, identified by a dot-separated key - or list of class-bound attributes. """ - - def __init__(self, key, mapper=None): - self.key = key - self.mapper = mapper - - def process_query(self, query): - self._process(query, True) - - def process_query_conditionally(self, query): - self._process(query, False) - - def _process(self, query, raiseerr): - paths, mappers = self._get_paths(query, raiseerr) - if paths: - self.process_query_property(query, paths, mappers) - - def process_query_property(self, query, paths, mappers): - pass - - def __getstate__(self): - d = self.__dict__.copy() - d['key'] = ret = [] - for token in util.to_list(self.key): - if isinstance(token, PropComparator): - ret.append((token.mapper.class_, token.key)) - else: - ret.append(token) - return d - - def __setstate__(self, state): - ret = [] - for key in state['key']: - if isinstance(key, tuple): - cls, propkey = key - ret.append(getattr(cls, propkey)) - else: - ret.append(key) - state['key'] = tuple(ret) - self.__dict__ = state - - def _find_entity_prop_comparator(self, query, token, mapper, raiseerr): - if mapperutil._is_aliased_class(mapper): - searchfor = mapper - isa = False - else: - searchfor = mapperutil._class_to_mapper(mapper) - isa = True - for ent in query._mapper_entities: - if searchfor is ent.path_entity or isa \ - and searchfor.common_parent(ent.path_entity): - return ent - else: - if raiseerr: - if not list(query._mapper_entities): - raise sa_exc.ArgumentError( - "Query has only expression-based entities - " - "can't find property named '%s'." - % (token, ) - ) - else: - raise sa_exc.ArgumentError( - "Can't find property '%s' on any entity " - "specified in this Query. Note the full path " - "from root (%s) to target entity must be specified." - % (token, ",".join(str(x) for - x in query._mapper_entities)) - ) - else: - return None - - def _find_entity_basestring(self, query, token, raiseerr): - for ent in query._mapper_entities: - # return only the first _MapperEntity when searching - # based on string prop name. Ideally object - # attributes are used to specify more exactly. - return ent - else: - if raiseerr: - raise sa_exc.ArgumentError( - "Query has only expression-based entities - " - "can't find property named '%s'." 
- % (token, ) - ) - else: - return None - - def _get_paths(self, query, raiseerr): - path = None - entity = None - l = [] - mappers = [] - - # _current_path implies we're in a - # secondary load with an existing path - current_path = list(query._current_path) - - tokens = deque(self.key) - while tokens: - token = tokens.popleft() - if isinstance(token, basestring): - # wildcard token - if token.endswith(':*'): - return [(token,)], [] - sub_tokens = token.split(".", 1) - token = sub_tokens[0] - tokens.extendleft(sub_tokens[1:]) - - # exhaust current_path before - # matching tokens to entities - if current_path: - if current_path[1] == token: - current_path = current_path[2:] - continue - else: - return [], [] - - if not entity: - entity = self._find_entity_basestring( - query, - token, - raiseerr) - if entity is None: - return [], [] - path_element = entity.path_entity - mapper = entity.mapper - mappers.append(mapper) - if hasattr(mapper.class_, token): - prop = getattr(mapper.class_, token).property - else: - if raiseerr: - raise sa_exc.ArgumentError( - "Can't find property named '%s' on the " - "mapped entity %s in this Query. " % ( - token, mapper) - ) - else: - return [], [] - elif isinstance(token, PropComparator): - prop = token.property - - # exhaust current_path before - # matching tokens to entities - if current_path: - if current_path[0:2] == \ - [token.parententity, prop.key]: - current_path = current_path[2:] - continue - else: - return [], [] - - if not entity: - entity = self._find_entity_prop_comparator( - query, - prop.key, - token.parententity, - raiseerr) - if not entity: - return [], [] - path_element = entity.path_entity - mapper = entity.mapper - mappers.append(prop.parent) - else: - raise sa_exc.ArgumentError( - "mapper option expects " - "string key or list of attributes") - assert prop is not None - if raiseerr and not prop.parent.common_parent(mapper): - raise sa_exc.ArgumentError("Attribute '%s' does not " - "link from element '%s'" % (token, path_element)) - - path = build_path(path_element, prop.key, path) - - l.append(path) - if getattr(token, '_of_type', None): - path_element = mapper = token._of_type - else: - path_element = mapper = getattr(prop, 'mapper', None) - if mapper is None and tokens: - raise sa_exc.ArgumentError( - "Attribute '%s' of entity '%s' does not " - "refer to a mapped entity" % - (token, entity) - ) - - if current_path: - # ran out of tokens before - # current_path was exhausted. - assert not tokens - return [], [] - - return l, mappers - -class StrategizedOption(PropertyOption): - """A MapperOption that affects which LoaderStrategy will be used - for an operation by a StrategizedProperty. - """ - - chained = False - - def process_query_property(self, query, paths, mappers): - - # _get_context_strategy may receive the path in terms of a base - # mapper - e.g. options(eagerload_all(Company.employees, - # Engineer.machines)) in the polymorphic tests leads to - # "(Person, 'machines')" in the path due to the mechanics of how - # the eager strategy builds up the path - - if self.chained: - for path in paths: - query._attributes[('loaderstrategy', - _reduce_path(path))] = \ - self.get_strategy_class() - else: - query._attributes[('loaderstrategy', - _reduce_path(paths[-1]))] = \ - self.get_strategy_class() - - def get_strategy_class(self): - raise NotImplementedError() - -def _reduce_path(path): - """Convert a (mapper, path) path to use base mappers. - - This is used to allow more open ended selection of loader strategies, i.e. 
- Mapper -> prop1 -> Subclass -> prop2, where Subclass is a sub-mapper - of the mapper referenced by Mapper.prop1. - - """ - return tuple([i % 2 != 0 and - element or - getattr(element, 'base_mapper', element) - for i, element in enumerate(path)]) - -class LoaderStrategy(object): - """Describe the loading behavior of a StrategizedProperty object. - - The ``LoaderStrategy`` interacts with the querying process in three - ways: - - * it controls the configuration of the ``InstrumentedAttribute`` - placed on a class to handle the behavior of the attribute. this - may involve setting up class-level callable functions to fire - off a select operation when the attribute is first accessed - (i.e. a lazy load) - - * it processes the ``QueryContext`` at statement construction time, - where it can modify the SQL statement that is being produced. - Simple column attributes may add their represented column to the - list of selected columns, *eager loading* properties may add - ``LEFT OUTER JOIN`` clauses to the statement. - - * It produces "row processor" functions at result fetching time. - These "row processor" functions populate a particular attribute - on a particular mapped instance. - - """ - def __init__(self, parent): - self.parent_property = parent - self.is_class_level = False - self.parent = self.parent_property.parent - self.key = self.parent_property.key - # TODO: there's no particular reason we need - # the separate .init() method at this point. - # It's possible someone has written their - # own LS object. - self.init() - - def init(self): - raise NotImplementedError("LoaderStrategy") - - def init_class_attribute(self, mapper): - pass - - def setup_query(self, context, entity, path, reduced_path, adapter, **kwargs): - pass - - def create_row_processor(self, context, path, reduced_path, mapper, - row, adapter): - """Return row processing functions which fulfill the contract - specified by MapperProperty.create_row_processor. - - StrategizedProperty delegates its create_row_processor method - directly to this method. """ - - return None, None, None - - def __str__(self): - return str(self.parent_property) - - def debug_callable(self, fn, logger, announcement, logfn): - if announcement: - logger.debug(announcement) - if logfn: - def call(*args, **kwargs): - logger.debug(logfn(*args, **kwargs)) - return fn(*args, **kwargs) - return call - else: - return fn - -class InstrumentationManager(object): - """User-defined class instrumentation extension. - - :class:`.InstrumentationManager` can be subclassed in order - to change - how class instrumentation proceeds. This class exists for - the purposes of integration with other object management - frameworks which would like to entirely modify the - instrumentation methodology of the ORM, and is not intended - for regular usage. For interception of class instrumentation - events, see :class:`.InstrumentationEvents`. - - For an example of :class:`.InstrumentationManager`, see the - example :ref:`examples_instrumentation`. - - The API for this class should be considered as semi-stable, - and may change slightly with new releases. - - """ - - # r4361 added a mandatory (cls) constructor to this interface. - # given that, perhaps class_ should be dropped from all of these - # signatures. 
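As the docstring above notes, `InstrumentationManager` exists so an external object-management framework can take over how state is attached to instances; the default method bodies follow below in this hunk. A self-contained toy of the same contract, keeping state in an external dict rather than on the instance (all names invented):

```python
# Toy version of the InstrumentationManager contract; invented names.

class DictStateManager(object):
    """Keeps per-instance state in an external id-keyed dict."""

    def __init__(self, class_):
        self.class_ = class_
        self._states = {}

    def install_state(self, class_, instance, state):
        self._states[id(instance)] = state

    def remove_state(self, class_, instance):
        del self._states[id(instance)]

    def state_getter(self, class_):
        # must agree with install_state on where state lives
        return lambda instance: self._states[id(instance)]


class Point(object):
    pass


mgr = DictStateManager(Point)
p = Point()
mgr.install_state(Point, p, {'committed': False})
get_state = mgr.state_getter(Point)
assert get_state(p) == {'committed': False}
```

A real implementation would need weak references here to avoid keeping every instance alive; the registry earlier in this diff uses `util.WeakIdentityMapping` for exactly that reason.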
- - def __init__(self, class_): - pass - - def manage(self, class_, manager): - setattr(class_, '_default_class_manager', manager) - - def dispose(self, class_, manager): - delattr(class_, '_default_class_manager') - - def manager_getter(self, class_): - def get(cls): - return cls._default_class_manager - return get - - def instrument_attribute(self, class_, key, inst): - pass - - def post_configure_attribute(self, class_, key, inst): - pass - - def install_descriptor(self, class_, key, inst): - setattr(class_, key, inst) - - def uninstall_descriptor(self, class_, key): - delattr(class_, key) - - def install_member(self, class_, key, implementation): - setattr(class_, key, implementation) - - def uninstall_member(self, class_, key): - delattr(class_, key) - - def instrument_collection_class(self, class_, key, collection_class): - global collections - if collections is None: - from sqlalchemy.orm import collections - return collections.prepare_instrumentation(collection_class) - - def get_instance_dict(self, class_, instance): - return instance.__dict__ - - def initialize_instance_dict(self, class_, instance): - pass - - def install_state(self, class_, instance, state): - setattr(instance, '_default_state', state) - - def remove_state(self, class_, instance): - delattr(instance, '_default_state') - - def state_getter(self, class_): - return lambda instance: getattr(instance, '_default_state') - - def dict_getter(self, class_): - return lambda inst: self.get_instance_dict(class_, inst) diff --git a/libs/sqlalchemy/orm/mapper.py b/libs/sqlalchemy/orm/mapper.py deleted file mode 100644 index de4d351b..00000000 --- a/libs/sqlalchemy/orm/mapper.py +++ /dev/null @@ -1,2390 +0,0 @@ -# orm/mapper.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Logic to map Python classes to and from selectables. - -Defines the :class:`~sqlalchemy.orm.mapper.Mapper` class, the central -configurational unit which associates a class with a database table. - -This is a semi-private module; the main configurational API of the ORM is -available in :class:`~sqlalchemy.orm.`. 
- -""" - -import types -import weakref -import operator -from itertools import chain, groupby -deque = __import__('collections').deque - -from sqlalchemy import sql, util, log, exc as sa_exc, event, schema -from sqlalchemy.sql import expression, visitors, operators, util as sqlutil -from sqlalchemy.orm import instrumentation, attributes, sync, \ - exc as orm_exc, unitofwork, events -from sqlalchemy.orm.interfaces import MapperProperty, EXT_CONTINUE, \ - PropComparator - -from sqlalchemy.orm.util import _INSTRUMENTOR, _class_to_mapper, \ - _state_mapper, class_mapper, instance_str, state_str - -import sys -sessionlib = util.importlater("sqlalchemy.orm", "session") -properties = util.importlater("sqlalchemy.orm", "properties") - -__all__ = ( - 'Mapper', - '_mapper_registry', - 'class_mapper', - 'object_mapper', - ) - -_mapper_registry = weakref.WeakKeyDictionary() -_new_mappers = False -_already_compiling = False -_none_set = frozenset([None]) - -_memoized_configured_property = util.group_expirable_memoized_property() - - -# a constant returned by _get_attr_by_column to indicate -# this mapper is not handling an attribute for a particular -# column -NO_ATTRIBUTE = util.symbol('NO_ATTRIBUTE') - -# lock used to synchronize the "mapper compile" step -_COMPILE_MUTEX = util.threading.RLock() - -class Mapper(object): - """Define the correlation of class attributes to database table - columns. - - Instances of this class should be constructed via the - :func:`~sqlalchemy.orm.mapper` function. - - """ - def __init__(self, - class_, - local_table, - properties = None, - primary_key = None, - non_primary = False, - inherits = None, - inherit_condition = None, - inherit_foreign_keys = None, - extension = None, - order_by = False, - always_refresh = False, - version_id_col = None, - version_id_generator = None, - polymorphic_on=None, - _polymorphic_map=None, - polymorphic_identity=None, - concrete=False, - with_polymorphic=None, - allow_null_pks=None, - allow_partial_pks=True, - batch=True, - column_prefix=None, - include_properties=None, - exclude_properties=None, - passive_updates=True, - eager_defaults=False, - _compiled_cache_size=100, - ): - """Construct a new mapper. - - Mappers are normally constructed via the - :func:`~sqlalchemy.orm.mapper` function. See for details. 
-
-        """
-
-        self.class_ = util.assert_arg_type(class_, type, 'class_')
-
-        self.class_manager = None
-
-        self._primary_key_argument = util.to_list(primary_key)
-        self.non_primary = non_primary
-
-        if order_by is not False:
-            self.order_by = util.to_list(order_by)
-        else:
-            self.order_by = order_by
-
-        self.always_refresh = always_refresh
-        self.version_id_col = version_id_col
-        self.version_id_generator = version_id_generator or \
-                                        (lambda x:(x or 0) + 1)
-        self.concrete = concrete
-        self.single = False
-        self.inherits = inherits
-        self.local_table = local_table
-        self.inherit_condition = inherit_condition
-        self.inherit_foreign_keys = inherit_foreign_keys
-        self._init_properties = properties or {}
-        self.delete_orphans = []
-        self.batch = batch
-        self.eager_defaults = eager_defaults
-        self.column_prefix = column_prefix
-        self.polymorphic_on = expression._clause_element_as_expr(polymorphic_on)
-        self._dependency_processors = []
-        self.validators = util.immutabledict()
-        self.passive_updates = passive_updates
-        self._clause_adapter = None
-        self._requires_row_aliasing = False
-        self._inherits_equated_pairs = None
-        self._memoized_values = {}
-        self._compiled_cache_size = _compiled_cache_size
-        self._reconstructor = None
-        self._deprecated_extensions = util.to_list(extension or [])
-
-        if allow_null_pks:
-            util.warn_deprecated(
-                    "the allow_null_pks option to Mapper() is "
-                    "deprecated.  It is now allow_partial_pks=False|True, "
-                    "defaults to True.")
-            allow_partial_pks = allow_null_pks
-
-        self.allow_partial_pks = allow_partial_pks
-
-        self._set_with_polymorphic(with_polymorphic)
-
-        if isinstance(self.local_table, expression._SelectBase):
-            raise sa_exc.InvalidRequestError(
-                    "When mapping against a select() construct, map against "
-                    "an alias() of the construct instead. "
-                    "This is because several databases don't allow a "
-                    "SELECT from a subquery that does not have an alias."
-                    )
-
-        if self.with_polymorphic and \
-                    isinstance(self.with_polymorphic[1],
-                                expression._SelectBase):
-            self.with_polymorphic = (self.with_polymorphic[0],
-                                self.with_polymorphic[1].alias())
-
-        # our 'polymorphic identity', a string name that when located in a
-        # result set row indicates this Mapper should be used to construct
-        # the object instance for that row.
-        self.polymorphic_identity = polymorphic_identity
-
-        # a dictionary of 'polymorphic identity' names, associating those
-        # names with Mappers that will be used to construct object instances
-        # upon a select operation.
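The default `version_id_generator` assigned above is just `lambda x: (x or 0) + 1`; spelled out under a hypothetical helper name of our own:

```python
# Equivalent of the default generator above; the name is invented.
def default_version_id_generator(current_version):
    # a missing version counts as 0; every write bumps it by one
    return (current_version or 0) + 1

assert default_version_id_generator(None) == 1
assert default_version_id_generator(3) == 4
```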
- if _polymorphic_map is None: - self.polymorphic_map = {} - else: - self.polymorphic_map = _polymorphic_map - - if include_properties is not None: - self.include_properties = util.to_set(include_properties) - else: - self.include_properties = None - if exclude_properties: - self.exclude_properties = util.to_set(exclude_properties) - else: - self.exclude_properties = None - - self.configured = False - - # prevent this mapper from being constructed - # while a configure_mappers() is occurring (and defer a configure_mappers() - # until construction succeeds) - _COMPILE_MUTEX.acquire() - try: - self._configure_inheritance() - self._configure_legacy_instrument_class() - self._configure_class_instrumentation() - self._configure_listeners() - self._configure_properties() - self._configure_polymorphic_setter() - self._configure_pks() - global _new_mappers - _new_mappers = True - self._log("constructed") - self._expire_memoizations() - finally: - _COMPILE_MUTEX.release() - - # major attributes initialized at the classlevel so that - # they can be Sphinx-documented. - - local_table = None - """The :class:`.Selectable` which this :class:`.Mapper` manages. - - Typically is an instance of :class:`.Table` or :class:`.Alias`. - May also be ``None``. - - The "local" table is the - selectable that the :class:`.Mapper` is directly responsible for - managing from an attribute access and flush perspective. For - non-inheriting mappers, the local table is the same as the - "mapped" table. For joined-table inheritance mappers, local_table - will be the particular sub-table of the overall "join" which - this :class:`.Mapper` represents. If this mapper is a - single-table inheriting mapper, local_table will be ``None``. - - See also :attr:`~.Mapper.mapped_table`. - - """ - - mapped_table = None - """The :class:`.Selectable` to which this :class:`.Mapper` is mapped. - - Typically an instance of :class:`.Table`, :class:`.Join`, or - :class:`.Alias`. - - The "mapped" table is the selectable that - the mapper selects from during queries. For non-inheriting - mappers, the mapped table is the same as the "local" table. - For joined-table inheritance mappers, mapped_table references the - full :class:`.Join` representing full rows for this particular - subclass. For single-table inheritance mappers, mapped_table - references the base table. - - See also :attr:`~.Mapper.local_table`. - - """ - - inherits = None - """References the :class:`.Mapper` which this :class:`.Mapper` - inherits from, if any. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - configured = None - """Represent ``True`` if this :class:`.Mapper` has been configured. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - See also :func:`.configure_mappers`. - - """ - - concrete = None - """Represent ``True`` if this :class:`.Mapper` is a concrete - inheritance mapper. - - This is a *read only* attribute determined during mapper construction. - Behavior is undefined if directly modified. - - """ - - tables = None - """An iterable containing the collection of :class:`.Table` objects - which this :class:`.Mapper` is aware of. - - If the mapper is mapped to a :class:`.Join`, or an :class:`.Alias` - representing a :class:`.Select`, the individual :class:`.Table` - objects that comprise the full construct will be represented here. - - This is a *read only* attribute determined during mapper construction. 
-    Behavior is undefined if directly modified.
-
-    """
-
-    primary_key = None
-    """An iterable containing the collection of :class:`.Column` objects
-    which comprise the 'primary key' of the mapped table, from the
-    perspective of this :class:`.Mapper`.
-
-    This list is against the selectable in :attr:`~.Mapper.mapped_table`. In the
-    case of inheriting mappers, some columns may be managed by a superclass
-    mapper.  For example, in the case of a :class:`.Join`, the primary
-    key is determined by all of the primary key columns across all tables
-    referenced by the :class:`.Join`.
-
-    The list is also not necessarily the same as the primary key column
-    collection associated with the underlying tables; the :class:`.Mapper`
-    features a ``primary_key`` argument that can override what the
-    :class:`.Mapper` considers as primary key columns.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    class_ = None
-    """The Python class which this :class:`.Mapper` maps.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    class_manager = None
-    """The :class:`.ClassManager` which maintains event listeners
-    and class-bound descriptors for this :class:`.Mapper`.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    single = None
-    """Represent ``True`` if this :class:`.Mapper` is a single table
-    inheritance mapper.
-
-    :attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    non_primary = None
-    """Represent ``True`` if this :class:`.Mapper` is a "non-primary"
-    mapper, e.g. a mapper that is used only to select rows but not for
-    persistence management.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    polymorphic_on = None
-    """The :class:`.Column` specified as the ``polymorphic_on`` column
-    for this :class:`.Mapper`, within an inheritance scenario.
-
-    This attribute may also be of other types besides :class:`.Column`
-    in a future SQLAlchemy release.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    polymorphic_map = None
-    """A mapping of "polymorphic identity" identifiers mapped to :class:`.Mapper`
-    instances, within an inheritance scenario.
-
-    The identifiers can be of any type which is comparable to the
-    type of column represented by :attr:`~.Mapper.polymorphic_on`.
-
-    An inheritance chain of mappers will all reference the same
-    polymorphic map object.  The object is used to correlate incoming
-    result rows to target mappers.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    polymorphic_identity = None
-    """Represent an identifier which is matched against the :attr:`~.Mapper.polymorphic_on`
-    column during result row loading.
-
-    Used only with inheritance, this object can be of any type which is
-    comparable to the type of column represented by :attr:`~.Mapper.polymorphic_on`.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    base_mapper = None
-    """The base-most :class:`.Mapper` in an inheritance chain.
-
-    In a non-inheriting scenario, this attribute will always be this
-    :class:`.Mapper`.  In an inheritance scenario, it references
-    the :class:`.Mapper` which is parent to all other :class:`.Mapper`
-    objects in the inheritance chain.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    columns = None
-    """A collection of :class:`.Column` or other scalar expression
-    objects maintained by this :class:`.Mapper`.
-
-    The collection behaves the same as that of the ``c`` attribute on
-    any :class:`.Table` object, except that only those columns included in
-    this mapping are present, and are keyed based on the attribute name
-    defined in the mapping, not necessarily the ``key`` attribute of the
-    :class:`.Column` itself.  Additionally, scalar expressions mapped
-    by :func:`.column_property` are also present here.
-
-    This is a *read only* attribute determined during mapper construction.
-    Behavior is undefined if directly modified.
-
-    """
-
-    validators = None
-    """An immutable dictionary of attributes which have been decorated
-    using the :func:`~.orm.validates` decorator.
-
-    The dictionary contains string attribute names as keys
-    mapped to the actual validation method.
-
-    """
-
-    c = None
-    """A synonym for :attr:`~.Mapper.columns`."""
-
-    dispatch = event.dispatcher(events.MapperEvents)
-
-    def _configure_inheritance(self):
-        """Configure settings related to inheriting and/or inherited mappers
-        being present."""
-
-        # a set of all mappers which inherit from this one.
-        self._inheriting_mappers = set()
-
-        if self.inherits:
-            if isinstance(self.inherits, type):
-                self.inherits = class_mapper(self.inherits, compile=False)
-            if not issubclass(self.class_, self.inherits.class_):
-                raise sa_exc.ArgumentError(
-                        "Class '%s' does not inherit from '%s'" %
-                        (self.class_.__name__, self.inherits.class_.__name__))
-            if self.non_primary != self.inherits.non_primary:
-                np = not self.non_primary and "primary" or "non-primary"
-                raise sa_exc.ArgumentError(
-                        "Inheritance of %s mapper for class '%s' is "
-                        "only allowed from a %s mapper" %
-                        (np, self.class_.__name__, np))
-            # inherit_condition is optional.
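Before the inheritance branches continue below, it is worth seeing the `polymorphic_map` contract documented earlier in this hunk in miniature: one shared dictionary maps a row's polymorphic identity to the mapper used to build the object. A toy model of that dispatch, with invented names and plain classes standing in for mappers:

```python
# Classes stand in for mappers; the dict plays the role of polymorphic_map.
polymorphic_map = {}


class Employee(object):
    pass


class Engineer(Employee):
    pass


polymorphic_map['employee'] = Employee
polymorphic_map['engineer'] = Engineer


def construct_from_row(row):
    """Pick the target class via the row's polymorphic identity."""
    cls = polymorphic_map[row['type']]
    obj = cls.__new__(cls)
    obj.__dict__.update(row)
    return obj


obj = construct_from_row({'type': 'engineer', 'name': 'ed'})
assert isinstance(obj, Engineer)
assert obj.name == 'ed'
```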
- if self.local_table is None: - self.local_table = self.inherits.local_table - self.mapped_table = self.inherits.mapped_table - self.single = True - elif not self.local_table is self.inherits.local_table: - if self.concrete: - self.mapped_table = self.local_table - for mapper in self.iterate_to_root(): - if mapper.polymorphic_on is not None: - mapper._requires_row_aliasing = True - else: - if self.inherit_condition is None: - # figure out inherit condition from our table to the - # immediate table of the inherited mapper, not its - # full table which could pull in other stuff we dont - # want (allows test/inheritance.InheritTest4 to pass) - self.inherit_condition = sqlutil.join_condition( - self.inherits.local_table, - self.local_table) - self.mapped_table = sql.join( - self.inherits.mapped_table, - self.local_table, - self.inherit_condition) - - fks = util.to_set(self.inherit_foreign_keys) - self._inherits_equated_pairs = sqlutil.criterion_as_pairs( - self.mapped_table.onclause, - consider_as_foreign_keys=fks) - else: - self.mapped_table = self.local_table - - if self.polymorphic_identity is not None and not self.concrete: - self._identity_class = self.inherits._identity_class - else: - self._identity_class = self.class_ - - if self.version_id_col is None: - self.version_id_col = self.inherits.version_id_col - self.version_id_generator = self.inherits.version_id_generator - elif self.inherits.version_id_col is not None and \ - self.version_id_col is not self.inherits.version_id_col: - util.warn( - "Inheriting version_id_col '%s' does not match inherited " - "version_id_col '%s' and will not automatically populate " - "the inherited versioning column. " - "version_id_col should only be specified on " - "the base-most mapper that includes versioning." % - (self.version_id_col.description, - self.inherits.version_id_col.description) - ) - - if self.order_by is False and \ - not self.concrete and \ - self.inherits.order_by is not False: - self.order_by = self.inherits.order_by - - self.polymorphic_map = self.inherits.polymorphic_map - self.batch = self.inherits.batch - self.inherits._inheriting_mappers.add(self) - self.base_mapper = self.inherits.base_mapper - self.passive_updates = self.inherits.passive_updates - self._all_tables = self.inherits._all_tables - - if self.polymorphic_identity is not None: - self.polymorphic_map[self.polymorphic_identity] = self - - else: - self._all_tables = set() - self.base_mapper = self - self.mapped_table = self.local_table - if self.polymorphic_identity is not None: - self.polymorphic_map[self.polymorphic_identity] = self - self._identity_class = self.class_ - - if self.mapped_table is None: - raise sa_exc.ArgumentError( - "Mapper '%s' does not have a mapped_table specified." - % self) - - def _set_with_polymorphic(self, with_polymorphic): - if with_polymorphic == '*': - self.with_polymorphic = ('*', None) - elif isinstance(with_polymorphic, (tuple, list)): - if isinstance(with_polymorphic[0], (basestring, tuple, list)): - self.with_polymorphic = with_polymorphic - else: - self.with_polymorphic = (with_polymorphic, None) - elif with_polymorphic is not None: - raise sa_exc.ArgumentError("Invalid setting for with_polymorphic") - else: - self.with_polymorphic = None - - if isinstance(self.local_table, expression._SelectBase): - raise sa_exc.InvalidRequestError( - "When mapping against a select() construct, map against " - "an alias() of the construct instead." 
- "This because several databases don't allow a " - "SELECT from a subquery that does not have an alias." - ) - - if self.with_polymorphic and \ - isinstance(self.with_polymorphic[1], - expression._SelectBase): - self.with_polymorphic = (self.with_polymorphic[0], - self.with_polymorphic[1].alias()) - if self.configured: - self._expire_memoizations() - - def _set_concrete_base(self, mapper): - """Set the given :class:`.Mapper` as the 'inherits' for this :class:`.Mapper`, - assuming this :class:`.Mapper` is concrete and does not already have - an inherits.""" - - assert self.concrete - assert not self.inherits - assert isinstance(mapper, Mapper) - self.inherits = mapper - self.inherits.polymorphic_map.update(self.polymorphic_map) - self.polymorphic_map = self.inherits.polymorphic_map - for mapper in self.iterate_to_root(): - if mapper.polymorphic_on is not None: - mapper._requires_row_aliasing = True - self.batch = self.inherits.batch - for mp in self.self_and_descendants: - mp.base_mapper = self.inherits.base_mapper - self.inherits._inheriting_mappers.add(self) - self.passive_updates = self.inherits.passive_updates - self._all_tables = self.inherits._all_tables - for key, prop in mapper._props.iteritems(): - if key not in self._props and \ - not self._should_exclude(key, key, local=False, - column=None): - self._adapt_inherited_property(key, prop, False) - - - def _set_polymorphic_on(self, polymorphic_on): - self.polymorphic_on = polymorphic_on - self._configure_polymorphic_setter(True) - - def _configure_legacy_instrument_class(self): - - if self.inherits: - self.dispatch._update(self.inherits.dispatch) - super_extensions = set(chain(*[m._deprecated_extensions - for m in self.inherits.iterate_to_root()])) - else: - super_extensions = set() - - for ext in self._deprecated_extensions: - if ext not in super_extensions: - ext._adapt_instrument_class(self, ext) - - def _configure_listeners(self): - if self.inherits: - super_extensions = set(chain(*[m._deprecated_extensions - for m in self.inherits.iterate_to_root()])) - else: - super_extensions = set() - - for ext in self._deprecated_extensions: - if ext not in super_extensions: - ext._adapt_listener(self, ext) - - if self.inherits: - self.class_manager.dispatch._update( - self.inherits.class_manager.dispatch) - - def _configure_class_instrumentation(self): - """If this mapper is to be a primary mapper (i.e. the - non_primary flag is not set), associate this Mapper with the - given class_ and entity name. - - Subsequent calls to ``class_mapper()`` for the class_/entity - name combination will return this mapper. Also decorate the - `__init__` method on the mapped class to include optional - auto-session attachment logic. - - """ - manager = attributes.manager_of_class(self.class_) - - if self.non_primary: - if not manager or not manager.is_mapped: - raise sa_exc.InvalidRequestError( - "Class %s has no primary mapper configured. Configure " - "a primary mapper first before setting up a non primary " - "Mapper." % self.class_) - self.class_manager = manager - self._identity_class = manager.mapper._identity_class - _mapper_registry[self] = True - return - - if manager is not None: - assert manager.class_ is self.class_ - if manager.is_mapped: - raise sa_exc.ArgumentError( - "Class '%s' already has a primary mapper defined. " - "Use non_primary=True to " - "create a non primary Mapper. clear_mappers() will " - "remove *all* current mappers from all classes." 
% - self.class_) - #else: - # a ClassManager may already exist as - # ClassManager.instrument_attribute() creates - # new managers for each subclass if they don't yet exist. - - _mapper_registry[self] = True - - self.dispatch.instrument_class(self, self.class_) - - if manager is None: - manager = instrumentation.register_class(self.class_) - - self.class_manager = manager - - manager.mapper = self - manager.deferred_scalar_loader = self._load_scalar_attributes - - # The remaining members can be added by any mapper, - # e_name None or not. - if manager.info.get(_INSTRUMENTOR, False): - return - - event.listen(manager, 'first_init', _event_on_first_init, raw=True) - event.listen(manager, 'init', _event_on_init, raw=True) - event.listen(manager, 'resurrect', _event_on_resurrect, raw=True) - - for key, method in util.iterate_attributes(self.class_): - if isinstance(method, types.FunctionType): - if hasattr(method, '__sa_reconstructor__'): - self._reconstructor = method - event.listen(manager, 'load', _event_on_load, raw=True) - elif hasattr(method, '__sa_validators__'): - include_removes = getattr(method, "__sa_include_removes__", False) - for name in method.__sa_validators__: - self.validators = self.validators.union( - {name : (method, include_removes)} - ) - - manager.info[_INSTRUMENTOR] = self - - @util.deprecated("0.7", message=":meth:`.Mapper.compile` " - "is replaced by :func:`.configure_mappers`") - def compile(self): - """Initialize the inter-mapper relationships of all mappers that - have been constructed thus far. - - """ - configure_mappers() - return self - - - @property - @util.deprecated("0.7", message=":attr:`.Mapper.compiled` " - "is replaced by :attr:`.Mapper.configured`") - def compiled(self): - return self.configured - - def dispose(self): - # Disable any attribute-based compilation. - self.configured = True - - if hasattr(self, '_configure_failed'): - del self._configure_failed - - if not self.non_primary and \ - self.class_manager.is_mapped and \ - self.class_manager.mapper is self: - instrumentation.unregister_class(self.class_) - - def _configure_pks(self): - - self.tables = sqlutil.find_tables(self.mapped_table) - - self._pks_by_table = {} - self._cols_by_table = {} - - all_cols = util.column_set(chain(*[ - col.proxy_set for col in - self._columntoproperty])) - - pk_cols = util.column_set(c for c in all_cols if c.primary_key) - - # identify primary key columns which are also mapped by this mapper. 
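The attribute scan above is what picks up methods marked with `__sa_reconstructor__` and `__sa_validators__`, set by the `reconstructor()` and `validates()` decorators defined near the end of this module. A small usage sketch with a hypothetical `User` class:

```python
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import reconstructor

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))

    def __init__(self, name):
        self.name = name
        self._cache = {}

    @reconstructor
    def init_on_load(self):
        # __init__ is bypassed when loading from the database;
        # the 'load' event fires this method instead
        self._cache = {}
```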
- tables = set(self.tables + [self.mapped_table]) - self._all_tables.update(tables) - for t in tables: - if t.primary_key and pk_cols.issuperset(t.primary_key): - # ordering is important since it determines the ordering of - # mapper.primary_key (and therefore query.get()) - self._pks_by_table[t] =\ - util.ordered_column_set(t.primary_key).\ - intersection(pk_cols) - self._cols_by_table[t] = \ - util.ordered_column_set(t.c).\ - intersection(all_cols) - - # determine cols that aren't expressed within our tables; mark these - # as "read only" properties which are refreshed upon INSERT/UPDATE - self._readonly_props = set( - self._columntoproperty[col] - for col in self._columntoproperty - if not hasattr(col, 'table') or - col.table not in self._cols_by_table) - - # if explicit PK argument sent, add those columns to the - # primary key mappings - if self._primary_key_argument: - for k in self._primary_key_argument: - if k.table not in self._pks_by_table: - self._pks_by_table[k.table] = util.OrderedSet() - self._pks_by_table[k.table].add(k) - - # otherwise, see that we got a full PK for the mapped table - elif self.mapped_table not in self._pks_by_table or \ - len(self._pks_by_table[self.mapped_table]) == 0: - raise sa_exc.ArgumentError( - "Mapper %s could not assemble any primary " - "key columns for mapped table '%s'" % - (self, self.mapped_table.description)) - elif self.local_table not in self._pks_by_table and \ - isinstance(self.local_table, schema.Table): - util.warn("Could not assemble any primary " - "keys for locally mapped table '%s' - " - "no rows will be persisted in this Table." - % self.local_table.description) - - if self.inherits and \ - not self.concrete and \ - not self._primary_key_argument: - # if inheriting, the "primary key" for this mapper is - # that of the inheriting (unless concrete or explicit) - self.primary_key = self.inherits.primary_key - else: - # determine primary key from argument or mapped_table pks - - # reduce to the minimal set of columns - if self._primary_key_argument: - primary_key = sqlutil.reduce_columns( - [self.mapped_table.corresponding_column(c) for c in - self._primary_key_argument], - ignore_nonexistent_tables=True) - else: - primary_key = sqlutil.reduce_columns( - self._pks_by_table[self.mapped_table], - ignore_nonexistent_tables=True) - - if len(primary_key) == 0: - raise sa_exc.ArgumentError( - "Mapper %s could not assemble any primary " - "key columns for mapped table '%s'" % - (self, self.mapped_table.description)) - - self.primary_key = tuple(primary_key) - self._log("Identified primary key columns: %s", primary_key) - - def _configure_properties(self): - - # Column and other ClauseElement objects which are mapped - self.columns = self.c = util.OrderedProperties() - - # object attribute names mapped to MapperProperty objects - self._props = util.OrderedDict() - - # table columns mapped to lists of MapperProperty objects - # using a list allows a single column to be defined as - # populating multiple object attributes - self._columntoproperty = _ColumnMapping(self) - - # load custom properties - if self._init_properties: - for key, prop in self._init_properties.iteritems(): - self._configure_property(key, prop, False) - - # pull properties from the inherited mapper if any. 
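`_configure_pks()` falls back to the mapper's explicit `primary_key` argument when the selectable has no usable primary key of its own, which is how views and PK-less tables get mapped. A classical-mapping sketch (table and class names are hypothetical):

```python
from sqlalchemy import MetaData, Table, Column, Integer, String
from sqlalchemy.orm import mapper

metadata = MetaData()

# a view-like table with no PRIMARY KEY constraint of its own
log_view = Table('log_view', metadata,
                 Column('host', String(64)),
                 Column('ts', Integer),
                 Column('line', String(255)))

class LogLine(object):
    pass

# the explicit argument populates _pks_by_table and, after
# reduce_columns(), becomes mapper.primary_key
mapper(LogLine, log_view, primary_key=[log_view.c.host, log_view.c.ts])
```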
- if self.inherits: - for key, prop in self.inherits._props.iteritems(): - if key not in self._props and \ - not self._should_exclude(key, key, local=False, column=None): - self._adapt_inherited_property(key, prop, False) - - # create properties for each column in the mapped table, - # for those columns which don't already map to a property - for column in self.mapped_table.columns: - if column in self._columntoproperty: - continue - - column_key = (self.column_prefix or '') + column.key - - if self._should_exclude( - column.key, column_key, - local=self.local_table.c.contains_column(column), - column=column - ): - continue - - # adjust the "key" used for this column to that - # of the inheriting mapper - for mapper in self.iterate_to_root(): - if column in mapper._columntoproperty: - column_key = mapper._columntoproperty[column].key - - self._configure_property(column_key, - column, - init=False, - setparent=True) - - def _configure_polymorphic_setter(self, init=False): - """Configure an attribute on the mapper representing the - 'polymorphic_on' column, if applicable, and not - already generated by _configure_properties (which is typical). - - Also create a setter function which will assign this - attribute to the value of the 'polymorphic_identity' - upon instance construction, also if applicable. This - routine will run when an instance is created. - - """ - setter = False - - if self.polymorphic_on is not None: - setter = True - - if isinstance(self.polymorphic_on, basestring): - # polymorphic_on specified as as string - link - # it to mapped ColumnProperty - try: - self.polymorphic_on = self._props[self.polymorphic_on] - except KeyError: - raise sa_exc.ArgumentError( - "Can't determine polymorphic_on " - "value '%s' - no attribute is " - "mapped to this name." % self.polymorphic_on) - - if self.polymorphic_on in self._columntoproperty: - # polymorphic_on is a column that is already mapped - # to a ColumnProperty - prop = self._columntoproperty[self.polymorphic_on] - polymorphic_key = prop.key - self.polymorphic_on = prop.columns[0] - polymorphic_key = prop.key - elif isinstance(self.polymorphic_on, MapperProperty): - # polymorphic_on is directly a MapperProperty, - # ensure it's a ColumnProperty - if not isinstance(self.polymorphic_on, properties.ColumnProperty): - raise sa_exc.ArgumentError( - "Only direct column-mapped " - "property or SQL expression " - "can be passed for polymorphic_on") - prop = self.polymorphic_on - self.polymorphic_on = prop.columns[0] - polymorphic_key = prop.key - elif not expression.is_column(self.polymorphic_on): - # polymorphic_on is not a Column and not a ColumnProperty; - # not supported right now. - raise sa_exc.ArgumentError( - "Only direct column-mapped " - "property or SQL expression " - "can be passed for polymorphic_on" - ) - else: - # polymorphic_on is a Column or SQL expression and doesn't - # appear to be mapped. - # this means it can be 1. only present in the with_polymorphic - # selectable or 2. a totally standalone SQL expression which we'd - # hope is compatible with this mapper's mapped_table - col = self.mapped_table.corresponding_column(self.polymorphic_on) - if col is None: - # polymorphic_on doesn't derive from any column/expression - # isn't present in the mapped table. - # we will make a "hidden" ColumnProperty for it. - # Just check that if it's directly a schema.Column and we - # have with_polymorphic, it's likely a user error if the - # schema.Column isn't represented somehow in either mapped_table or - # with_polymorphic. 
Otherwise as of 0.7.4 we just go with it - # and assume the user wants it that way (i.e. a CASE statement) - setter = False - instrument = False - col = self.polymorphic_on - if isinstance(col, schema.Column) and ( - self.with_polymorphic is None or \ - self.with_polymorphic[1].corresponding_column(col) is None - ): - raise sa_exc.InvalidRequestError( - "Could not map polymorphic_on column " - "'%s' to the mapped table - polymorphic " - "loads will not function properly" - % col.description) - else: - # column/expression that polymorphic_on derives from - # is present in our mapped table - # and is probably mapped, but polymorphic_on itself - # is not. This happens when - # the polymorphic_on is only directly present in the - # with_polymorphic selectable, as when use polymorphic_union. - # we'll make a separate ColumnProperty for it. - instrument = True - key = getattr(col, 'key', None) - if key: - if self._should_exclude(col.key, col.key, False, col): - raise sa_exc.InvalidRequestError( - "Cannot exclude or override the discriminator column %r" % - col.key) - else: - self.polymorphic_on = col = col.label("_sa_polymorphic_on") - key = col.key - - self._configure_property( - key, - properties.ColumnProperty(col, _instrument=instrument), - init=init, setparent=True) - polymorphic_key = key - else: - # no polymorphic_on was set. - # check inheriting mappers for one. - for mapper in self.iterate_to_root(): - # determine if polymorphic_on of the parent - # should be propagated here. If the col - # is present in our mapped table, or if our mapped - # table is the same as the parent (i.e. single table - # inheritance), we can use it - if mapper.polymorphic_on is not None: - if self.mapped_table is mapper.mapped_table: - self.polymorphic_on = mapper.polymorphic_on - else: - self.polymorphic_on = \ - self.mapped_table.corresponding_column( - mapper.polymorphic_on) - # we can use the parent mapper's _set_polymorphic_identity - # directly; it ensures the polymorphic_identity of the - # instance's mapper is used so is portable to subclasses. - if self.polymorphic_on is not None: - self._set_polymorphic_identity = mapper._set_polymorphic_identity - else: - self._set_polymorphic_identity = None - return - - if setter: - def _set_polymorphic_identity(state): - dict_ = state.dict - state.get_impl(polymorphic_key).set(state, dict_, - state.manager.mapper.polymorphic_identity, None) - - self._set_polymorphic_identity = _set_polymorphic_identity - else: - self._set_polymorphic_identity = None - - - - def _adapt_inherited_property(self, key, prop, init): - if not self.concrete: - self._configure_property(key, prop, init=False, setparent=False) - elif key not in self._props: - self._configure_property( - key, - properties.ConcreteInheritedProperty(), - init=init, setparent=True) - - def _configure_property(self, key, prop, init=True, setparent=True): - self._log("_configure_property(%s, %s)", key, prop.__class__.__name__) - - if not isinstance(prop, MapperProperty): - # we were passed a Column or a list of Columns; - # generate a properties.ColumnProperty - columns = util.to_list(prop) - column = columns[0] - if not expression.is_column(column): - raise sa_exc.ArgumentError( - "%s=%r is not an instance of MapperProperty or Column" - % (key, prop)) - - prop = self._props.get(key, None) - - if isinstance(prop, properties.ColumnProperty): - if prop.parent is self: - raise sa_exc.InvalidRequestError( - "Implicitly combining column %s with column " - "%s under attribute '%s'. 
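As the comments above note, since 0.7.4 `polymorphic_on` may be a standalone SQL expression, which the mapper wraps in a hidden, labeled ColumnProperty. A sketch of that case in classical-mapping style (tables and classes hypothetical):

```python
from sqlalchemy import MetaData, Table, Column, Integer, String, case
from sqlalchemy.orm import mapper

metadata = MetaData()
employee = Table('employee', metadata,
                 Column('id', Integer, primary_key=True),
                 Column('kind', String(16)))

class Employee(object): pass
class Engineer(Employee): pass

# a CASE expression as the discriminator; the mapper labels it
# "_sa_polymorphic_on" and maps it as a read-only column property
emp = mapper(Employee, employee,
             polymorphic_on=case([(employee.c.kind == 'eng', 'engineer')],
                                 else_='employee'),
             polymorphic_identity='employee')
mapper(Engineer, inherits=emp, polymorphic_identity='engineer')
```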
Please configure one " - "or more attributes for these same-named columns " - "explicitly." - % (prop.columns[-1], column, key)) - - # existing properties.ColumnProperty from an inheriting - # mapper. make a copy and append our column to it - prop = prop.copy() - prop.columns.insert(0, column) - self._log("inserting column to existing list " - "in properties.ColumnProperty %s" % (key)) - - elif prop is None or isinstance(prop, properties.ConcreteInheritedProperty): - mapped_column = [] - for c in columns: - mc = self.mapped_table.corresponding_column(c) - if mc is None: - mc = self.local_table.corresponding_column(c) - if mc is not None: - # if the column is in the local table but not the - # mapped table, this corresponds to adding a - # column after the fact to the local table. - # [ticket:1523] - self.mapped_table._reset_exported() - mc = self.mapped_table.corresponding_column(c) - if mc is None: - raise sa_exc.ArgumentError( - "When configuring property '%s' on %s, " - "column '%s' is not represented in the mapper's " - "table. Use the `column_property()` function to " - "force this column to be mapped as a read-only " - "attribute." % (key, self, c)) - mapped_column.append(mc) - prop = properties.ColumnProperty(*mapped_column) - else: - raise sa_exc.ArgumentError( - "WARNING: when configuring property '%s' on %s, " - "column '%s' conflicts with property '%r'. " - "To resolve this, map the column to the class under a " - "different name in the 'properties' dictionary. Or, " - "to remove all awareness of the column entirely " - "(including its availability as a foreign key), " - "use the 'include_properties' or 'exclude_properties' " - "mapper arguments to control specifically which table " - "columns get mapped." % - (key, self, column.key, prop)) - - if isinstance(prop, properties.ColumnProperty): - col = self.mapped_table.corresponding_column(prop.columns[0]) - - # if the column is not present in the mapped table, - # test if a column has been added after the fact to the - # parent table (or their parent, etc.) [ticket:1570] - if col is None and self.inherits: - path = [self] - for m in self.inherits.iterate_to_root(): - col = m.local_table.corresponding_column(prop.columns[0]) - if col is not None: - for m2 in path: - m2.mapped_table._reset_exported() - col = self.mapped_table.corresponding_column( - prop.columns[0]) - break - path.append(m) - - # subquery expression, column not present in the mapped - # selectable. - if col is None: - col = prop.columns[0] - - # column is coming in after _readonly_props was - # initialized; check for 'readonly' - if hasattr(self, '_readonly_props') and \ - (not hasattr(col, 'table') or - col.table not in self._cols_by_table): - self._readonly_props.add(prop) - - else: - # if column is coming in after _cols_by_table was - # initialized, ensure the col is in the right set - if hasattr(self, '_cols_by_table') and \ - col.table in self._cols_by_table and \ - col not in self._cols_by_table[col.table]: - self._cols_by_table[col.table].add(col) - - # if this properties.ColumnProperty represents the "polymorphic - # discriminator" column, mark it. We'll need this when rendering - # columns in SELECT statements. 
- if not hasattr(prop, '_is_polymorphic_discriminator'): - prop._is_polymorphic_discriminator = \ - (col is self.polymorphic_on or - prop.columns[0] is self.polymorphic_on) - - self.columns[key] = col - for col in prop.columns + prop._orig_columns: - for col in col.proxy_set: - self._columntoproperty[col] = prop - - prop.key = key - - if setparent: - prop.set_parent(self, init) - - if key in self._props and \ - getattr(self._props[key], '_mapped_by_synonym', False): - syn = self._props[key]._mapped_by_synonym - raise sa_exc.ArgumentError( - "Can't call map_column=True for synonym %r=%r, " - "a ColumnProperty already exists keyed to the name " - "%r for column %r" % (syn, key, key, syn) - ) - - self._props[key] = prop - - if not self.non_primary: - prop.instrument_class(self) - - for mapper in self._inheriting_mappers: - mapper._adapt_inherited_property(key, prop, init) - - if init: - prop.init() - prop.post_instrument_class(self) - - if self.configured: - self._expire_memoizations() - - def _post_configure_properties(self): - """Call the ``init()`` method on all ``MapperProperties`` - attached to this mapper. - - This is a deferred configuration step which is intended - to execute once all mappers have been constructed. - - """ - - self._log("_post_configure_properties() started") - l = [(key, prop) for key, prop in self._props.iteritems()] - for key, prop in l: - self._log("initialize prop %s", key) - - if prop.parent is self and not prop._compile_started: - prop.init() - - if prop._compile_finished: - prop.post_instrument_class(self) - - self._log("_post_configure_properties() complete") - self.configured = True - - def add_properties(self, dict_of_properties): - """Add the given dictionary of properties to this mapper, - using `add_property`. - - """ - for key, value in dict_of_properties.iteritems(): - self.add_property(key, value) - - def add_property(self, key, prop): - """Add an individual MapperProperty to this mapper. - - If the mapper has not been configured yet, just adds the - property to the initial properties dictionary sent to the - constructor. If this Mapper has already been configured, then - the given MapperProperty is configured immediately. 
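`add_property()` above routes through `_configure_property()`, so a property added after configuration behaves the same as one passed to the constructor. For illustration only, assuming a hypothetical `User`/`Address` mapping and a `users_table`:

```python
from sqlalchemy.orm import mapper, relationship

m = mapper(User, users_table)

# before configure time this just lands in _init_properties;
# afterwards it is configured (and instrumented) immediately
m.add_property('email', users_table.c.email_address)
m.add_property('addresses', relationship(Address))
```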
- - """ - self._init_properties[key] = prop - self._configure_property(key, prop, init=self.configured) - - def _expire_memoizations(self): - for mapper in self.iterate_to_root(): - _memoized_configured_property.expire_instance(mapper) - - @property - def _log_desc(self): - return "(" + self.class_.__name__ + \ - "|" + \ - (self.local_table is not None and - self.local_table.description or - str(self.local_table)) +\ - (self.non_primary and - "|non-primary" or "") + ")" - - def _log(self, msg, *args): - - self.logger.info( - "%s " + msg, *((self._log_desc,) + args) - ) - - def _log_debug(self, msg, *args): - self.logger.debug( - "%s " + msg, *((self._log_desc,) + args) - ) - - def __repr__(self): - return '' % ( - id(self), self.class_.__name__) - - def __str__(self): - return "Mapper|%s|%s%s" % ( - self.class_.__name__, - self.local_table is not None and - self.local_table.description or None, - self.non_primary and "|non-primary" or "" - ) - - def _is_orphan(self, state): - o = False - for mapper in self.iterate_to_root(): - for (key, cls) in mapper.delete_orphans: - if attributes.manager_of_class(cls).has_parent( - state, key, optimistic=bool(state.key)): - return False - o = o or bool(mapper.delete_orphans) - return o - - def has_property(self, key): - return key in self._props - - def get_property(self, key, _compile_mappers=True): - """return a MapperProperty associated with the given key. - """ - - if _compile_mappers and _new_mappers: - configure_mappers() - - try: - return self._props[key] - except KeyError: - raise sa_exc.InvalidRequestError( - "Mapper '%s' has no property '%s'" % (self, key)) - - @util.deprecated('0.6.4', - 'Call to deprecated function mapper._get_col_to_pr' - 'op(). Use mapper.get_property_by_column()') - def _get_col_to_prop(self, col): - return self._columntoproperty[col] - - def get_property_by_column(self, column): - """Given a :class:`.Column` object, return the - :class:`.MapperProperty` which maps this column.""" - - return self._columntoproperty[column] - - @property - def iterate_properties(self): - """return an iterator of all MapperProperty objects.""" - if _new_mappers: - configure_mappers() - return self._props.itervalues() - - def _mappers_from_spec(self, spec, selectable): - """given a with_polymorphic() argument, return the set of mappers it - represents. - - Trims the list of mappers to just those represented within the given - selectable, if present. This helps some more legacy-ish mappings. - - """ - if spec == '*': - mappers = list(self.self_and_descendants) - elif spec: - mappers = [_class_to_mapper(m) for m in util.to_list(spec)] - for m in mappers: - if not m.isa(self): - raise sa_exc.InvalidRequestError( - "%r does not inherit from %r" % - (m, self)) - else: - mappers = [] - - if selectable is not None: - tables = set(sqlutil.find_tables(selectable, - include_aliases=True)) - mappers = [m for m in mappers if m.local_table in tables] - - return mappers - - def _selectable_from_mappers(self, mappers): - """given a list of mappers (assumed to be within this mapper's - inheritance hierarchy), construct an outerjoin amongst those mapper's - mapped tables. 
- - """ - - from_obj = self.mapped_table - for m in mappers: - if m is self: - continue - if m.concrete: - raise sa_exc.InvalidRequestError( - "'with_polymorphic()' requires 'selectable' argument " - "when concrete-inheriting mappers are used.") - elif not m.single: - from_obj = from_obj.outerjoin(m.local_table, - m.inherit_condition) - - return from_obj - - @_memoized_configured_property - def _single_table_criterion(self): - if self.single and \ - self.inherits and \ - self.polymorphic_on is not None: - return self.polymorphic_on.in_( - m.polymorphic_identity - for m in self.self_and_descendants) - else: - return None - - @_memoized_configured_property - def _with_polymorphic_mappers(self): - if not self.with_polymorphic: - return [self] - return self._mappers_from_spec(*self.with_polymorphic) - - @_memoized_configured_property - def _with_polymorphic_selectable(self): - if not self.with_polymorphic: - return self.mapped_table - - spec, selectable = self.with_polymorphic - if selectable is not None: - return selectable - else: - return self._selectable_from_mappers( - self._mappers_from_spec(spec, selectable)) - - def _with_polymorphic_args(self, spec=None, selectable=False): - if self.with_polymorphic: - if not spec: - spec = self.with_polymorphic[0] - if selectable is False: - selectable = self.with_polymorphic[1] - - mappers = self._mappers_from_spec(spec, selectable) - if selectable is not None: - return mappers, selectable - else: - return mappers, self._selectable_from_mappers(mappers) - - @_memoized_configured_property - def _polymorphic_properties(self): - return tuple(self._iterate_polymorphic_properties( - self._with_polymorphic_mappers)) - - def _iterate_polymorphic_properties(self, mappers=None): - """Return an iterator of MapperProperty objects which will render into - a SELECT.""" - - if mappers is None: - mappers = self._with_polymorphic_mappers - - if not mappers: - for c in self.iterate_properties: - yield c - else: - # in the polymorphic case, filter out discriminator columns - # from other mappers, as these are sometimes dependent on that - # mapper's polymorphic selectable (which we don't want rendered) - for c in util.unique_list( - chain(*[list(mapper.iterate_properties) for mapper in [self] + - mappers]) - ): - if getattr(c, '_is_polymorphic_discriminator', False) and \ - (self.polymorphic_on is None or - c.columns[0] is not self.polymorphic_on): - continue - yield c - - @property - def properties(self): - raise NotImplementedError( - "Public collection of MapperProperty objects is " - "provided by the get_property() and iterate_properties " - "accessors.") - - @_memoized_configured_property - def _get_clause(self): - """create a "get clause" based on the primary key. this is used - by query.get() and many-to-one lazyloads to load this item - by primary key. - - """ - params = [(primary_key, sql.bindparam(None, type_=primary_key.type)) - for primary_key in self.primary_key] - return sql.and_(*[k==v for (k, v) in params]), \ - util.column_dict(params) - - @_memoized_configured_property - def _equivalent_columns(self): - """Create a map of all *equivalent* columns, based on - the determination of column pairs that are equated to - one another based on inherit condition. This is designed - to work with the queries that util.polymorphic_union - comes up with, which often don't include the columns from - the base table directly (including the subclass table columns - only). 
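The `_with_polymorphic_*` helpers above back both the mapper-level `with_polymorphic` argument and the query-level method of the same name. A sketch of the query-level form, assuming the joined-inheritance `Employee`/`Engineer` classes from the earlier sketch and an open `session`:

```python
# outerjoin the engineer table (and any other listed subclass tables)
# into one SELECT, instead of issuing per-row subclass loads
session.query(Employee).with_polymorphic([Engineer]).all()

# '*' selects against all descendant mappers, per _mappers_from_spec()
session.query(Employee).with_polymorphic('*').all()
```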
- - The resulting structure is a dictionary of columns mapped - to lists of equivalent columns, i.e. - - { - tablea.col1: - set([tableb.col1, tablec.col1]), - tablea.col2: - set([tabled.col2]) - } - - """ - result = util.column_dict() - def visit_binary(binary): - if binary.operator == operators.eq: - if binary.left in result: - result[binary.left].add(binary.right) - else: - result[binary.left] = util.column_set((binary.right,)) - if binary.right in result: - result[binary.right].add(binary.left) - else: - result[binary.right] = util.column_set((binary.left,)) - for mapper in self.base_mapper.self_and_descendants: - if mapper.inherit_condition is not None: - visitors.traverse( - mapper.inherit_condition, {}, - {'binary':visit_binary}) - - return result - - def _is_userland_descriptor(self, obj): - if isinstance(obj, (MapperProperty, - attributes.QueryableAttribute)): - return False - elif not hasattr(obj, '__get__'): - return False - else: - obj = util.unbound_method_to_callable(obj) - if isinstance( - obj.__get__(None, obj), - attributes.QueryableAttribute - ): - return False - return True - - def _should_exclude(self, name, assigned_name, local, column): - """determine whether a particular property should be implicitly - present on the class. - - This occurs when properties are propagated from an inherited class, or - are applied from the columns present in the mapped table. - - """ - - # check for descriptors, either local or from - # an inherited class - if local: - if self.class_.__dict__.get(assigned_name, None) is not None \ - and self._is_userland_descriptor( - self.class_.__dict__[assigned_name]): - return True - else: - if getattr(self.class_, assigned_name, None) is not None \ - and self._is_userland_descriptor( - getattr(self.class_, assigned_name)): - return True - - if self.include_properties is not None and \ - name not in self.include_properties and \ - (column is None or column not in self.include_properties): - self._log("not including property %s" % (name)) - return True - - if self.exclude_properties is not None and \ - ( - name in self.exclude_properties or \ - (column is not None and column in self.exclude_properties) - ): - self._log("excluding property %s" % (name)) - return True - - return False - - def common_parent(self, other): - """Return true if the given mapper shares a - common inherited parent as this mapper.""" - - return self.base_mapper is other.base_mapper - - def _canload(self, state, allow_subtypes): - s = self.primary_mapper() - if self.polymorphic_on is not None or allow_subtypes: - return _state_mapper(state).isa(s) - else: - return _state_mapper(state) is s - - def isa(self, other): - """Return True if the this mapper inherits from the given mapper.""" - - m = self - while m and m is not other: - m = m.inherits - return bool(m) - - def iterate_to_root(self): - m = self - while m: - yield m - m = m.inherits - - @_memoized_configured_property - def self_and_descendants(self): - """The collection including this mapper and all descendant mappers. - - This includes not just the immediately inheriting mappers but - all their inheriting mappers as well. - - """ - descendants = [] - stack = deque([self]) - while stack: - item = stack.popleft() - descendants.append(item) - stack.extend(item._inheriting_mappers) - return tuple(descendants) - - def polymorphic_iterator(self): - """Iterate through the collection including this mapper and - all descendant mappers. 
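`_should_exclude()` above consults `include_properties` and `exclude_properties`, both of which accept attribute names or Column objects. A short sketch against a hypothetical `users_table`:

```python
from sqlalchemy.orm import mapper

# whitelist: anything not listed is skipped by _should_exclude()
mapper(UserSummary, users_table, include_properties=['id', 'name'])

# blacklist: the column disappears from the mapping entirely,
# including its availability as a foreign key
mapper(User, users_table, exclude_properties=[users_table.c.audit_blob])
```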
- - This includes not just the immediately inheriting mappers but - all their inheriting mappers as well. - - To iterate through an entire hierarchy, use - ``mapper.base_mapper.polymorphic_iterator()``. - - """ - return iter(self.self_and_descendants) - - def primary_mapper(self): - """Return the primary mapper corresponding to this mapper's class key - (class).""" - - return self.class_manager.mapper - - @property - def primary_base_mapper(self): - return self.class_manager.mapper.base_mapper - - def identity_key_from_row(self, row, adapter=None): - """Return an identity-map key for use in storing/retrieving an - item from the identity map. - - row - A ``sqlalchemy.engine.base.RowProxy`` instance or a - dictionary corresponding result-set ``ColumnElement`` - instances to their values within a row. - - """ - pk_cols = self.primary_key - if adapter: - pk_cols = [adapter.columns[c] for c in pk_cols] - - return self._identity_class, \ - tuple(row[column] for column in pk_cols) - - def identity_key_from_primary_key(self, primary_key): - """Return an identity-map key for use in storing/retrieving an - item from an identity map. - - primary_key - A list of values indicating the identifier. - - """ - return self._identity_class, tuple(primary_key) - - def identity_key_from_instance(self, instance): - """Return the identity key for the given instance, based on - its primary key attributes. - - This value is typically also found on the instance state under the - attribute name `key`. - - """ - return self.identity_key_from_primary_key( - self.primary_key_from_instance(instance)) - - def _identity_key_from_state(self, state): - dict_ = state.dict - manager = state.manager - return self._identity_class, tuple([ - manager[self._columntoproperty[col].key].\ - impl.get(state, dict_, attributes.PASSIVE_OFF) - for col in self.primary_key - ]) - - def primary_key_from_instance(self, instance): - """Return the list of primary key values for the given - instance. 
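The identity-key helpers above all produce the same `(class, pk_tuple)` shape, which is what the session's identity map is keyed by. Illustration, assuming a mapped `User` class and an open `session`:

```python
from sqlalchemy.orm import object_mapper

user = session.query(User).get(5)
key = object_mapper(user).identity_key_from_instance(user)
# key is (User, (5,)) -- the _identity_class plus the primary key tuple
assert session.identity_map[key] is user
```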
- - """ - state = attributes.instance_state(instance) - return self._primary_key_from_state(state) - - def _primary_key_from_state(self, state): - dict_ = state.dict - manager = state.manager - return [ - manager[self._columntoproperty[col].key].\ - impl.get(state, dict_, attributes.PASSIVE_OFF) - for col in self.primary_key - ] - - def _get_state_attr_by_column(self, state, dict_, column, - passive=attributes.PASSIVE_OFF): - prop = self._columntoproperty[column] - return state.manager[prop.key].impl.get(state, dict_, passive=passive) - - def _set_state_attr_by_column(self, state, dict_, column, value): - prop = self._columntoproperty[column] - state.manager[prop.key].impl.set(state, dict_, value, None) - - def _get_committed_attr_by_column(self, obj, column): - state = attributes.instance_state(obj) - dict_ = attributes.instance_dict(obj) - return self._get_committed_state_attr_by_column(state, dict_, column) - - def _get_committed_state_attr_by_column(self, state, dict_, - column, passive=attributes.PASSIVE_OFF): - - prop = self._columntoproperty[column] - return state.manager[prop.key].impl.\ - get_committed_value(state, dict_, passive=passive) - - def _load_scalar_attributes(self, state, attribute_names): - """initiate a column-based attribute refresh operation.""" - - #assert mapper is _state_mapper(state) - session = sessionlib._state_session(state) - if not session: - raise orm_exc.DetachedInstanceError( - "Instance %s is not bound to a Session; " - "attribute refresh operation cannot proceed" % - (state_str(state))) - - has_key = bool(state.key) - - result = False - - if self.inherits and not self.concrete: - statement = self._optimized_get_statement(state, attribute_names) - if statement is not None: - result = session.query(self).from_statement(statement).\ - _load_on_ident(None, - only_load_props=attribute_names, - refresh_state=state) - - if result is False: - if has_key: - identity_key = state.key - else: - # this codepath is rare - only valid when inside a flush, and the - # object is becoming persistent but hasn't yet been assigned an identity_key. - # check here to ensure we have the attrs we need. - pk_attrs = [self._columntoproperty[col].key - for col in self.primary_key] - if state.expired_attributes.intersection(pk_attrs): - raise sa_exc.InvalidRequestError("Instance %s cannot be refreshed - it's not " - " persistent and does not " - "contain a full primary key." % state_str(state)) - identity_key = self._identity_key_from_state(state) - - if (_none_set.issubset(identity_key) and \ - not self.allow_partial_pks) or \ - _none_set.issuperset(identity_key): - util.warn("Instance %s to be refreshed doesn't " - "contain a full primary key - can't be refreshed " - "(and shouldn't be expired, either)." - % state_str(state)) - return - - result = session.query(self)._load_on_ident( - identity_key, - refresh_state=state, - only_load_props=attribute_names) - - # if instance is pending, a refresh operation - # may not complete (even if PK attributes are assigned) - if has_key and result is None: - raise orm_exc.ObjectDeletedError(state) - - def _optimized_get_statement(self, state, attribute_names): - """assemble a WHERE clause which retrieves a given state by primary - key, using a minimized set of tables. - - Applies to a joined-table inheritance mapper where the - requested attribute names are only present on joined tables, - not the base table. The WHERE clause attempts to include - only those tables to minimize joins. 
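`_load_scalar_attributes()` is the deferred loader installed on the class manager; it fires when an expired or unloaded column attribute is accessed. Roughly, with a mapped `User` instance in a session:

```python
session.expire(user, ['name'])  # mark the attribute stale
user.name                       # access triggers a refresh SELECT

session.expire(user, ['name'])
session.expunge(user)           # detach before the attribute reloads
user.name  # detached + expired: raises DetachedInstanceError, per the check above
```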
- - """ - props = self._props - - tables = set(chain( - *[sqlutil.find_tables(c, check_columns=True) - for key in attribute_names - for c in props[key].columns] - )) - - if self.base_mapper.local_table in tables: - return None - - class ColumnsNotAvailable(Exception): - pass - - def visit_binary(binary): - leftcol = binary.left - rightcol = binary.right - if leftcol is None or rightcol is None: - return - - if leftcol.table not in tables: - leftval = self._get_committed_state_attr_by_column( - state, state.dict, - leftcol, - passive=attributes.PASSIVE_NO_INITIALIZE) - if leftval is attributes.PASSIVE_NO_RESULT or leftval is None: - raise ColumnsNotAvailable() - binary.left = sql.bindparam(None, leftval, - type_=binary.right.type) - elif rightcol.table not in tables: - rightval = self._get_committed_state_attr_by_column( - state, state.dict, - rightcol, - passive=attributes.PASSIVE_NO_INITIALIZE) - if rightval is attributes.PASSIVE_NO_RESULT or rightval is None: - raise ColumnsNotAvailable() - binary.right = sql.bindparam(None, rightval, - type_=binary.right.type) - - allconds = [] - - try: - start = False - for mapper in reversed(list(self.iterate_to_root())): - if mapper.local_table in tables: - start = True - if start and not mapper.single: - allconds.append(visitors.cloned_traverse( - mapper.inherit_condition, - {}, - {'binary':visit_binary} - ) - ) - except ColumnsNotAvailable: - return None - - cond = sql.and_(*allconds) - - cols = [] - for key in attribute_names: - cols.extend(props[key].columns) - return sql.select(cols, cond, use_labels=True) - - def cascade_iterator(self, type_, state, halt_on=None): - """Iterate each element and its mapper in an object graph, - for all relationships that meet the given cascade rule. - - :param type_: - The name of the cascade rule (i.e. save-update, delete, - etc.) - - :param state: - The lead InstanceState. child items will be processed per - the relationships defined for this object's mapper. - - the return value are object instances; this provides a strong - reference so that they don't fall out of scope immediately. 
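`cascade_iterator()` is driven by the cascade rules declared on each `relationship()`; the unit of work asks for a rule such as `'save-update'` or `'delete'` and receives the reachable objects. A minimal declarative sketch with hypothetical classes:

```python
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Order(Base):
    __tablename__ = 'orders'
    id = Column(Integer, primary_key=True)
    # 'delete' in this rule means cascade_iterator('delete', state)
    # yields each Item when the Order is deleted
    items = relationship('Item', cascade='all, delete-orphan')

class Item(Base):
    __tablename__ = 'items'
    id = Column(Integer, primary_key=True)
    order_id = Column(Integer, ForeignKey('orders.id'))
```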
- - """ - visited_states = set() - prp, mpp = object(), object() - - visitables = deque([(deque(self._props.values()), prp, - state, state.dict)]) - - while visitables: - iterator, item_type, parent_state, parent_dict = visitables[-1] - if not iterator: - visitables.pop() - continue - - if item_type is prp: - prop = iterator.popleft() - if type_ not in prop.cascade: - continue - queue = deque(prop.cascade_iterator(type_, parent_state, - parent_dict, visited_states, halt_on)) - if queue: - visitables.append((queue,mpp, None, None)) - elif item_type is mpp: - instance, instance_mapper, corresponding_state, \ - corresponding_dict = iterator.popleft() - yield instance, instance_mapper, \ - corresponding_state, corresponding_dict - visitables.append((deque(instance_mapper._props.values()), - prp, corresponding_state, - corresponding_dict)) - - @_memoized_configured_property - def _compiled_cache(self): - return util.LRUCache(self._compiled_cache_size) - - @_memoized_configured_property - def _sorted_tables(self): - table_to_mapper = {} - for mapper in self.base_mapper.self_and_descendants: - for t in mapper.tables: - table_to_mapper[t] = mapper - - sorted_ = sqlutil.sort_tables(table_to_mapper.iterkeys()) - ret = util.OrderedDict() - for t in sorted_: - ret[t] = table_to_mapper[t] - return ret - - def _per_mapper_flush_actions(self, uow): - saves = unitofwork.SaveUpdateAll(uow, self.base_mapper) - deletes = unitofwork.DeleteAll(uow, self.base_mapper) - uow.dependencies.add((saves, deletes)) - - for dep in self._dependency_processors: - dep.per_property_preprocessors(uow) - - for prop in self._props.values(): - prop.per_property_preprocessors(uow) - - def _per_state_flush_actions(self, uow, states, isdelete): - - base_mapper = self.base_mapper - save_all = unitofwork.SaveUpdateAll(uow, base_mapper) - delete_all = unitofwork.DeleteAll(uow, base_mapper) - for state in states: - # keep saves before deletes - - # this ensures 'row switch' operations work - if isdelete: - action = unitofwork.DeleteState(uow, state, base_mapper) - uow.dependencies.add((save_all, action)) - else: - action = unitofwork.SaveUpdateState(uow, state, base_mapper) - uow.dependencies.add((action, delete_all)) - - yield action - - def _memo(self, key, callable_): - if key in self._memoized_values: - return self._memoized_values[key] - else: - self._memoized_values[key] = value = callable_() - return value - - @util.memoized_property - def _table_to_equated(self): - """memoized map of tables to collections of columns to be - synchronized upwards to the base mapper.""" - - result = util.defaultdict(list) - - for table in self._sorted_tables: - cols = set(table.c) - for m in self.iterate_to_root(): - if m._inherits_equated_pairs and \ - cols.intersection( - [l for l, r in m._inherits_equated_pairs]): - result[table].append((m, m._inherits_equated_pairs)) - - return result - - - def _instance_processor(self, context, path, reduced_path, adapter, - polymorphic_from=None, - only_load_props=None, refresh_state=None, - polymorphic_discriminator=None): - - """Produce a mapper level row processor callable - which processes rows into mapped instances.""" - - # note that this method, most of which exists in a closure - # called _instance(), resists being broken out, as - # attempts to do so tend to add significant function - # call overhead. _instance() is the most - # performance-critical section in the whole ORM. 
- - pk_cols = self.primary_key - - if polymorphic_from or refresh_state: - polymorphic_on = None - else: - if polymorphic_discriminator is not None: - polymorphic_on = polymorphic_discriminator - else: - polymorphic_on = self.polymorphic_on - polymorphic_instances = util.PopulateDict( - self._configure_subclass_mapper( - context, path, reduced_path, adapter) - ) - - version_id_col = self.version_id_col - - if adapter: - pk_cols = [adapter.columns[c] for c in pk_cols] - if polymorphic_on is not None: - polymorphic_on = adapter.columns[polymorphic_on] - if version_id_col is not None: - version_id_col = adapter.columns[version_id_col] - - identity_class = self._identity_class - - new_populators = [] - existing_populators = [] - eager_populators = [] - load_path = context.query._current_path + path - - def populate_state(state, dict_, row, isnew, only_load_props): - if isnew: - if context.propagate_options: - state.load_options = context.propagate_options - if state.load_options: - state.load_path = load_path - - if not new_populators: - self._populators(context, path, reduced_path, row, adapter, - new_populators, - existing_populators, - eager_populators - ) - - if isnew: - populators = new_populators - else: - populators = existing_populators - - if only_load_props is None: - for key, populator in populators: - populator(state, dict_, row) - elif only_load_props: - for key, populator in populators: - if key in only_load_props: - populator(state, dict_, row) - - session_identity_map = context.session.identity_map - - listeners = self.dispatch - - translate_row = listeners.translate_row or None - create_instance = listeners.create_instance or None - populate_instance = listeners.populate_instance or None - append_result = listeners.append_result or None - populate_existing = context.populate_existing or self.always_refresh - invoke_all_eagers = context.invoke_all_eagers - - if self.allow_partial_pks: - is_not_primary_key = _none_set.issuperset - else: - is_not_primary_key = _none_set.issubset - - def _instance(row, result): - if not new_populators and invoke_all_eagers: - self._populators(context, path, reduced_path, row, adapter, - new_populators, - existing_populators, - eager_populators - ) - - if translate_row: - for fn in translate_row: - ret = fn(self, context, row) - if ret is not EXT_CONTINUE: - row = ret - break - - if polymorphic_on is not None: - discriminator = row[polymorphic_on] - if discriminator is not None: - _instance = polymorphic_instances[discriminator] - if _instance: - return _instance(row, result) - - # determine identity key - if refresh_state: - identitykey = refresh_state.key - if identitykey is None: - # super-rare condition; a refresh is being called - # on a non-instance-key instance; this is meant to only - # occur within a flush() - identitykey = self._identity_key_from_state(refresh_state) - else: - identitykey = ( - identity_class, - tuple([row[column] for column in pk_cols]) - ) - - instance = session_identity_map.get(identitykey) - if instance is not None: - state = attributes.instance_state(instance) - dict_ = attributes.instance_dict(instance) - - isnew = state.runid != context.runid - currentload = not isnew - loaded_instance = False - - if not currentload and \ - version_id_col is not None and \ - context.version_check and \ - self._get_state_attr_by_column( - state, - dict_, - self.version_id_col) != \ - row[version_id_col]: - - raise orm_exc.StaleDataError( - "Instance '%s' has version id '%s' which " - "does not match database-loaded version id '%s'." 
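The version check above is the read-side half of optimistic concurrency: a row whose `version_id_col` no longer matches the in-memory value raises `StaleDataError` (the write side lives in persistence.py, deleted below). A configuration sketch with a hypothetical class:

```python
from sqlalchemy import Column, Integer
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Account(Base):
    __tablename__ = 'accounts'
    id = Column(Integer, primary_key=True)
    balance = Column(Integer)
    version_id = Column(Integer, nullable=False)
    # the default version_id_generator bumps this integer on every UPDATE
    __mapper_args__ = {'version_id_col': version_id}
```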
- % (state_str(state), - self._get_state_attr_by_column( - state, dict_, - self.version_id_col), - row[version_id_col])) - elif refresh_state: - # out of band refresh_state detected (i.e. its not in the - # session.identity_map) honor it anyway. this can happen - # if a _get() occurs within save_obj(), such as - # when eager_defaults is True. - state = refresh_state - instance = state.obj() - dict_ = attributes.instance_dict(instance) - isnew = state.runid != context.runid - currentload = True - loaded_instance = False - else: - # check for non-NULL values in the primary key columns, - # else no entity is returned for the row - if is_not_primary_key(identitykey[1]): - return None - - isnew = True - currentload = True - loaded_instance = True - - if create_instance: - for fn in create_instance: - instance = fn(self, context, - row, self.class_) - if instance is not EXT_CONTINUE: - manager = attributes.manager_of_class( - instance.__class__) - # TODO: if manager is None, raise a friendly error - # about returning instances of unmapped types - manager.setup_instance(instance) - break - else: - instance = self.class_manager.new_instance() - else: - instance = self.class_manager.new_instance() - - dict_ = attributes.instance_dict(instance) - state = attributes.instance_state(instance) - state.key = identitykey - - # attach instance to session. - state.session_id = context.session.hash_key - session_identity_map.add(state) - - if currentload or populate_existing: - # state is being fully loaded, so populate. - # add to the "context.progress" collection. - if isnew: - state.runid = context.runid - context.progress[state] = dict_ - - if populate_instance: - for fn in populate_instance: - ret = fn(self, context, row, state, - only_load_props=only_load_props, - instancekey=identitykey, isnew=isnew) - if ret is not EXT_CONTINUE: - break - else: - populate_state(state, dict_, row, isnew, only_load_props) - else: - populate_state(state, dict_, row, isnew, only_load_props) - - if loaded_instance: - state.manager.dispatch.load(state, context) - elif isnew: - state.manager.dispatch.refresh(state, context, only_load_props) - - elif state in context.partials or state.unloaded or eager_populators: - # state is having a partial set of its attributes - # refreshed. Populate those attributes, - # and add to the "context.partials" collection. 
- if state in context.partials: - isnew = False - (d_, attrs) = context.partials[state] - else: - isnew = True - attrs = state.unloaded - context.partials[state] = (dict_, attrs) - - if populate_instance: - for fn in populate_instance: - ret = fn(self, context, row, state, - only_load_props=attrs, - instancekey=identitykey, isnew=isnew) - if ret is not EXT_CONTINUE: - break - else: - populate_state(state, dict_, row, isnew, attrs) - else: - populate_state(state, dict_, row, isnew, attrs) - - for key, pop in eager_populators: - if key not in state.unloaded: - pop(state, dict_, row) - - if isnew: - state.manager.dispatch.refresh(state, context, attrs) - - - if result is not None: - if append_result: - for fn in append_result: - if fn(self, context, row, state, - result, instancekey=identitykey, - isnew=isnew) is not EXT_CONTINUE: - break - else: - result.append(instance) - else: - result.append(instance) - - return instance - return _instance - - def _populators(self, context, path, reduced_path, row, adapter, - new_populators, existing_populators, eager_populators): - """Produce a collection of attribute level row processor callables.""" - - delayed_populators = [] - pops = (new_populators, existing_populators, delayed_populators, eager_populators) - for prop in self._props.itervalues(): - for i, pop in enumerate(prop.create_row_processor( - context, path, - reduced_path, - self, row, adapter)): - if pop is not None: - pops[i].append((prop.key, pop)) - - if delayed_populators: - new_populators.extend(delayed_populators) - - def _configure_subclass_mapper(self, context, path, reduced_path, adapter): - """Produce a mapper level row processor callable factory for mappers - inheriting this one.""" - - def configure_subclass_mapper(discriminator): - try: - mapper = self.polymorphic_map[discriminator] - except KeyError: - raise AssertionError( - "No such polymorphic_identity %r is defined" % - discriminator) - if mapper is self: - return None - - # replace the tip of the path info with the subclass mapper - # being used. that way accurate "load_path" info is available - # for options invoked during deferred loads. - # we lose AliasedClass path elements this way, but currently, - # those are not needed at this stage. - - # this asserts to true - #assert mapper.isa(_class_to_mapper(path[-1])) - - return mapper._instance_processor(context, path[0:-1] + (mapper,), - reduced_path[0:-1] + (mapper.base_mapper,), - adapter, - polymorphic_from=self) - return configure_subclass_mapper - -log.class_logger(Mapper) - -def configure_mappers(): - """Initialize the inter-mapper relationships of all mappers that - have been constructed thus far. - - This function can be called any number of times, but in - most cases is handled internally. - - """ - - global _new_mappers - if not _new_mappers: - return - - _call_configured = None - _COMPILE_MUTEX.acquire() - try: - global _already_compiling - if _already_compiling: - return - _already_compiling = True - try: - - # double-check inside mutex - if not _new_mappers: - return - - # initialize properties on all mappers - # note that _mapper_registry is unordered, which - # may randomly conceal/reveal issues related to - # the order of mapper compilation - for mapper in list(_mapper_registry): - if getattr(mapper, '_configure_failed', False): - e = sa_exc.InvalidRequestError( - "One or more mappers failed to initialize - " - "can't proceed with initialization of other " - "mappers. 
Original exception was: %s" - % mapper._configure_failed) - e._configure_failed = mapper._configure_failed - raise e - if not mapper.configured: - try: - mapper._post_configure_properties() - mapper._expire_memoizations() - mapper.dispatch.mapper_configured(mapper, mapper.class_) - _call_configured = mapper - except: - exc = sys.exc_info()[1] - if not hasattr(exc, '_configure_failed'): - mapper._configure_failed = exc - raise - - _new_mappers = False - finally: - _already_compiling = False - finally: - _COMPILE_MUTEX.release() - if _call_configured is not None: - _call_configured.dispatch.after_configured() - -def reconstructor(fn): - """Decorate a method as the 'reconstructor' hook. - - Designates a method as the "reconstructor", an ``__init__``-like - method that will be called by the ORM after the instance has been - loaded from the database or otherwise reconstituted. - - The reconstructor will be invoked with no arguments. Scalar - (non-collection) database-mapped attributes of the instance will - be available for use within the function. Eagerly-loaded - collections are generally not yet available and will usually only - contain the first element. ORM state changes made to objects at - this stage will not be recorded for the next flush() operation, so - the activity within a reconstructor should be conservative. - - """ - fn.__sa_reconstructor__ = True - return fn - -def validates(*names, **kw): - """Decorate a method as a 'validator' for one or more named properties. - - Designates a method as a validator, a method which receives the - name of the attribute as well as a value to be assigned, or in the - case of a collection, the value to be added to the collection. The function - can then raise validation exceptions to halt the process from continuing - (where Python's built-in ``ValueError`` and ``AssertionError`` exceptions are - reasonable choices), or can modify or replace the value before proceeding. - The function should otherwise return the given value. - - Note that a validator for a collection **cannot** issue a load of that - collection within the validation routine - this usage raises - an assertion to avoid recursion overflows. This is a reentrant - condition which is not supported. - - :param \*names: list of attribute names to be validated. - :param include_removes: if True, "remove" events will be - sent as well - the validation function must accept an additional - argument "is_remove" which will be a boolean. - - .. versionadded:: 0.7.7 - - """ - include_removes = kw.pop('include_removes', False) - def wrap(fn): - fn.__sa_validators__ = names - fn.__sa_include_removes__ = include_removes - return fn - return wrap - -def _event_on_load(state, ctx): - instrumenting_mapper = state.manager.info[_INSTRUMENTOR] - if instrumenting_mapper._reconstructor: - instrumenting_mapper._reconstructor(state.obj()) - -def _event_on_first_init(manager, cls): - """Initial mapper compilation trigger. - - instrumentation calls this one when InstanceState - is first generated, and is needed for legacy mutable - attributes to work. - """ - - instrumenting_mapper = manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - if _new_mappers: - configure_mappers() - -def _event_on_init(state, args, kwargs): - """Run init_instance hooks. - - This also includes mapper compilation, normally not needed - here but helps with some piecemeal configuration - scenarios (such as in the ORM tutorial). 
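Pairing with the `validates()` docstring above, the `include_removes` flag (added in 0.7.7) passes an extra `is_remove` boolean so collection removals can be vetoed as well. A sketch with hypothetical declarative classes:

```python
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, validates

Base = declarative_base()

class Entry(Base):
    __tablename__ = 'entries'
    id = Column(Integer, primary_key=True)
    ledger_id = Column(Integer, ForeignKey('ledgers.id'))

class Ledger(Base):
    __tablename__ = 'ledgers'
    id = Column(Integer, primary_key=True)
    entries = relationship('Entry')

    @validates('entries', include_removes=True)
    def _validate_entry(self, key, entry, is_remove):
        if is_remove:
            raise ValueError("entries may be voided, never removed")
        return entry
```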
- - """ - - instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - if _new_mappers: - configure_mappers() - if instrumenting_mapper._set_polymorphic_identity: - instrumenting_mapper._set_polymorphic_identity(state) - -def _event_on_resurrect(state): - # re-populate the primary key elements - # of the dict based on the mapping. - instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR) - if instrumenting_mapper: - for col, val in zip(instrumenting_mapper.primary_key, state.key[1]): - instrumenting_mapper._set_state_attr_by_column( - state, state.dict, col, val) - - -class _ColumnMapping(util.py25_dict): - """Error reporting helper for mapper._columntoproperty.""" - - def __init__(self, mapper): - self.mapper = mapper - - def __missing__(self, column): - prop = self.mapper._props.get(column) - if prop: - raise orm_exc.UnmappedColumnError( - "Column '%s.%s' is not available, due to " - "conflicting property '%s':%r" % ( - column.table.name, column.name, column.key, prop)) - raise orm_exc.UnmappedColumnError( - "No column %s is configured on mapper %s..." % - (column, self.mapper)) diff --git a/libs/sqlalchemy/orm/persistence.py b/libs/sqlalchemy/orm/persistence.py deleted file mode 100644 index 5be57cce..00000000 --- a/libs/sqlalchemy/orm/persistence.py +++ /dev/null @@ -1,779 +0,0 @@ -# orm/persistence.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""private module containing functions used to emit INSERT, UPDATE -and DELETE statements on behalf of a :class:`.Mapper` and its descending -mappers. - -The functions here are called only by the unit of work functions -in unitofwork.py. - -""" - -import operator -from itertools import groupby - -from sqlalchemy import sql, util, exc as sa_exc -from sqlalchemy.orm import attributes, sync, \ - exc as orm_exc - -from sqlalchemy.orm.util import _state_mapper, state_str - -def save_obj(base_mapper, states, uowtransaction, single=False): - """Issue ``INSERT`` and/or ``UPDATE`` statements for a list - of objects. - - This is called within the context of a UOWTransaction during a - flush operation, given a list of states to be flushed. The - base mapper in an inheritance hierarchy handles the inserts/ - updates for all descendant mappers. - - """ - - # if batch=false, call _save_obj separately for each object - if not single and not base_mapper.batch: - for state in _sort_states(states): - save_obj(base_mapper, [state], uowtransaction, single=True) - return - - states_to_insert, states_to_update = _organize_states_for_save( - base_mapper, - states, - uowtransaction) - - cached_connections = _cached_connection_dict(base_mapper) - - for table, mapper in base_mapper._sorted_tables.iteritems(): - insert = _collect_insert_commands(base_mapper, uowtransaction, - table, states_to_insert) - - update = _collect_update_commands(base_mapper, uowtransaction, - table, states_to_update) - - if update: - _emit_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) - - if insert: - _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, - table, insert) - - _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update) - -def post_update(base_mapper, states, uowtransaction, post_update_cols): - """Issue UPDATE statements on behalf of a relationship() which - specifies post_update. 
- - """ - cached_connections = _cached_connection_dict(base_mapper) - - states_to_update = _organize_states_for_post_update( - base_mapper, - states, uowtransaction) - - - for table, mapper in base_mapper._sorted_tables.iteritems(): - update = _collect_post_update_commands(base_mapper, uowtransaction, - table, states_to_update, - post_update_cols) - - if update: - _emit_post_update_statements(base_mapper, uowtransaction, - cached_connections, - mapper, table, update) - -def delete_obj(base_mapper, states, uowtransaction): - """Issue ``DELETE`` statements for a list of objects. - - This is called within the context of a UOWTransaction during a - flush operation. - - """ - - cached_connections = _cached_connection_dict(base_mapper) - - states_to_delete = _organize_states_for_delete( - base_mapper, - states, - uowtransaction) - - table_to_mapper = base_mapper._sorted_tables - - for table in reversed(table_to_mapper.keys()): - delete = _collect_delete_commands(base_mapper, uowtransaction, - table, states_to_delete) - - mapper = table_to_mapper[table] - - _emit_delete_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, delete) - - for state, state_dict, mapper, has_identity, connection \ - in states_to_delete: - mapper.dispatch.after_delete(mapper, connection, state) - -def _organize_states_for_save(base_mapper, states, uowtransaction): - """Make an initial pass across a set of states for INSERT or - UPDATE. - - This includes splitting out into distinct lists for - each, calling before_insert/before_update, obtaining - key information for each state including its dictionary, - mapper, the connection to use for the execution per state, - and the identity flag. - - """ - - states_to_insert = [] - states_to_update = [] - - for state, dict_, mapper, connection in _connections_for_states( - base_mapper, uowtransaction, - states): - - has_identity = bool(state.key) - instance_key = state.key or mapper._identity_key_from_state(state) - - row_switch = None - - # call before_XXX extensions - if not has_identity: - mapper.dispatch.before_insert(mapper, connection, state) - else: - mapper.dispatch.before_update(mapper, connection, state) - - # detect if we have a "pending" instance (i.e. has - # no instance_key attached to it), and another instance - # with the same identity key already exists as persistent. - # convert to an UPDATE if so. - if not has_identity and \ - instance_key in uowtransaction.session.identity_map: - instance = \ - uowtransaction.session.identity_map[instance_key] - existing = attributes.instance_state(instance) - if not uowtransaction.is_deleted(existing): - raise orm_exc.FlushError( - "New instance %s with identity key %s conflicts " - "with persistent instance %s" % - (state_str(state), instance_key, - state_str(existing))) - - base_mapper._log_debug( - "detected row switch for identity %s. 
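`_organize_states_for_save()` above is where the `before_insert`/`before_update` mapper events fire, ahead of the per-table command collection. A listener sketch, assuming a mapped `User` class with a `created_at` column:

```python
import datetime
from sqlalchemy import event

def set_created(mapper, connection, target):
    # runs inside the flush, just before the INSERT command is collected
    target.created_at = datetime.datetime.utcnow()

event.listen(User, 'before_insert', set_created)
```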
" - "will update %s, remove %s from " - "transaction", instance_key, - state_str(state), state_str(existing)) - - # remove the "delete" flag from the existing element - uowtransaction.remove_state_actions(existing) - row_switch = existing - - if not has_identity and not row_switch: - states_to_insert.append( - (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) - ) - else: - states_to_update.append( - (state, dict_, mapper, connection, - has_identity, instance_key, row_switch) - ) - - return states_to_insert, states_to_update - -def _organize_states_for_post_update(base_mapper, states, - uowtransaction): - """Make an initial pass across a set of states for UPDATE - corresponding to post_update. - - This includes obtaining key information for each state - including its dictionary, mapper, the connection to use for - the execution per state. - - """ - return list(_connections_for_states(base_mapper, uowtransaction, - states)) - -def _organize_states_for_delete(base_mapper, states, uowtransaction): - """Make an initial pass across a set of states for DELETE. - - This includes calling out before_delete and obtaining - key information for each state including its dictionary, - mapper, the connection to use for the execution per state. - - """ - states_to_delete = [] - - for state, dict_, mapper, connection in _connections_for_states( - base_mapper, uowtransaction, - states): - - mapper.dispatch.before_delete(mapper, connection, state) - - states_to_delete.append((state, dict_, mapper, - bool(state.key), connection)) - return states_to_delete - -def _collect_insert_commands(base_mapper, uowtransaction, table, - states_to_insert): - """Identify sets of values to use in INSERT statements for a - list of states. - - """ - insert = [] - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_insert: - if table not in mapper._pks_by_table: - continue - - pks = mapper._pks_by_table[table] - - params = {} - value_params = {} - - has_all_pks = True - for col in mapper._cols_by_table[table]: - if col is mapper.version_id_col: - params[col.key] = mapper.version_id_generator(None) - else: - # pull straight from the dict for - # pending objects - prop = mapper._columntoproperty[col] - value = state_dict.get(prop.key, None) - - if value is None: - if col in pks: - has_all_pks = False - elif col.default is None and \ - col.server_default is None: - params[col.key] = value - - elif isinstance(value, sql.ClauseElement): - value_params[col] = value - else: - params[col.key] = value - - insert.append((state, state_dict, params, mapper, - connection, value_params, has_all_pks)) - return insert - -def _collect_update_commands(base_mapper, uowtransaction, - table, states_to_update): - """Identify sets of values to use in UPDATE statements for a - list of states. - - This function works intricately with the history system - to determine exactly what values should be updated - as well as how the row should be matched within an UPDATE - statement. Includes some tricky scenarios where the primary - key of an object might have been changed. 
- - """ - - update = [] - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_update: - if table not in mapper._pks_by_table: - continue - - pks = mapper._pks_by_table[table] - - params = {} - value_params = {} - - hasdata = hasnull = False - for col in mapper._cols_by_table[table]: - if col is mapper.version_id_col: - params[col._label] = \ - mapper._get_committed_state_attr_by_column( - row_switch or state, - row_switch and row_switch.dict - or state_dict, - col) - - prop = mapper._columntoproperty[col] - history = attributes.get_state_history( - state, prop.key, - attributes.PASSIVE_NO_INITIALIZE - ) - if history.added: - params[col.key] = history.added[0] - hasdata = True - else: - params[col.key] = mapper.version_id_generator( - params[col._label]) - - # HACK: check for history, in case the - # history is only - # in a different table than the one - # where the version_id_col is. - for prop in mapper._columntoproperty.itervalues(): - history = attributes.get_state_history( - state, prop.key, - attributes.PASSIVE_NO_INITIALIZE) - if history.added: - hasdata = True - else: - prop = mapper._columntoproperty[col] - history = attributes.get_state_history( - state, prop.key, - attributes.PASSIVE_NO_INITIALIZE) - if history.added: - if isinstance(history.added[0], - sql.ClauseElement): - value_params[col] = history.added[0] - else: - value = history.added[0] - params[col.key] = value - - if col in pks: - if history.deleted and \ - not row_switch: - # if passive_updates and sync detected - # this was a pk->pk sync, use the new - # value to locate the row, since the - # DB would already have set this - if ("pk_cascaded", state, col) in \ - uowtransaction.attributes: - value = history.added[0] - params[col._label] = value - else: - # use the old value to - # locate the row - value = history.deleted[0] - params[col._label] = value - hasdata = True - else: - # row switch logic can reach us here - # remove the pk from the update params - # so the update doesn't - # attempt to include the pk in the - # update statement - del params[col.key] - value = history.added[0] - params[col._label] = value - if value is None: - hasnull = True - else: - hasdata = True - elif col in pks: - value = state.manager[prop.key].impl.get( - state, state_dict) - if value is None: - hasnull = True - params[col._label] = value - if hasdata: - if hasnull: - raise sa_exc.FlushError( - "Can't update table " - "using NULL for primary " - "key value") - update.append((state, state_dict, params, mapper, - connection, value_params)) - return update - - -def _collect_post_update_commands(base_mapper, uowtransaction, table, - states_to_update, post_update_cols): - """Identify sets of values to use in UPDATE statements for a - list of states within a post_update operation. 
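# Hedged sketch of a mapping that routes through this post_update path:
# mutually referencing tables, with post_update=True breaking the circular
# flush dependency by issuing a second UPDATE (all names illustrative).
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Widget(Base):
    __tablename__ = 'widget'
    id = Column(Integer, primary_key=True)
    favorite_entry_id = Column(Integer,
            ForeignKey('entry.id', use_alter=True, name='fk_favorite'))
    # populated by a second UPDATE after both INSERTs
    favorite_entry = relationship('Entry',
            primaryjoin='Widget.favorite_entry_id == Entry.id',
            post_update=True)

class Entry(Base):
    __tablename__ = 'entry'
    id = Column(Integer, primary_key=True)
    widget_id = Column(Integer, ForeignKey('widget.id'))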
- - """ - - update = [] - for state, state_dict, mapper, connection in states_to_update: - if table not in mapper._pks_by_table: - continue - pks = mapper._pks_by_table[table] - params = {} - hasdata = False - - for col in mapper._cols_by_table[table]: - if col in pks: - params[col._label] = \ - mapper._get_state_attr_by_column( - state, - state_dict, col) - elif col in post_update_cols: - prop = mapper._columntoproperty[col] - history = attributes.get_state_history( - state, prop.key, - attributes.PASSIVE_NO_INITIALIZE) - if history.added: - value = history.added[0] - params[col.key] = value - hasdata = True - if hasdata: - update.append((state, state_dict, params, mapper, - connection)) - return update - -def _collect_delete_commands(base_mapper, uowtransaction, table, - states_to_delete): - """Identify values to use in DELETE statements for a list of - states to be deleted.""" - - delete = util.defaultdict(list) - - for state, state_dict, mapper, has_identity, connection \ - in states_to_delete: - if not has_identity or table not in mapper._pks_by_table: - continue - - params = {} - delete[connection].append(params) - for col in mapper._pks_by_table[table]: - params[col.key] = \ - value = \ - mapper._get_state_attr_by_column( - state, state_dict, col) - if value is None: - raise sa_exc.FlushError( - "Can't delete from table " - "using NULL for primary " - "key value") - - if mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col): - params[mapper.version_id_col.key] = \ - mapper._get_committed_state_attr_by_column( - state, state_dict, - mapper.version_id_col) - return delete - - -def _emit_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update): - """Emit UPDATE statements corresponding to value lists collected - by _collect_update_commands().""" - - needs_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) - - def update_stmt(): - clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - if needs_version_id: - clause.clauses.append(mapper.version_id_col ==\ - sql.bindparam(mapper.version_id_col._label, - type_=mapper.version_id_col.type)) - - return table.update(clause) - - statement = base_mapper._memo(('update', table), update_stmt) - - rows = 0 - for state, state_dict, params, mapper, \ - connection, value_params in update: - - if value_params: - c = connection.execute( - statement.values(value_params), - params) - else: - c = cached_connections[connection].\ - execute(statement, params) - - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c.context.prefetch_cols, - c.context.postfetch_cols, - c.context.compiled_parameters[0], - value_params) - rows += c.rowcount - - if connection.dialect.supports_sane_rowcount: - if rows != len(update): - raise orm_exc.StaleDataError( - "UPDATE statement on table '%s' expected to " - "update %d row(s); %d were matched." % - (table.description, len(update), rows)) - - elif needs_version_id: - util.warn("Dialect %s does not support updated rowcount " - "- versioning cannot be verified." 
% - c.dialect.dialect_description, - stacklevel=12) - -def _emit_insert_statements(base_mapper, uowtransaction, - cached_connections, table, insert): - """Emit INSERT statements corresponding to value lists collected - by _collect_insert_commands().""" - - statement = base_mapper._memo(('insert', table), table.insert) - - for (connection, pkeys, hasvalue, has_all_pks), \ - records in groupby(insert, - lambda rec: (rec[4], - rec[2].keys(), - bool(rec[5]), - rec[6]) - ): - if has_all_pks and not hasvalue: - records = list(records) - multiparams = [rec[2] for rec in records] - c = cached_connections[connection].\ - execute(statement, multiparams) - - for (state, state_dict, params, mapper, - conn, value_params, has_all_pks), \ - last_inserted_params in \ - zip(records, c.context.compiled_parameters): - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - c.context.prefetch_cols, - c.context.postfetch_cols, - last_inserted_params, - value_params) - - else: - for state, state_dict, params, mapper, \ - connection, value_params, \ - has_all_pks in records: - - if value_params: - result = connection.execute( - statement.values(value_params), - params) - else: - result = cached_connections[connection].\ - execute(statement, params) - - primary_key = result.context.inserted_primary_key - - if primary_key is not None: - # set primary key attributes - for pk, col in zip(primary_key, - mapper._pks_by_table[table]): - prop = mapper._columntoproperty[col] - if state_dict.get(prop.key) is None: - # TODO: would rather say: - #state_dict[prop.key] = pk - mapper._set_state_attr_by_column( - state, - state_dict, - col, pk) - - _postfetch( - mapper, - uowtransaction, - table, - state, - state_dict, - result.context.prefetch_cols, - result.context.postfetch_cols, - result.context.compiled_parameters[0], - value_params) - - - -def _emit_post_update_statements(base_mapper, uowtransaction, - cached_connections, mapper, table, update): - """Emit UPDATE statements corresponding to value lists collected - by _collect_post_update_commands().""" - - def update_stmt(): - clause = sql.and_() - - for col in mapper._pks_by_table[table]: - clause.clauses.append(col == sql.bindparam(col._label, - type_=col.type)) - - return table.update(clause) - - statement = base_mapper._memo(('post_update', table), update_stmt) - - # execute each UPDATE in the order according to the original - # list of states to guarantee row access order, but - # also group them into common (connection, cols) sets - # to support executemany(). 
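# Standalone sketch (made-up data) of the grouping idiom the loop below
# relies on: parameter sets sharing a connection and an identical key set
# can be sent through a single executemany() call.
from itertools import groupby

records = [('conn_a', {'id': 1, 'x': 10}),
           ('conn_a', {'id': 2, 'x': 20}),
           ('conn_b', {'id': 3, 'x': 30})]
keyfunc = lambda rec: (rec[0], tuple(sorted(rec[1])))
for (conn, keys), run in groupby(sorted(records, key=keyfunc), keyfunc):
    multiparams = [params for _, params in run]
    # connection.execute(statement, multiparams)  # -> executemany()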
- for key, grouper in groupby( - update, lambda rec: (rec[4], rec[2].keys()) - ): - connection = key[0] - multiparams = [params for state, state_dict, - params, mapper, conn in grouper] - cached_connections[connection].\ - execute(statement, multiparams) - - -def _emit_delete_statements(base_mapper, uowtransaction, cached_connections, - mapper, table, delete): - """Emit DELETE statements corresponding to value lists collected - by _collect_delete_commands().""" - - need_version_id = mapper.version_id_col is not None and \ - table.c.contains_column(mapper.version_id_col) - - def delete_stmt(): - clause = sql.and_() - for col in mapper._pks_by_table[table]: - clause.clauses.append( - col == sql.bindparam(col.key, type_=col.type)) - - if need_version_id: - clause.clauses.append( - mapper.version_id_col == - sql.bindparam( - mapper.version_id_col.key, - type_=mapper.version_id_col.type - ) - ) - - return table.delete(clause) - - for connection, del_objects in delete.iteritems(): - statement = base_mapper._memo(('delete', table), delete_stmt) - - connection = cached_connections[connection] - - if need_version_id: - # TODO: need test coverage for this [ticket:1761] - if connection.dialect.supports_sane_rowcount: - rows = 0 - # execute deletes individually so that versioned - # rows can be verified - for params in del_objects: - c = connection.execute(statement, params) - rows += c.rowcount - if rows != len(del_objects): - raise orm_exc.StaleDataError( - "DELETE statement on table '%s' expected to " - "delete %d row(s); %d were matched." % - (table.description, len(del_objects), c.rowcount) - ) - else: - util.warn( - "Dialect %s does not support deleted rowcount " - "- versioning cannot be verified." % - connection.dialect.dialect_description, - stacklevel=12) - connection.execute(statement, del_objects) - else: - connection.execute(statement, del_objects) - - -def _finalize_insert_update_commands(base_mapper, uowtransaction, - states_to_insert, states_to_update): - """finalize state on states that have been inserted or updated, - including calling after_insert/after_update events. - - """ - for state, state_dict, mapper, connection, has_identity, \ - instance_key, row_switch in states_to_insert + \ - states_to_update: - - if mapper._readonly_props: - readonly = state.unmodified_intersection( - [p.key for p in mapper._readonly_props - if p.expire_on_flush or p.key not in state.dict] - ) - if readonly: - state.expire_attributes(state.dict, readonly) - - # if eager_defaults option is enabled, - # refresh whatever has been expired. 
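# Hedged usage note: eager_defaults is the mapper() flag checked just
# below; with it enabled, server-generated values are refreshed right
# after the flush rather than on first attribute access (MyClass and
# my_table are assumed to exist).
from sqlalchemy.orm import mapper
mapper(MyClass, my_table, eager_defaults=True)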
- if base_mapper.eager_defaults and state.unloaded:
- state.key = base_mapper._identity_key_from_state(state)
- uowtransaction.session.query(base_mapper)._load_on_ident(
- state.key, refresh_state=state,
- only_load_props=state.unloaded)
-
- # call after_XXX extensions
- if not has_identity:
- mapper.dispatch.after_insert(mapper, connection, state)
- else:
- mapper.dispatch.after_update(mapper, connection, state)
-
-def _postfetch(mapper, uowtransaction, table,
- state, dict_, prefetch_cols, postfetch_cols,
- params, value_params):
- """Expire attributes in need of newly persisted database state,
- after an INSERT or UPDATE statement has proceeded for that
- state."""
-
- if mapper.version_id_col is not None:
- prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
-
- for c in prefetch_cols:
- if c.key in params and c in mapper._columntoproperty:
- mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
-
- if postfetch_cols:
- state.expire_attributes(state.dict,
- [mapper._columntoproperty[c].key
- for c in postfetch_cols if c in
- mapper._columntoproperty]
- )
-
- # synchronize newly inserted ids from one table to the next
- # TODO: this still goes a little too often. would be nice to
- # have definitive list of "columns that changed" here
- for m, equated_pairs in mapper._table_to_equated[table]:
- sync.populate(state, m, state, m,
- equated_pairs,
- uowtransaction,
- mapper.passive_updates)
-
-def _connections_for_states(base_mapper, uowtransaction, states):
- """Return an iterator of (state, state.dict, mapper, connection).
-
- The states are sorted according to _sort_states, then paired
- with the connection they should be using for the given
- unit of work transaction.
-
- """
- # if session has a connection callable,
- # organize individual states with the connection
- # to use for update
- if uowtransaction.session.connection_callable:
- connection_callable = \
- uowtransaction.session.connection_callable
- else:
- connection = None
- connection_callable = None
-
- for state in _sort_states(states):
- if connection_callable:
- connection = connection_callable(base_mapper, state.obj())
- elif not connection:
- connection = uowtransaction.transaction.connection(
- base_mapper)
-
- mapper = _state_mapper(state)
-
- yield state, state.dict, mapper, connection
-
-def _cached_connection_dict(base_mapper):
- # dictionary of connection->connection_with_cache_options.
- return util.PopulateDict(
- lambda conn:conn.execution_options(
- compiled_cache=base_mapper._compiled_cache
- ))
-
-def _sort_states(states):
- pending = set(states)
- persistent = set(s for s in pending if s.key is not None)
- pending.difference_update(persistent)
- return sorted(pending, key=operator.attrgetter("insert_order")) + \
- sorted(persistent, key=lambda q:q.key[1])
-
-
diff --git a/libs/sqlalchemy/orm/properties.py b/libs/sqlalchemy/orm/properties.py
deleted file mode 100644
index 204232cf..00000000
--- a/libs/sqlalchemy/orm/properties.py
+++ /dev/null
@@ -1,1565 +0,0 @@
-# orm/properties.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""MapperProperty implementations.
-
-This is a private module which defines the behavior of individual ORM-
-mapped attributes.
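# For orientation (hedged): the ColumnProperty class below is what the
# public column_property() helper constructs, e.g. to map a SQL expression
# as a read-only attribute (`Base` and the column names are illustrative).
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import column_property

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    firstname = Column(String(50))
    lastname = Column(String(50))
    fullname = column_property(firstname + " " + lastname)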
- -""" - -from sqlalchemy import sql, util, log, exc as sa_exc -from sqlalchemy.sql.util import ClauseAdapter, criterion_as_pairs, \ - join_condition, _shallow_annotate -from sqlalchemy.sql import operators, expression -from sqlalchemy.orm import attributes, dependency, mapper, \ - object_mapper, strategies, configure_mappers -from sqlalchemy.orm.util import CascadeOptions, _class_to_mapper, \ - _orm_annotate, _orm_deannotate - -from sqlalchemy.orm.interfaces import MANYTOMANY, MANYTOONE, \ - MapperProperty, ONETOMANY, PropComparator, StrategizedProperty -mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") -NoneType = type(None) - -__all__ = ('ColumnProperty', 'CompositeProperty', 'SynonymProperty', - 'ComparableProperty', 'RelationshipProperty', 'RelationProperty') - -from descriptor_props import CompositeProperty, SynonymProperty, \ - ComparableProperty,ConcreteInheritedProperty - -class ColumnProperty(StrategizedProperty): - """Describes an object attribute that corresponds to a table column. - - Public constructor is the :func:`.orm.column_property` function. - - """ - - def __init__(self, *columns, **kwargs): - """Construct a ColumnProperty. - - Note the public constructor is the :func:`.orm.column_property` function. - - :param \*columns: The list of `columns` describes a single - object property. If there are multiple tables joined - together for the mapper, this list represents the equivalent - column as it appears across each table. - - :param group: - - :param deferred: - - :param comparator_factory: - - :param descriptor: - - :param expire_on_flush: - - :param extension: - - """ - self._orig_columns = [expression._labeled(c) for c in columns] - self.columns = [expression._labeled(_orm_deannotate(c)) - for c in columns] - self.group = kwargs.pop('group', None) - self.deferred = kwargs.pop('deferred', False) - self.instrument = kwargs.pop('_instrument', True) - self.comparator_factory = kwargs.pop('comparator_factory', - self.__class__.Comparator) - self.descriptor = kwargs.pop('descriptor', None) - self.extension = kwargs.pop('extension', None) - self.active_history = kwargs.pop('active_history', False) - self.expire_on_flush = kwargs.pop('expire_on_flush', True) - - if 'doc' in kwargs: - self.doc = kwargs.pop('doc') - else: - for col in reversed(self.columns): - doc = getattr(col, 'doc', None) - if doc is not None: - self.doc = doc - break - else: - self.doc = None - - if kwargs: - raise TypeError( - "%s received unexpected keyword argument(s): %s" % ( - self.__class__.__name__, - ', '.join(sorted(kwargs.keys())))) - - util.set_creation_order(self) - if not self.instrument: - self.strategy_class = strategies.UninstrumentedColumnLoader - elif self.deferred: - self.strategy_class = strategies.DeferredColumnLoader - else: - self.strategy_class = strategies.ColumnLoader - - def instrument_class(self, mapper): - if not self.instrument: - return - - attributes.register_descriptor( - mapper.class_, - self.key, - comparator=self.comparator_factory(self, mapper), - parententity=mapper, - doc=self.doc - ) - - def do_init(self): - super(ColumnProperty, self).do_init() - if len(self.columns) > 1 and \ - set(self.parent.primary_key).issuperset(self.columns): - util.warn( - ("On mapper %s, primary key column '%s' is being combined " - "with distinct primary key column '%s' in attribute '%s'. 
" - "Use explicit properties to give each column its own mapped " - "attribute name.") % (self.parent, self.columns[1], - self.columns[0], self.key)) - - def copy(self): - return ColumnProperty( - deferred=self.deferred, - group=self.group, - active_history=self.active_history, - *self.columns) - - def _getcommitted(self, state, dict_, column, - passive=attributes.PASSIVE_OFF): - return state.get_impl(self.key).\ - get_committed_value(state, dict_, passive=passive) - - def merge(self, session, source_state, source_dict, dest_state, - dest_dict, load, _recursive): - if not self.instrument: - return - elif self.key in source_dict: - value = source_dict[self.key] - - if not load: - dest_dict[self.key] = value - else: - impl = dest_state.get_impl(self.key) - impl.set(dest_state, dest_dict, value, None) - elif dest_state.has_identity and self.key not in dest_dict: - dest_state.expire_attributes(dest_dict, [self.key]) - - class Comparator(PropComparator): - @util.memoized_instancemethod - def __clause_element__(self): - if self.adapter: - return self.adapter(self.prop.columns[0]) - else: - return self.prop.columns[0]._annotate({ - "parententity": self.mapper, - "parentmapper":self.mapper}) - - def operate(self, op, *other, **kwargs): - return op(self.__clause_element__(), *other, **kwargs) - - def reverse_operate(self, op, other, **kwargs): - col = self.__clause_element__() - return op(col._bind_param(op, other), col, **kwargs) - - # TODO: legacy..do we need this ? (0.5) - ColumnComparator = Comparator - - def __str__(self): - return str(self.parent.class_.__name__) + "." + self.key - -log.class_logger(ColumnProperty) - -class RelationshipProperty(StrategizedProperty): - """Describes an object property that holds a single item or list - of items that correspond to a related database table. - - Public constructor is the :func:`.orm.relationship` function. - - Of note here is the :class:`.RelationshipProperty.Comparator` - class, which implements comparison operations for scalar- - and collection-referencing mapped attributes. 
- - """ - - strategy_wildcard_key = 'relationship:*' - - def __init__(self, argument, - secondary=None, primaryjoin=None, - secondaryjoin=None, - foreign_keys=None, - uselist=None, - order_by=False, - backref=None, - back_populates=None, - post_update=False, - cascade=False, extension=None, - viewonly=False, lazy=True, - collection_class=None, passive_deletes=False, - passive_updates=True, remote_side=None, - enable_typechecks=True, join_depth=None, - comparator_factory=None, - single_parent=False, innerjoin=False, - doc=None, - active_history=False, - cascade_backrefs=True, - load_on_pending=False, - strategy_class=None, _local_remote_pairs=None, - query_class=None): - - self.uselist = uselist - self.argument = argument - self.secondary = secondary - self.primaryjoin = primaryjoin - self.secondaryjoin = secondaryjoin - self.post_update = post_update - self.direction = None - self.viewonly = viewonly - self.lazy = lazy - self.single_parent = single_parent - self._user_defined_foreign_keys = foreign_keys - self.collection_class = collection_class - self.passive_deletes = passive_deletes - self.cascade_backrefs = cascade_backrefs - self.passive_updates = passive_updates - self.remote_side = remote_side - self.enable_typechecks = enable_typechecks - self.query_class = query_class - self.innerjoin = innerjoin - self.doc = doc - self.active_history = active_history - self.join_depth = join_depth - self.local_remote_pairs = _local_remote_pairs - self.extension = extension - self.load_on_pending = load_on_pending - self.comparator_factory = comparator_factory or \ - RelationshipProperty.Comparator - self.comparator = self.comparator_factory(self, None) - util.set_creation_order(self) - - if strategy_class: - self.strategy_class = strategy_class - elif self.lazy== 'dynamic': - from sqlalchemy.orm import dynamic - self.strategy_class = dynamic.DynaLoader - else: - self.strategy_class = strategies.factory(self.lazy) - - self._reverse_property = set() - - if cascade is not False: - self.cascade = CascadeOptions(cascade) - else: - self.cascade = CascadeOptions("save-update, merge") - - if self.passive_deletes == 'all' and \ - ("delete" in self.cascade or - "delete-orphan" in self.cascade): - raise sa_exc.ArgumentError( - "Can't set passive_deletes='all' in conjunction " - "with 'delete' or 'delete-orphan' cascade") - - self.order_by = order_by - - self.back_populates = back_populates - - if self.back_populates: - if backref: - raise sa_exc.ArgumentError( - "backref and back_populates keyword arguments " - "are mutually exclusive") - self.backref = None - else: - self.backref = backref - - - def instrument_class(self, mapper): - attributes.register_descriptor( - mapper.class_, - self.key, - comparator=self.comparator_factory(self, mapper), - parententity=mapper, - doc=self.doc, - ) - - class Comparator(PropComparator): - """Produce comparison operations for :func:`~.orm.relationship`-based - attributes.""" - - def __init__(self, prop, mapper, of_type=None, adapter=None): - """Construction of :class:`.RelationshipProperty.Comparator` - is internal to the ORM's attribute mechanics. - - """ - self.prop = prop - self.mapper = mapper - self.adapter = adapter - if of_type: - self._of_type = _class_to_mapper(of_type) - - def adapted(self, adapter): - """Return a copy of this PropComparator which will use the - given adaption function on the local side of generated - expressions. 
- - """ - - return self.__class__(self.property, self.mapper, - getattr(self, '_of_type', None), - adapter) - - @property - def parententity(self): - return self.property.parent - - def __clause_element__(self): - elem = self.property.parent._with_polymorphic_selectable - if self.adapter: - return self.adapter(elem) - else: - return elem - - def of_type(self, cls): - """Produce a construct that represents a particular 'subtype' of - attribute for the parent class. - - Currently this is usable in conjunction with :meth:`.Query.join` - and :meth:`.Query.outerjoin`. - - """ - return RelationshipProperty.Comparator( - self.property, - self.mapper, - cls, adapter=self.adapter) - - def in_(self, other): - """Produce an IN clause - this is not implemented - for :func:`~.orm.relationship`-based attributes at this time. - - """ - raise NotImplementedError('in_() not yet supported for ' - 'relationships. For a simple many-to-one, use ' - 'in_() against the set of foreign key values.') - - __hash__ = None - - def __eq__(self, other): - """Implement the ``==`` operator. - - In a many-to-one context, such as:: - - MyClass.some_prop == - - this will typically produce a - clause such as:: - - mytable.related_id == - - Where ```` is the primary key of the given - object. - - The ``==`` operator provides partial functionality for non- - many-to-one comparisons: - - * Comparisons against collections are not supported. - Use :meth:`~.RelationshipProperty.Comparator.contains`. - * Compared to a scalar one-to-many, will produce a - clause that compares the target columns in the parent to - the given target. - * Compared to a scalar many-to-many, an alias - of the association table will be rendered as - well, forming a natural join that is part of the - main body of the query. This will not work for - queries that go beyond simple AND conjunctions of - comparisons, such as those which use OR. Use - explicit joins, outerjoins, or - :meth:`~.RelationshipProperty.Comparator.has` for - more comprehensive non-many-to-one scalar - membership tests. - * Comparisons against ``None`` given in a one-to-many - or many-to-many context produce a NOT EXISTS clause. 
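# Hedged examples of the rules above, assuming Address.user is a
# many-to-one to User and User.addresses the reverse one-to-many:
session.query(Address).filter(Address.user == jack)
#   -> addresses.user_id = :primary_key_of_jack
session.query(User).filter(User.addresses == None)
#   -> NOT EXISTS (SELECT 1 FROM addresses
#                  WHERE addresses.user_id = users.id)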
- - """ - if isinstance(other, (NoneType, expression._Null)): - if self.property.direction in [ONETOMANY, MANYTOMANY]: - return ~self._criterion_exists() - else: - return _orm_annotate(self.property._optimized_compare( - None, adapt_source=self.adapter)) - elif self.property.uselist: - raise sa_exc.InvalidRequestError("Can't compare a colle" - "ction to an object or collection; use " - "contains() to test for membership.") - else: - return _orm_annotate(self.property._optimized_compare(other, - adapt_source=self.adapter)) - - def _criterion_exists(self, criterion=None, **kwargs): - if getattr(self, '_of_type', None): - target_mapper = self._of_type - to_selectable = target_mapper._with_polymorphic_selectable - if self.property._is_self_referential: - to_selectable = to_selectable.alias() - - single_crit = target_mapper._single_table_criterion - if single_crit is not None: - if criterion is not None: - criterion = single_crit & criterion - else: - criterion = single_crit - else: - to_selectable = None - - if self.adapter: - source_selectable = self.__clause_element__() - else: - source_selectable = None - - pj, sj, source, dest, secondary, target_adapter = \ - self.property._create_joins(dest_polymorphic=True, - dest_selectable=to_selectable, - source_selectable=source_selectable) - - for k in kwargs: - crit = getattr(self.property.mapper.class_, k) == kwargs[k] - if criterion is None: - criterion = crit - else: - criterion = criterion & crit - - # annotate the *local* side of the join condition, in the case - # of pj + sj this is the full primaryjoin, in the case of just - # pj its the local side of the primaryjoin. - if sj is not None: - j = _orm_annotate(pj) & sj - else: - j = _orm_annotate(pj, exclude=self.property.remote_side) - - if criterion is not None and target_adapter: - # limit this adapter to annotated only? - criterion = target_adapter.traverse(criterion) - - # only have the "joined left side" of what we - # return be subject to Query adaption. The right - # side of it is used for an exists() subquery and - # should not correlate or otherwise reach out - # to anything in the enclosing query. - if criterion is not None: - criterion = criterion._annotate({'no_replacement_traverse': True}) - - crit = j & criterion - - return sql.exists([1], crit, from_obj=dest).\ - correlate(source._annotate({'_orm_adapt':True})) - - def any(self, criterion=None, **kwargs): - """Produce an expression that tests a collection against - particular criterion, using EXISTS. - - An expression like:: - - session.query(MyClass).filter( - MyClass.somereference.any(SomeRelated.x==2) - ) - - - Will produce a query like:: - - SELECT * FROM my_table WHERE - EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id - AND related.x=2) - - Because :meth:`~.RelationshipProperty.Comparator.any` uses - a correlated subquery, its performance is not nearly as - good when compared against large target tables as that of - using a join. - - :meth:`~.RelationshipProperty.Comparator.any` is particularly - useful for testing for empty collections:: - - session.query(MyClass).filter( - ~MyClass.somereference.any() - ) - - will produce:: - - SELECT * FROM my_table WHERE - NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id) - - :meth:`~.RelationshipProperty.Comparator.any` is only - valid for collections, i.e. a :func:`.relationship` - that has ``uselist=True``. For scalar references, - use :meth:`~.RelationshipProperty.Comparator.has`. 
- - """ - if not self.property.uselist: - raise sa_exc.InvalidRequestError( - "'any()' not implemented for scalar " - "attributes. Use has()." - ) - - return self._criterion_exists(criterion, **kwargs) - - def has(self, criterion=None, **kwargs): - """Produce an expression that tests a scalar reference against - particular criterion, using EXISTS. - - An expression like:: - - session.query(MyClass).filter( - MyClass.somereference.has(SomeRelated.x==2) - ) - - - Will produce a query like:: - - SELECT * FROM my_table WHERE - EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id - AND related.x=2) - - Because :meth:`~.RelationshipProperty.Comparator.has` uses - a correlated subquery, its performance is not nearly as - good when compared against large target tables as that of - using a join. - - :meth:`~.RelationshipProperty.Comparator.has` is only - valid for scalar references, i.e. a :func:`.relationship` - that has ``uselist=False``. For collection references, - use :meth:`~.RelationshipProperty.Comparator.any`. - - """ - if self.property.uselist: - raise sa_exc.InvalidRequestError( - "'has()' not implemented for collections. " - "Use any().") - return self._criterion_exists(criterion, **kwargs) - - def contains(self, other, **kwargs): - """Return a simple expression that tests a collection for - containment of a particular item. - - :meth:`~.RelationshipProperty.Comparator.contains` is - only valid for a collection, i.e. a - :func:`~.orm.relationship` that implements - one-to-many or many-to-many with ``uselist=True``. - - When used in a simple one-to-many context, an - expression like:: - - MyClass.contains(other) - - Produces a clause like:: - - mytable.id == - - Where ```` is the value of the foreign key - attribute on ``other`` which refers to the primary - key of its parent object. From this it follows that - :meth:`~.RelationshipProperty.Comparator.contains` is - very useful when used with simple one-to-many - operations. - - For many-to-many operations, the behavior of - :meth:`~.RelationshipProperty.Comparator.contains` - has more caveats. The association table will be - rendered in the statement, producing an "implicit" - join, that is, includes multiple tables in the FROM - clause which are equated in the WHERE clause:: - - query(MyClass).filter(MyClass.contains(other)) - - Produces a query like:: - - SELECT * FROM my_table, my_association_table AS - my_association_table_1 WHERE - my_table.id = my_association_table_1.parent_id - AND my_association_table_1.child_id = - - Where ```` would be the primary key of - ``other``. From the above, it is clear that - :meth:`~.RelationshipProperty.Comparator.contains` - will **not** work with many-to-many collections when - used in queries that move beyond simple AND - conjunctions, such as multiple - :meth:`~.RelationshipProperty.Comparator.contains` - expressions joined by OR. In such cases subqueries or - explicit "outer joins" will need to be used instead. - See :meth:`~.RelationshipProperty.Comparator.any` for - a less-performant alternative using EXISTS, or refer - to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins` - for more details on constructing outer joins. - - """ - if not self.property.uselist: - raise sa_exc.InvalidRequestError( - "'contains' not implemented for scalar " - "attributes. 
Use ==") - clause = self.property._optimized_compare(other, - adapt_source=self.adapter) - - if self.property.secondaryjoin is not None: - clause.negation_clause = \ - self.__negated_contains_or_equals(other) - - return clause - - def __negated_contains_or_equals(self, other): - if self.property.direction == MANYTOONE: - state = attributes.instance_state(other) - - def state_bindparam(x, state, col): - o = state.obj() # strong ref - return sql.bindparam(x, unique=True, callable_=lambda : \ - self.property.mapper._get_committed_attr_by_column(o, - col)) - - def adapt(col): - if self.adapter: - return self.adapter(col) - else: - return col - - if self.property._use_get: - return sql.and_(*[ - sql.or_( - adapt(x) != state_bindparam(adapt(x), state, y), - adapt(x) == None) - for (x, y) in self.property.local_remote_pairs]) - - criterion = sql.and_(*[x==y for (x, y) in - zip( - self.property.mapper.primary_key, - self.property.\ - mapper.\ - primary_key_from_instance(other)) - ]) - return ~self._criterion_exists(criterion) - - def __ne__(self, other): - """Implement the ``!=`` operator. - - In a many-to-one context, such as:: - - MyClass.some_prop != - - This will typically produce a clause such as:: - - mytable.related_id != - - Where ```` is the primary key of the - given object. - - The ``!=`` operator provides partial functionality for non- - many-to-one comparisons: - - * Comparisons against collections are not supported. - Use - :meth:`~.RelationshipProperty.Comparator.contains` - in conjunction with :func:`~.expression.not_`. - * Compared to a scalar one-to-many, will produce a - clause that compares the target columns in the parent to - the given target. - * Compared to a scalar many-to-many, an alias - of the association table will be rendered as - well, forming a natural join that is part of the - main body of the query. This will not work for - queries that go beyond simple AND conjunctions of - comparisons, such as those which use OR. Use - explicit joins, outerjoins, or - :meth:`~.RelationshipProperty.Comparator.has` in - conjunction with :func:`~.expression.not_` for - more comprehensive non-many-to-one scalar - membership tests. - * Comparisons against ``None`` given in a one-to-many - or many-to-many context produce an EXISTS clause. 
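# The negated forms, mirroring the __eq__ examples (names assumed):
session.query(Address).filter(Address.user != jack)
session.query(User).filter(User.addresses != None)
#   -> EXISTS (SELECT 1 FROM addresses WHERE addresses.user_id = users.id)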
- - """ - if isinstance(other, (NoneType, expression._Null)): - if self.property.direction == MANYTOONE: - return sql.or_(*[x != None for x in - self.property._calculated_foreign_keys]) - else: - return self._criterion_exists() - elif self.property.uselist: - raise sa_exc.InvalidRequestError("Can't compare a collection" - " to an object or collection; use " - "contains() to test for membership.") - else: - return self.__negated_contains_or_equals(other) - - @util.memoized_property - def property(self): - if mapperlib.module._new_mappers: - configure_mappers() - return self.prop - - def compare(self, op, value, - value_is_parent=False, - alias_secondary=True): - if op == operators.eq: - if value is None: - if self.uselist: - return ~sql.exists([1], self.primaryjoin) - else: - return self._optimized_compare(None, - value_is_parent=value_is_parent, - alias_secondary=alias_secondary) - else: - return self._optimized_compare(value, - value_is_parent=value_is_parent, - alias_secondary=alias_secondary) - else: - return op(self.comparator, value) - - def _optimized_compare(self, value, value_is_parent=False, - adapt_source=None, - alias_secondary=True): - if value is not None: - value = attributes.instance_state(value) - return self._get_strategy(strategies.LazyLoader).lazy_clause(value, - reverse_direction=not value_is_parent, - alias_secondary=alias_secondary, - adapt_source=adapt_source) - - def __str__(self): - return str(self.parent.class_.__name__) + "." + self.key - - def merge(self, - session, - source_state, - source_dict, - dest_state, - dest_dict, - load, _recursive): - - if load: - for r in self._reverse_property: - if (source_state, r) in _recursive: - return - - - if not "merge" in self.cascade: - return - - if self.key not in source_dict: - return - - if self.uselist: - instances = source_state.get_impl(self.key).\ - get(source_state, source_dict) - if hasattr(instances, '_sa_adapter'): - # convert collections to adapters to get a true iterator - instances = instances._sa_adapter - - if load: - # for a full merge, pre-load the destination collection, - # so that individual _merge of each item pulls from identity - # map for those already present. 
- # also assumes CollectionAttrbiuteImpl behavior of loading - # "old" list in any case - dest_state.get_impl(self.key).get(dest_state, dest_dict) - - dest_list = [] - for current in instances: - current_state = attributes.instance_state(current) - current_dict = attributes.instance_dict(current) - _recursive[(current_state, self)] = True - obj = session._merge(current_state, current_dict, - load=load, _recursive=_recursive) - if obj is not None: - dest_list.append(obj) - - if not load: - coll = attributes.init_state_collection(dest_state, - dest_dict, self.key) - for c in dest_list: - coll.append_without_event(c) - else: - dest_state.get_impl(self.key)._set_iterable(dest_state, - dest_dict, dest_list) - else: - current = source_dict[self.key] - if current is not None: - current_state = attributes.instance_state(current) - current_dict = attributes.instance_dict(current) - _recursive[(current_state, self)] = True - obj = session._merge(current_state, current_dict, - load=load, _recursive=_recursive) - else: - obj = None - - if not load: - dest_dict[self.key] = obj - else: - dest_state.get_impl(self.key).set(dest_state, - dest_dict, obj, None) - - def cascade_iterator(self, type_, state, dict_, visited_states, halt_on=None): - #assert type_ in self.cascade - - # only actively lazy load on the 'delete' cascade - if type_ != 'delete' or self.passive_deletes: - passive = attributes.PASSIVE_NO_INITIALIZE - else: - passive = attributes.PASSIVE_OFF - - if type_ == 'save-update': - tuples = state.manager[self.key].impl.\ - get_all_pending(state, dict_) - - else: - tuples = state.value_as_iterable(dict_, self.key, - passive=passive) - - skip_pending = type_ == 'refresh-expire' and 'delete-orphan' \ - not in self.cascade - - for instance_state, c in tuples: - if instance_state in visited_states: - continue - - if c is None: - # would like to emit a warning here, but - # would not be consistent with collection.append(None) - # current behavior of silently skipping. - # see [ticket:2229] - continue - - instance_dict = attributes.instance_dict(c) - - if halt_on and halt_on(instance_state): - continue - - if skip_pending and not instance_state.key: - continue - - instance_mapper = instance_state.manager.mapper - - if not instance_mapper.isa(self.mapper.class_manager.mapper): - raise AssertionError("Attribute '%s' on class '%s' " - "doesn't handle objects " - "of type '%s'" % ( - self.key, - self.parent.class_, - c.__class__ - )) - - visited_states.add(instance_state) - - yield c, instance_mapper, instance_state, instance_dict - - - def _add_reverse_property(self, key): - other = self.mapper.get_property(key, _compile_mappers=False) - self._reverse_property.add(other) - other._reverse_property.add(self) - - if not other.mapper.common_parent(self.parent): - raise sa_exc.ArgumentError('reverse_property %r on ' - 'relationship %s references relationship %s, which ' - 'does not reference mapper %s' % (key, self, other, - self.parent)) - if self.direction in (ONETOMANY, MANYTOONE) and self.direction \ - == other.direction: - raise sa_exc.ArgumentError('%s and back-reference %s are ' - 'both of the same direction %r. Did you mean to ' - 'set remote_side on the many-to-one side ?' - % (other, self, self.direction)) - - @util.memoized_property - def mapper(self): - """Return the targeted :class:`.Mapper` for this - :class:`.RelationshipProperty`. - - This is a lazy-initializing static attribute. 
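# The resolution below accepts a class, a Mapper, or a callable returning
# either, so a target can be named before it exists at import time; a
# hedged sketch (Address is an assumed mapped class):
from sqlalchemy.orm import relationship

addresses = relationship(Address)            # a mapped class, directly
addresses = relationship(lambda: Address)    # resolved lazily on first use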
- - """ - if isinstance(self.argument, type): - mapper_ = mapper.class_mapper(self.argument, - compile=False) - elif isinstance(self.argument, mapper.Mapper): - mapper_ = self.argument - elif util.callable(self.argument): - - # accept a callable to suit various deferred- - # configurational schemes - - mapper_ = mapper.class_mapper(self.argument(), - compile=False) - else: - raise sa_exc.ArgumentError("relationship '%s' expects " - "a class or a mapper argument (received: %s)" - % (self.key, type(self.argument))) - assert isinstance(mapper_, mapper.Mapper), mapper_ - return mapper_ - - @util.memoized_property - @util.deprecated("0.7", "Use .target") - def table(self): - """Return the selectable linked to this - :class:`.RelationshipProperty` object's target - :class:`.Mapper`.""" - return self.target - - def do_init(self): - self._check_conflicts() - self._process_dependent_arguments() - self._determine_joins() - self._determine_synchronize_pairs() - self._determine_direction() - self._determine_local_remote_pairs() - self._post_init() - self._generate_backref() - super(RelationshipProperty, self).do_init() - - def _check_conflicts(self): - """Test that this relationship is legal, warn about - inheritance conflicts.""" - - if not self.is_primary() \ - and not mapper.class_mapper( - self.parent.class_, - compile=False).has_property(self.key): - raise sa_exc.ArgumentError("Attempting to assign a new " - "relationship '%s' to a non-primary mapper on " - "class '%s'. New relationships can only be added " - "to the primary mapper, i.e. the very first mapper " - "created for class '%s' " % (self.key, - self.parent.class_.__name__, - self.parent.class_.__name__)) - - # check for conflicting relationship() on superclass - if not self.parent.concrete: - for inheriting in self.parent.iterate_to_root(): - if inheriting is not self.parent \ - and inheriting.has_property(self.key): - util.warn("Warning: relationship '%s' on mapper " - "'%s' supersedes the same relationship " - "on inherited mapper '%s'; this can " - "cause dependency issues during flush" - % (self.key, self.parent, inheriting)) - - def _process_dependent_arguments(self): - """Convert incoming configuration arguments to their - proper form. - - Callables are resolved, ORM annotations removed. - - """ - # accept callables for other attributes which may require - # deferred initialization. This technique is used - # by declarative "string configs" and some recipes. - for attr in ( - 'order_by', - 'primaryjoin', - 'secondaryjoin', - 'secondary', - '_user_defined_foreign_keys', - 'remote_side', - ): - attr_value = getattr(self, attr) - if util.callable(attr_value): - setattr(self, attr, attr_value()) - - # remove "annotations" which are present if mapped class - # descriptors are used to create the join expression. - for attr in 'primaryjoin', 'secondaryjoin': - val = getattr(self, attr) - if val is not None: - setattr(self, attr, _orm_deannotate( - expression._only_column_elements(val, attr)) - ) - - # ensure expressions in self.order_by, foreign_keys, - # remote_side are all columns, not strings. 
- if self.order_by is not False and self.order_by is not None: - self.order_by = [ - expression._only_column_elements(x, "order_by") - for x in - util.to_list(self.order_by)] - - self._user_defined_foreign_keys = \ - util.column_set( - expression._only_column_elements(x, "foreign_keys") - for x in util.to_column_set( - self._user_defined_foreign_keys - )) - - self.remote_side = \ - util.column_set( - expression._only_column_elements(x, "remote_side") - for x in - util.to_column_set(self.remote_side)) - - self.target = self.mapper.mapped_table - - if self.cascade.delete_orphan: - self.mapper.primary_mapper().delete_orphans.append( - (self.key, self.parent.class_) - ) - - def _determine_joins(self): - """Determine the 'primaryjoin' and 'secondaryjoin' attributes, - if not passed to the constructor already. - - This is based on analysis of the foreign key relationships - between the parent and target mapped selectables. - - """ - if self.secondaryjoin is not None and self.secondary is None: - raise sa_exc.ArgumentError("Property '" + self.key - + "' specified with secondary join condition but " - "no secondary argument") - - # if join conditions were not specified, figure them out based - # on foreign keys - - def _search_for_join(mapper, table): - # find a join between the given mapper's mapped table and - # the given table. will try the mapper's local table first - # for more specificity, then if not found will try the more - # general mapped table, which in the case of inheritance is - # a join. - return join_condition(mapper.mapped_table, table, - a_subset=mapper.local_table) - - try: - if self.secondary is not None: - if self.secondaryjoin is None: - self.secondaryjoin = _search_for_join(self.mapper, - self.secondary) - if self.primaryjoin is None: - self.primaryjoin = _search_for_join(self.parent, - self.secondary) - else: - if self.primaryjoin is None: - self.primaryjoin = _search_for_join(self.parent, - self.target) - except sa_exc.ArgumentError, e: - raise sa_exc.ArgumentError("Could not determine join " - "condition between parent/child tables on " - "relationship %s. Specify a 'primaryjoin' " - "expression. If 'secondary' is present, " - "'secondaryjoin' is needed as well." - % self) - - def _columns_are_mapped(self, *cols): - """Return True if all columns in the given collection are - mapped by the tables referenced by this :class:`.Relationship`. - - """ - for c in cols: - if self.secondary is not None \ - and self.secondary.c.contains_column(c): - continue - if not self.parent.mapped_table.c.contains_column(c) and \ - not self.target.c.contains_column(c): - return False - return True - - def _sync_pairs_from_join(self, join_condition, primary): - """Determine a list of "source"/"destination" column pairs - based on the given join condition, as well as the - foreign keys argument. - - "source" would be a column referenced by a foreign key, - and "destination" would be the column who has a foreign key - reference to "source". 
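# Concrete reading of "source"/"destination" above (hedged; tables are
# made up): the synchronize pair here is (parent.c.id, child.c.parent_id).
from sqlalchemy import Table, MetaData, Column, Integer, ForeignKey

metadata = MetaData()
parent = Table('parent', metadata,
               Column('id', Integer, primary_key=True))
child = Table('child', metadata,
              Column('id', Integer, primary_key=True),
              Column('parent_id', Integer, ForeignKey('parent.id')))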
- - """ - - fks = self._user_defined_foreign_keys - # locate pairs - eq_pairs = criterion_as_pairs(join_condition, - consider_as_foreign_keys=fks, - any_operator=self.viewonly) - - # couldn't find any fks, but we have - # "secondary" - assume the "secondary" columns - # are the fks - if not eq_pairs and \ - self.secondary is not None and \ - not fks: - fks = set(self.secondary.c) - eq_pairs = criterion_as_pairs(join_condition, - consider_as_foreign_keys=fks, - any_operator=self.viewonly) - - if eq_pairs: - util.warn("No ForeignKey objects were present " - "in secondary table '%s'. Assumed referenced " - "foreign key columns %s for join condition '%s' " - "on relationship %s" % ( - self.secondary.description, - ", ".join(sorted(["'%s'" % col for col in fks])), - join_condition, - self - )) - - # Filter out just to columns that are mapped. - # If viewonly, allow pairs where the FK col - # was part of "foreign keys" - the column it references - # may be in an un-mapped table - see - # test.orm.test_relationships.ViewOnlyComplexJoin.test_basic - # for an example of this. - eq_pairs = [(l, r) for (l, r) in eq_pairs - if self._columns_are_mapped(l, r) - or self.viewonly and - r in fks] - - if eq_pairs: - return eq_pairs - - # from here below is just determining the best error message - # to report. Check for a join condition using any operator - # (not just ==), perhaps they need to turn on "viewonly=True". - if not self.viewonly and criterion_as_pairs(join_condition, - consider_as_foreign_keys=self._user_defined_foreign_keys, - any_operator=True): - - err = "Could not locate any "\ - "foreign-key-equated, locally mapped column "\ - "pairs for %s "\ - "condition '%s' on relationship %s." % ( - primary and 'primaryjoin' or 'secondaryjoin', - join_condition, - self - ) - - if not self._user_defined_foreign_keys: - err += " Ensure that the "\ - "referencing Column objects have a "\ - "ForeignKey present, or are otherwise part "\ - "of a ForeignKeyConstraint on their parent "\ - "Table, or specify the foreign_keys parameter "\ - "to this relationship." - - err += " For more "\ - "relaxed rules on join conditions, the "\ - "relationship may be marked as viewonly=True." - - raise sa_exc.ArgumentError(err) - else: - if self._user_defined_foreign_keys: - raise sa_exc.ArgumentError("Could not determine " - "relationship direction for %s condition " - "'%s', on relationship %s, using manual " - "'foreign_keys' setting. Do the columns " - "in 'foreign_keys' represent all, and " - "only, the 'foreign' columns in this join " - "condition? Does the %s Table already " - "have adequate ForeignKey and/or " - "ForeignKeyConstraint objects established " - "(in which case 'foreign_keys' is usually " - "unnecessary)?" - % ( - primary and 'primaryjoin' or 'secondaryjoin', - join_condition, - self, - primary and 'mapped' or 'secondary' - )) - else: - raise sa_exc.ArgumentError("Could not determine " - "relationship direction for %s condition " - "'%s', on relationship %s. Ensure that the " - "referencing Column objects have a " - "ForeignKey present, or are otherwise part " - "of a ForeignKeyConstraint on their parent " - "Table, or specify the foreign_keys parameter " - "to this relationship." - % ( - primary and 'primaryjoin' or 'secondaryjoin', - join_condition, - self - )) - - def _determine_synchronize_pairs(self): - """Resolve 'primary'/foreign' column pairs from the primaryjoin - and secondaryjoin arguments. 
- - """ - if self.local_remote_pairs: - if not self._user_defined_foreign_keys: - raise sa_exc.ArgumentError( - "foreign_keys argument is " - "required with _local_remote_pairs argument") - self.synchronize_pairs = [] - for l, r in self.local_remote_pairs: - if r in self._user_defined_foreign_keys: - self.synchronize_pairs.append((l, r)) - elif l in self._user_defined_foreign_keys: - self.synchronize_pairs.append((r, l)) - else: - self.synchronize_pairs = self._sync_pairs_from_join( - self.primaryjoin, - True) - - self._calculated_foreign_keys = util.column_set( - r for (l, r) in - self.synchronize_pairs) - - if self.secondaryjoin is not None: - self.secondary_synchronize_pairs = self._sync_pairs_from_join( - self.secondaryjoin, - False) - self._calculated_foreign_keys.update( - r for (l, r) in - self.secondary_synchronize_pairs) - else: - self.secondary_synchronize_pairs = None - - def _determine_direction(self): - """Determine if this relationship is one to many, many to one, - many to many. - - This is derived from the primaryjoin, presence of "secondary", - and in the case of self-referential the "remote side". - - """ - if self.secondaryjoin is not None: - self.direction = MANYTOMANY - elif self._refers_to_parent_table(): - - # self referential defaults to ONETOMANY unless the "remote" - # side is present and does not reference any foreign key - # columns - - if self.local_remote_pairs: - remote = [r for (l, r) in self.local_remote_pairs] - elif self.remote_side: - remote = self.remote_side - else: - remote = None - if not remote or self._calculated_foreign_keys.difference(l for (l, - r) in self.synchronize_pairs).intersection(remote): - self.direction = ONETOMANY - else: - self.direction = MANYTOONE - else: - parentcols = util.column_set(self.parent.mapped_table.c) - targetcols = util.column_set(self.mapper.mapped_table.c) - - # fk collection which suggests ONETOMANY. - onetomany_fk = targetcols.intersection( - self._calculated_foreign_keys) - - # fk collection which suggests MANYTOONE. - - manytoone_fk = parentcols.intersection( - self._calculated_foreign_keys) - - if onetomany_fk and manytoone_fk: - # fks on both sides. do the same test only based on the - # local side. - referents = [c for (c, f) in self.synchronize_pairs] - onetomany_local = parentcols.intersection(referents) - manytoone_local = targetcols.intersection(referents) - - if onetomany_local and not manytoone_local: - self.direction = ONETOMANY - elif manytoone_local and not onetomany_local: - self.direction = MANYTOONE - else: - raise sa_exc.ArgumentError( - "Can't determine relationship" - " direction for relationship '%s' - foreign " - "key columns are present in both the parent " - "and the child's mapped tables. Specify " - "'foreign_keys' argument." % self) - elif onetomany_fk: - self.direction = ONETOMANY - elif manytoone_fk: - self.direction = MANYTOONE - else: - raise sa_exc.ArgumentError("Can't determine relationship " - "direction for relationship '%s' - foreign " - "key columns are present in neither the parent " - "nor the child's mapped tables" % self) - - if self.cascade.delete_orphan and not self.single_parent \ - and (self.direction is MANYTOMANY or self.direction - is MANYTOONE): - util.warn('On %s, delete-orphan cascade is not supported ' - 'on a many-to-many or many-to-one relationship ' - 'when single_parent is not set. Set ' - 'single_parent=True on the relationship().' 
% self)
- if self.direction is MANYTOONE and self.passive_deletes:
- util.warn("On %s, 'passive_deletes' is normally configured "
- "on one-to-many, one-to-one, many-to-many "
- "relationships only."
- % self)
-
- def _determine_local_remote_pairs(self):
- """Determine pairs of columns representing "local" to
- "remote", where "local" columns are on the parent mapper,
- "remote" are on the target mapper.
-
- These pairs are used on the load side only to generate
- lazy loading clauses.
-
- """
- if not self.local_remote_pairs and not self.remote_side:
- # the most common, trivial case. Derive
- # local/remote pairs from the synchronize pairs.
- eq_pairs = util.unique_list(
- self.synchronize_pairs +
- (self.secondary_synchronize_pairs or []))
- if self.direction is MANYTOONE:
- self.local_remote_pairs = [(r, l) for l, r in eq_pairs]
- else:
- self.local_remote_pairs = eq_pairs
-
- # "remote_side" specified, derive from the primaryjoin
- # plus remote_side, similarly to how synchronize_pairs
- # were determined.
- elif self.remote_side:
- if self.local_remote_pairs:
- raise sa_exc.ArgumentError('remote_side argument is '
- 'redundant against more detailed '
- '_local_remote_side argument.')
- if self.direction is MANYTOONE:
- self.local_remote_pairs = [(r, l) for (l, r) in
- criterion_as_pairs(self.primaryjoin,
- consider_as_referenced_keys=self.remote_side,
- any_operator=True)]
-
- else:
- self.local_remote_pairs = \
- criterion_as_pairs(self.primaryjoin,
- consider_as_foreign_keys=self.remote_side,
- any_operator=True)
- if not self.local_remote_pairs:
- raise sa_exc.ArgumentError('Relationship %s could '
- 'not determine any local/remote column '
- 'pairs from remote side argument %r'
- % (self, self.remote_side))
- # else local_remote_pairs were sent explicitly via
- # ._local_remote_pairs.
-
- # create local_side/remote_side accessors
- self.local_side = util.ordered_column_set(
- l for l, r in self.local_remote_pairs)
- self.remote_side = util.ordered_column_set(
- r for l, r in self.local_remote_pairs)
-
- # check that the non-foreign key column in the local/remote
- # collection is mapped. The foreign key
- # which the individual mapped column references directly may
- # itself be in a non-mapped table; see
- # test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
- # for an example of this.
- if self.direction is ONETOMANY:
- for col in self.local_side:
- if not self._columns_are_mapped(col):
- raise sa_exc.ArgumentError(
- "Local column '%s' is not "
- "part of mapping %s. Specify remote_side "
- "argument to indicate which column lazy join "
- "condition should compare against." % (col,
- self.parent))
- elif self.direction is MANYTOONE:
- for col in self.remote_side:
- if not self._columns_are_mapped(col):
- raise sa_exc.ArgumentError(
- "Remote column '%s' is not "
- "part of mapping %s. Specify remote_side "
- "argument to indicate which column lazy join "
- "condition should bind."
% (col, self.mapper)) - - def _generate_backref(self): - if not self.is_primary(): - return - if self.backref is not None and not self.back_populates: - if isinstance(self.backref, basestring): - backref_key, kwargs = self.backref, {} - else: - backref_key, kwargs = self.backref - mapper = self.mapper.primary_mapper() - if mapper.has_property(backref_key): - raise sa_exc.ArgumentError("Error creating backref " - "'%s' on relationship '%s': property of that " - "name exists on mapper '%s'" % (backref_key, - self, mapper)) - if self.secondary is not None: - pj = kwargs.pop('primaryjoin', self.secondaryjoin) - sj = kwargs.pop('secondaryjoin', self.primaryjoin) - else: - pj = kwargs.pop('primaryjoin', self.primaryjoin) - sj = kwargs.pop('secondaryjoin', None) - if sj: - raise sa_exc.InvalidRequestError( - "Can't assign 'secondaryjoin' on a backref against " - "a non-secondary relationship." - ) - foreign_keys = kwargs.pop('foreign_keys', - self._user_defined_foreign_keys) - parent = self.parent.primary_mapper() - kwargs.setdefault('viewonly', self.viewonly) - kwargs.setdefault('post_update', self.post_update) - kwargs.setdefault('passive_updates', self.passive_updates) - self.back_populates = backref_key - relationship = RelationshipProperty( - parent, - self.secondary, - pj, - sj, - foreign_keys=foreign_keys, - back_populates=self.key, - **kwargs - ) - mapper._configure_property(backref_key, relationship) - - if self.back_populates: - self._add_reverse_property(self.back_populates) - - def _post_init(self): - self.logger.info('%s setup primary join %s', self, - self.primaryjoin) - self.logger.info('%s setup secondary join %s', self, - self.secondaryjoin) - self.logger.info('%s synchronize pairs [%s]', self, - ','.join('(%s => %s)' % (l, r) for (l, r) in - self.synchronize_pairs)) - self.logger.info('%s secondary synchronize pairs [%s]', self, - ','.join('(%s => %s)' % (l, r) for (l, r) in - self.secondary_synchronize_pairs or [])) - self.logger.info('%s local/remote pairs [%s]', self, - ','.join('(%s / %s)' % (l, r) for (l, r) in - self.local_remote_pairs)) - self.logger.info('%s relationship direction %s', self, - self.direction) - if self.uselist is None: - self.uselist = self.direction is not MANYTOONE - if not self.viewonly: - self._dependency_processor = \ - dependency.DependencyProcessor.from_relationship(self) - - @util.memoized_property - def _use_get(self): - """memoize the 'use_get' attribute of this RelationshipLoader's - lazyloader.""" - - strategy = self._get_strategy(strategies.LazyLoader) - return strategy.use_get - - def _refers_to_parent_table(self): - pt = self.parent.mapped_table - mt = self.mapper.mapped_table - for c, f in self.synchronize_pairs: - if ( - pt.is_derived_from(c.table) and \ - pt.is_derived_from(f.table) and \ - mt.is_derived_from(c.table) and \ - mt.is_derived_from(f.table) - ): - return True - else: - return False - - @util.memoized_property - def _is_self_referential(self): - return self.mapper.common_parent(self.parent) - - def per_property_preprocessors(self, uow): - if not self.viewonly and self._dependency_processor: - self._dependency_processor.per_property_preprocessors(uow) - - def _create_joins(self, source_polymorphic=False, - source_selectable=None, dest_polymorphic=False, - dest_selectable=None, of_type=None): - if source_selectable is None: - if source_polymorphic and self.parent.with_polymorphic: - source_selectable = self.parent._with_polymorphic_selectable - - aliased = False - if dest_selectable is None: - if dest_polymorphic and 
self.mapper.with_polymorphic: - dest_selectable = self.mapper._with_polymorphic_selectable - aliased = True - else: - dest_selectable = self.mapper.mapped_table - - if self._is_self_referential and source_selectable is None: - dest_selectable = dest_selectable.alias() - aliased = True - else: - aliased = True - - # place a barrier on the destination such that - # replacement traversals won't ever dig into it. - # its internal structure remains fixed - # regardless of context. - dest_selectable = _shallow_annotate( - dest_selectable, - {'no_replacement_traverse':True}) - - aliased = aliased or (source_selectable is not None) - - primaryjoin, secondaryjoin, secondary = self.primaryjoin, \ - self.secondaryjoin, self.secondary - - # adjust the join condition for single table inheritance, - # in the case that the join is to a subclass - # this is analogous to the "_adjust_for_single_table_inheritance()" - # method in Query. - - dest_mapper = of_type or self.mapper - - single_crit = dest_mapper._single_table_criterion - if single_crit is not None: - if secondaryjoin is not None: - secondaryjoin = secondaryjoin & single_crit - else: - primaryjoin = primaryjoin & single_crit - - if aliased: - if secondary is not None: - secondary = secondary.alias() - primary_aliasizer = ClauseAdapter(secondary) - secondary_aliasizer = \ - ClauseAdapter(dest_selectable, - equivalents=self.mapper._equivalent_columns).\ - chain(primary_aliasizer) - if source_selectable is not None: - primary_aliasizer = \ - ClauseAdapter(secondary).\ - chain(ClauseAdapter(source_selectable, - equivalents=self.parent._equivalent_columns)) - secondaryjoin = \ - secondary_aliasizer.traverse(secondaryjoin) - else: - primary_aliasizer = ClauseAdapter(dest_selectable, - exclude=self.local_side, - equivalents=self.mapper._equivalent_columns) - if source_selectable is not None: - primary_aliasizer.chain( - ClauseAdapter(source_selectable, - exclude=self.remote_side, - equivalents=self.parent._equivalent_columns)) - secondary_aliasizer = None - primaryjoin = primary_aliasizer.traverse(primaryjoin) - target_adapter = secondary_aliasizer or primary_aliasizer - target_adapter.include = target_adapter.exclude = None - else: - target_adapter = None - if source_selectable is None: - source_selectable = self.parent.local_table - if dest_selectable is None: - dest_selectable = self.mapper.local_table - return ( - primaryjoin, - secondaryjoin, - source_selectable, - dest_selectable, - secondary, - target_adapter, - ) - -PropertyLoader = RelationProperty = RelationshipProperty -log.class_logger(RelationshipProperty) - diff --git a/libs/sqlalchemy/orm/query.py b/libs/sqlalchemy/orm/query.py deleted file mode 100644 index 286dbf6b..00000000 --- a/libs/sqlalchemy/orm/query.py +++ /dev/null @@ -1,3406 +0,0 @@ -# orm/query.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The Query class and support. - -Defines the :class:`.Query` class, the central -construct used by the ORM to construct database queries. - -The :class:`.Query` class should not be confused with the -:class:`.Select` class, which defines database -SELECT operations at the SQL (non-ORM) level. ``Query`` differs from -``Select`` in that it returns ORM-mapped objects and interacts with an -ORM session, whereas the ``Select`` construct interacts directly with the -database to return iterable result sets. 
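Since the module docstring above distinguishes `Query` from `Select`, a minimal sketch of the two levels may help readers of this removed code. All names here (`User`, `Address`, the in-memory SQLite engine) are illustrative assumptions, not part of this diff; the calling conventions match the 0.7-era vendored copy being deleted, and later snippets below reuse these names.

```python
from sqlalchemy import (Column, Integer, String, ForeignKey,
                        create_engine, select)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, sessionmaker

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    addresses = relationship("Address", backref="user")

class Address(Base):
    __tablename__ = 'address'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    email_address = Column(String)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# ORM level: Query returns mapped User objects tracked by the session
users = session.query(User).filter(User.name == 'ed').all()

# Core level: select() returns plain rows straight from the database
rows = engine.execute(select([User.__table__])).fetchall()
```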
- -""" - -from itertools import chain -from operator import itemgetter - -from sqlalchemy import sql, util, log, schema -from sqlalchemy import exc as sa_exc -from sqlalchemy.orm import exc as orm_exc -from sqlalchemy.sql import util as sql_util -from sqlalchemy.sql import expression, visitors, operators -from sqlalchemy.orm import ( - attributes, interfaces, mapper, object_mapper, evaluator, - ) -from sqlalchemy.orm.util import ( - AliasedClass, ORMAdapter, _entity_descriptor, _entity_info, - _is_aliased_class, _is_mapped_class, _orm_columns, _orm_selectable, - join as orm_join,with_parent, _attr_as_key, aliased - ) - - -__all__ = ['Query', 'QueryContext', 'aliased'] - - -def _generative(*assertions): - """Mark a method as generative.""" - - @util.decorator - def generate(fn, *args, **kw): - self = args[0]._clone() - for assertion in assertions: - assertion(self, fn.func_name) - fn(self, *args[1:], **kw) - return self - return generate - -class Query(object): - """ORM-level SQL construction object. - - :class:`.Query` is the source of all SELECT statements generated by the - ORM, both those formulated by end-user query operations as well as by - high level internal operations such as related collection loading. It - features a generative interface whereby successive calls return a new - :class:`.Query` object, a copy of the former with additional - criteria and options associated with it. - - :class:`.Query` objects are normally initially generated using the - :meth:`~.Session.query` method of :class:`.Session`. For a full walkthrough - of :class:`.Query` usage, see the :ref:`ormtutorial_toplevel`. - - """ - - _enable_eagerloads = True - _enable_assertions = True - _with_labels = False - _criterion = None - _yield_per = None - _lockmode = None - _order_by = False - _group_by = False - _having = None - _distinct = False - _prefixes = None - _offset = None - _limit = None - _statement = None - _correlate = frozenset() - _populate_existing = False - _invoke_all_eagers = True - _version_check = False - _autoflush = True - _current_path = () - _only_load_props = None - _refresh_state = None - _from_obj = () - _join_entities = () - _select_from_entity = None - _filter_aliases = None - _from_obj_alias = None - _joinpath = _joinpoint = util.immutabledict() - _execution_options = util.immutabledict() - _params = util.immutabledict() - _attributes = util.immutabledict() - _with_options = () - _with_hints = () - _enable_single_crit = True - - def __init__(self, entities, session=None): - self.session = session - self._polymorphic_adapters = {} - self._set_entities(entities) - - def _set_entities(self, entities, entity_wrapper=None): - if entity_wrapper is None: - entity_wrapper = _QueryEntity - self._entities = [] - for ent in util.to_list(entities): - entity_wrapper(self, ent) - - self._setup_aliasizers(self._entities) - - def _setup_aliasizers(self, entities): - if hasattr(self, '_mapper_adapter_map'): - # usually safe to share a single map, but copying to prevent - # subtle leaks if end-user is reusing base query with arbitrary - # number of aliased() objects - self._mapper_adapter_map = d = self._mapper_adapter_map.copy() - else: - self._mapper_adapter_map = d = {} - - for ent in entities: - for entity in ent.entities: - if entity not in d: - mapper, selectable, is_aliased_class = \ - _entity_info(entity) - if not is_aliased_class and mapper.with_polymorphic: - with_polymorphic = mapper._with_polymorphic_mappers - if mapper.mapped_table not in \ - self._polymorphic_adapters: - 
self._mapper_loads_polymorphically_with(mapper, - sql_util.ColumnAdapter( - selectable, - mapper._equivalent_columns)) - adapter = None - elif is_aliased_class: - adapter = sql_util.ColumnAdapter( - selectable, - mapper._equivalent_columns) - with_polymorphic = None - else: - with_polymorphic = adapter = None - - d[entity] = (mapper, adapter, selectable, - is_aliased_class, with_polymorphic) - ent.setup_entity(entity, *d[entity]) - - def _mapper_loads_polymorphically_with(self, mapper, adapter): - for m2 in mapper._with_polymorphic_mappers: - self._polymorphic_adapters[m2] = adapter - for m in m2.iterate_to_root(): - self._polymorphic_adapters[m.mapped_table] = \ - self._polymorphic_adapters[m.local_table] = \ - adapter - - def _set_select_from(self, *obj): - - fa = [] - for from_obj in obj: - if isinstance(from_obj, expression._SelectBase): - from_obj = from_obj.alias() - fa.append(from_obj) - - self._from_obj = tuple(fa) - - if len(self._from_obj) == 1 and \ - isinstance(self._from_obj[0], expression.Alias): - equivs = self.__all_equivs() - self._from_obj_alias = sql_util.ColumnAdapter( - self._from_obj[0], equivs) - - - def _reset_polymorphic_adapter(self, mapper): - for m2 in mapper._with_polymorphic_mappers: - self._polymorphic_adapters.pop(m2, None) - for m in m2.iterate_to_root(): - self._polymorphic_adapters.pop(m.mapped_table, None) - self._polymorphic_adapters.pop(m.local_table, None) - - def __adapt_polymorphic_element(self, element): - if isinstance(element, expression.FromClause): - search = element - elif hasattr(element, 'table'): - search = element.table - else: - search = None - - if search is not None: - alias = self._polymorphic_adapters.get(search, None) - if alias: - return alias.adapt_clause(element) - - def _adapt_col_list(self, cols): - return [ - self._adapt_clause( - expression._literal_as_text(o), - True, True) - for o in cols - ] - - @_generative() - def _adapt_all_clauses(self): - self._orm_only_adapt = False - - def _adapt_clause(self, clause, as_filter, orm_only): - """Adapt incoming clauses to transformations which have been applied - within this query.""" - - adapters = [] - - # do we adapt all expression elements or only those - # tagged as 'ORM' constructs ? - orm_only = getattr(self, '_orm_only_adapt', orm_only) - - if as_filter and self._filter_aliases: - for fa in self._filter_aliases._visitor_iterator: - adapters.append( - ( - orm_only, fa.replace - ) - ) - - if self._from_obj_alias: - # for the "from obj" alias, apply extra rule to the - # 'ORM only' check, if this query were generated from a - # subquery of itself, i.e. _from_selectable(), apply adaption - # to all SQL constructs. - adapters.append( - ( - getattr(self, '_orm_only_from_obj_alias', orm_only), - self._from_obj_alias.replace - ) - ) - - if self._polymorphic_adapters: - adapters.append( - ( - orm_only, self.__adapt_polymorphic_element - ) - ) - - if not adapters: - return clause - - def replace(elem): - for _orm_only, adapter in adapters: - # if 'orm only', look for ORM annotations - # in the element before adapting. 
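The adapter list assembled above is applied through `visitors.replacement_traverse`, the same call that closes out `_adapt_clause`. A standalone sketch of that traversal, using throwaway column names that are an assumption for illustration only:

```python
from sqlalchemy.sql import column, visitors

expr = column('x') == 5

def replace(elem):
    # return a substitute for a matching element; returning None leaves it
    # untouched, mirroring how the adapters above decline elements they
    # don't own
    if getattr(elem, 'name', None) == 'x':
        return column('y')

new_expr = visitors.replacement_traverse(expr, {}, replace)
# new_expr now compares column "y" instead of "x"; expr is unchanged
```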
- if not _orm_only or \ - '_orm_adapt' in elem._annotations or \ - "parententity" in elem._annotations: - - e = adapter(elem) - if e is not None: - return e - - return visitors.replacement_traverse( - clause, - {}, - replace - ) - - def _entity_zero(self): - return self._entities[0] - - def _mapper_zero(self): - return self._select_from_entity or \ - self._entity_zero().entity_zero - - - @property - def _mapper_entities(self): - # TODO: this is wrong, its hardcoded to "primary entity" when - # for the case of __all_equivs() it should not be - # the name of this accessor is wrong too - for ent in self._entities: - if hasattr(ent, 'primary_entity'): - yield ent - - def _joinpoint_zero(self): - return self._joinpoint.get( - '_joinpoint_entity', - self._mapper_zero() - ) - - def _mapper_zero_or_none(self): - if not getattr(self._entities[0], 'primary_entity', False): - return None - return self._entities[0].mapper - - def _only_mapper_zero(self, rationale=None): - if len(self._entities) > 1: - raise sa_exc.InvalidRequestError( - rationale or - "This operation requires a Query against a single mapper." - ) - return self._mapper_zero() - - def _only_full_mapper_zero(self, methname): - if len(self._entities) != 1: - raise sa_exc.InvalidRequestError( - "%s() can only be used against " - "a single mapped class." % methname) - entity = self._entity_zero() - if not hasattr(entity, 'primary_entity'): - raise sa_exc.InvalidRequestError( - "%s() can only be used against " - "a single mapped class." % methname) - return entity.entity_zero - - def _only_entity_zero(self, rationale=None): - if len(self._entities) > 1: - raise sa_exc.InvalidRequestError( - rationale or - "This operation requires a Query against a single mapper." - ) - return self._entity_zero() - - - def __all_equivs(self): - equivs = {} - for ent in self._mapper_entities: - equivs.update(ent.mapper._equivalent_columns) - return equivs - - def _get_condition(self): - self._order_by = self._distinct = False - return self._no_criterion_condition("get") - - def _no_criterion_condition(self, meth): - if not self._enable_assertions: - return - if self._criterion is not None or \ - self._statement is not None or self._from_obj or \ - self._limit is not None or self._offset is not None or \ - self._group_by or self._order_by or self._distinct: - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a " - "Query with existing criterion. " % meth) - - self._from_obj = () - self._statement = self._criterion = None - self._order_by = self._group_by = self._distinct = False - - def _no_clauseelement_condition(self, meth): - if not self._enable_assertions: - return - if self._order_by: - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a " - "Query with existing criterion. " % meth) - self._no_criterion_condition(meth) - - def _no_statement_condition(self, meth): - if not self._enable_assertions: - return - if self._statement is not None: - raise sa_exc.InvalidRequestError( - ("Query.%s() being called on a Query with an existing full " - "statement - can't apply criterion.") % meth) - - def _no_limit_offset(self, meth): - if not self._enable_assertions: - return - if self._limit is not None or self._offset is not None: - raise sa_exc.InvalidRequestError( - "Query.%s() being called on a Query which already has LIMIT " - "or OFFSET applied. To modify the row-limited results of a " - " Query, call from_self() first. " - "Otherwise, call %s() before limit() or offset() are applied." 
- % (meth, meth) - ) - - def _no_select_modifiers(self, meth): - if not self._enable_assertions: - return - for attr, methname, notset in ( - ('_limit', 'limit()', None), - ('_offset', 'offset()', None), - ('_order_by', 'order_by()', False), - ('_group_by', 'group_by()', False), - ('_distinct', 'distinct()', False), - ): - if getattr(self, attr) is not notset: - raise sa_exc.InvalidRequestError( - "Can't call Query.%s() when %s has been called" % - (meth, methname) - ) - - def _get_options(self, populate_existing=None, - version_check=None, - only_load_props=None, - refresh_state=None): - if populate_existing: - self._populate_existing = populate_existing - if version_check: - self._version_check = version_check - if refresh_state: - self._refresh_state = refresh_state - if only_load_props: - self._only_load_props = set(only_load_props) - return self - - def _clone(self): - cls = self.__class__ - q = cls.__new__(cls) - q.__dict__ = self.__dict__.copy() - return q - - @property - def statement(self): - """The full SELECT statement represented by this Query. - - The statement by default will not have disambiguating labels - applied to the construct unless with_labels(True) is called - first. - - """ - - stmt = self._compile_context(labels=self._with_labels).\ - statement - if self._params: - stmt = stmt.params(self._params) - # TODO: there's no tests covering effects of - # the annotation not being there - return stmt._annotate({'no_replacement_traverse': True}) - - def subquery(self, name=None): - """return the full SELECT statement represented by this :class:`.Query`, - embedded within an :class:`.Alias`. - - Eager JOIN generation within the query is disabled. - - The statement will not have disambiguating labels - applied to the list of selected columns unless the - :meth:`.Query.with_labels` method is used to generate a new - :class:`.Query` with the option enabled. - - :param name: string name to be assigned as the alias; - this is passed through to :meth:`.FromClause.alias`. - If ``None``, a name will be deterministically generated - at compile time. - - - """ - return self.enable_eagerloads(False).statement.alias(name=name) - - def cte(self, name=None, recursive=False): - """Return the full SELECT statement represented by this :class:`.Query` - represented as a common table expression (CTE). - - .. versionadded:: 0.7.6 - - Parameters and usage are the same as those of the - :meth:`._SelectBase.cte` method; see that method for - further details. - - Here is the `Postgresql WITH - RECURSIVE example `_. - Note that, in this example, the ``included_parts`` cte and the ``incl_alias`` alias - of it are Core selectables, which - means the columns are accessed via the ``.c.`` attribute. 
The ``parts_alias`` - object is an :func:`.orm.aliased` instance of the ``Part`` entity, so column-mapped - attributes are available directly:: - - from sqlalchemy.orm import aliased - - class Part(Base): - __tablename__ = 'part' - part = Column(String, primary_key=True) - sub_part = Column(String, primary_key=True) - quantity = Column(Integer) - - included_parts = session.query( - Part.sub_part, - Part.part, - Part.quantity).\\ - filter(Part.part=="our part").\\ - cte(name="included_parts", recursive=True) - - incl_alias = aliased(included_parts, name="pr") - parts_alias = aliased(Part, name="p") - included_parts = included_parts.union_all( - session.query( - parts_alias.part, - parts_alias.sub_part, - parts_alias.quantity).\\ - filter(parts_alias.part==incl_alias.c.sub_part) - ) - - q = session.query( - included_parts.c.sub_part, - func.sum(included_parts.c.quantity).label('total_quantity') - ).\\ - group_by(included_parts.c.sub_part) - - See also: - - :meth:`._SelectBase.cte` - - """ - return self.enable_eagerloads(False).statement.cte(name=name, recursive=recursive) - - def label(self, name): - """Return the full SELECT statement represented by this :class:`.Query`, converted - to a scalar subquery with a label of the given name. - - Analogous to :meth:`sqlalchemy.sql._SelectBaseMixin.label`. - - .. versionadded:: 0.6.5 - - """ - - return self.enable_eagerloads(False).statement.label(name) - - - def as_scalar(self): - """Return the full SELECT statement represented by this :class:`.Query`, converted - to a scalar subquery. - - Analogous to :meth:`sqlalchemy.sql._SelectBaseMixin.as_scalar`. - - .. versionadded:: 0.6.5 - - """ - - return self.enable_eagerloads(False).statement.as_scalar() - - - def __clause_element__(self): - return self.enable_eagerloads(False).with_labels().statement - - @_generative() - def enable_eagerloads(self, value): - """Control whether or not eager joins and subqueries are - rendered. - - When set to False, the returned Query will not render - eager joins regardless of :func:`~sqlalchemy.orm.joinedload`, - :func:`~sqlalchemy.orm.subqueryload` options - or mapper-level ``lazy='joined'``/``lazy='subquery'`` - configurations. - - This is used primarily when nesting the Query's - statement into a subquery or other - selectable. - - """ - self._enable_eagerloads = value - - @_generative() - def with_labels(self): - """Apply column labels to the return value of Query.statement. - - Indicates that this Query's `statement` accessor should return - a SELECT statement that applies labels to all columns in the - form _; this is commonly used to - disambiguate columns from multiple tables which have the same - name. - - When the `Query` actually issues SQL to load rows, it always - uses column labeling. - - """ - self._with_labels = True - - @_generative() - def enable_assertions(self, value): - """Control whether assertions are generated. - - When set to False, the returned Query will - not assert its state before certain operations, - including that LIMIT/OFFSET has not been applied - when filter() is called, no criterion exists - when get() is called, and no "from_statement()" - exists when filter()/order_by()/group_by() etc. - is called. This more permissive mode is used by - custom Query subclasses to specify criterion or - other modifiers outside of the usual usage patterns. - - Care should be taken to ensure that the usage - pattern is even possible. A statement applied - by from_statement() will override any criterion - set by filter() or order_by(), for example. 
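The embedding accessors documented above (`statement`, `subquery()`, `label()`, `as_scalar()`) are easiest to see with the classic derived-table pattern; `User` and `Address` are the hypothetical mapping sketched after the module docstring:

```python
from sqlalchemy import func

# aggregate addresses per user as a derived table
address_counts = session.query(
        Address.user_id,
        func.count('*').label('address_count')).\
    group_by(Address.user_id).subquery()

# embed the subquery in an enclosing ORM query
q = session.query(User, address_counts.c.address_count).\
    outerjoin(address_counts, User.id == address_counts.c.user_id)
```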
- - """ - self._enable_assertions = value - - @property - def whereclause(self): - """A readonly attribute which returns the current WHERE criterion for this Query. - - This returned value is a SQL expression construct, or ``None`` if no - criterion has been established. - - """ - return self._criterion - - @_generative() - def _with_current_path(self, path): - """indicate that this query applies to objects loaded - within a certain path. - - Used by deferred loaders (see strategies.py) which transfer - query options from an originating query to a newly generated - query intended for the deferred load. - - """ - self._current_path = path - - @_generative(_no_clauseelement_condition) - def with_polymorphic(self, - cls_or_mappers, - selectable=None, discriminator=None): - """Load columns for descendant mappers of this Query's mapper. - - Using this method will ensure that each descendant mapper's - tables are included in the FROM clause, and will allow filter() - criterion to be used against those tables. The resulting - instances will also have those columns already loaded so that - no "post fetch" of those columns will be required. - - :param cls_or_mappers: a single class or mapper, or list of - class/mappers, which inherit from this Query's mapper. - Alternatively, it may also be the string ``'*'``, in which case - all descending mappers will be added to the FROM clause. - - :param selectable: a table or select() statement that will - be used in place of the generated FROM clause. This argument is - required if any of the desired mappers use concrete table - inheritance, since SQLAlchemy currently cannot generate UNIONs - among tables automatically. If used, the ``selectable`` argument - must represent the full set of tables and columns mapped by every - desired mapper. Otherwise, the unaccounted mapped columns will - result in their table being appended directly to the FROM clause - which will usually lead to incorrect results. - - :param discriminator: a column to be used as the "discriminator" - column for the given selectable. If not given, the polymorphic_on - attribute of the mapper will be used, if any. This is useful for - mappers that don't have polymorphic loading behavior by default, - such as concrete table mappers. - - """ - - if not getattr(self._entities[0], 'primary_entity', False): - raise sa_exc.InvalidRequestError( - "No primary mapper set up for this Query.") - entity = self._entities[0]._clone() - self._entities = [entity] + self._entities[1:] - entity.set_with_polymorphic(self, - cls_or_mappers, - selectable=selectable, - discriminator=discriminator) - - @_generative() - def yield_per(self, count): - """Yield only ``count`` rows at a time. - - WARNING: use this method with caution; if the same instance is present - in more than one batch of rows, end-user changes to attributes will be - overwritten. - - In particular, it's usually impossible to use this setting with - eagerly loaded collections (i.e. any lazy='joined' or 'subquery') - since those collections will be cleared for a new load when - encountered in a subsequent result batch. In the case of 'subquery' - loading, the full result for all rows is fetched which generally - defeats the purpose of :meth:`~sqlalchemy.orm.query.Query.yield_per`. - - Also note that many DBAPIs do not "stream" results, pre-buffering - all rows before making them available, including mysql-python and - psycopg2. 
:meth:`~sqlalchemy.orm.query.Query.yield_per` will also - set the ``stream_results`` execution - option to ``True``, which currently is only understood by psycopg2 - and causes server side cursors to be used. - - """ - self._yield_per = count - self._execution_options = self._execution_options.copy() - self._execution_options['stream_results'] = True - - def get(self, ident): - """Return an instance based on the given primary key identifier, - or ``None`` if not found. - - E.g.:: - - my_user = session.query(User).get(5) - - some_object = session.query(VersionedFoo).get((5, 10)) - - :meth:`~.Query.get` is special in that it provides direct - access to the identity map of the owning :class:`.Session`. - If the given primary key identifier is present - in the local identity map, the object is returned - directly from this collection and no SQL is emitted, - unless the object has been marked fully expired. - If not present, - a SELECT is performed in order to locate the object. - - :meth:`~.Query.get` also will perform a check if - the object is present in the identity map and - marked as expired - a SELECT - is emitted to refresh the object as well as to - ensure that the row is still present. - If not, :class:`~sqlalchemy.orm.exc.ObjectDeletedError` is raised. - - :meth:`~.Query.get` is only used to return a single - mapped instance, not multiple instances or - individual column constructs, and strictly - on a single primary key value. The originating - :class:`.Query` must be constructed in this way, - i.e. against a single mapped entity, - with no additional filtering criterion. Loading - options via :meth:`~.Query.options` may be applied - however, and will be used if the object is not - yet locally present. - - A lazy-loading, many-to-one attribute configured - by :func:`.relationship`, using a simple - foreign-key-to-primary-key criterion, will also use an - operation equivalent to :meth:`~.Query.get` in order to retrieve - the target value from the local identity map - before querying the database. See :ref:`loading_toplevel` - for further details on relationship loading. - - :param ident: A scalar or tuple value representing - the primary key. For a composite primary key, - the order of identifiers corresponds in most cases - to that of the mapped :class:`.Table` object's - primary key columns. For a :func:`.mapper` that - was given the ``primary key`` argument during - construction, the order of identifiers corresponds - to the elements present in this collection. - - :return: The object instance, or ``None``. - - """ - - # convert composite types to individual args - if hasattr(ident, '__composite_values__'): - ident = ident.__composite_values__() - - ident = util.to_list(ident) - - mapper = self._only_full_mapper_zero("get") - - if len(ident) != len(mapper.primary_key): - raise sa_exc.InvalidRequestError( - "Incorrect number of values in identifier to formulate " - "primary key for query.get(); primary key columns are %s" % - ','.join("'%s'" % c for c in mapper.primary_key)) - - key = mapper.identity_key_from_primary_key(ident) - - if not self._populate_existing and \ - not mapper.always_refresh and \ - self._lockmode is None: - - instance = self._get_from_identity(self.session, key, False) - if instance is not None: - # reject calls for id in identity map but class - # mismatch. 
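To make the identity-map short circuit implemented here concrete (hypothetical `User` mapping from earlier, a row with primary key 5 assumed present):

```python
u1 = session.query(User).get(5)   # first call emits a SELECT
u2 = session.query(User).get(5)   # no SQL: returned from the identity map
assert u1 is u2
```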
- if not issubclass(instance.__class__, mapper.class_): - return None - return instance - - return self._load_on_ident(key) - - @_generative() - def correlate(self, *args): - """Return a :class:`.Query` construct which will correlate the given - FROM clauses to that of an enclosing :class:`.Query` or - :func:`~.expression.select`. - - The method here accepts mapped classes, :func:`.aliased` constructs, - and :func:`.mapper` constructs as arguments, which are resolved into - expression constructs, in addition to appropriate expression - constructs. - - The correlation arguments are ultimately passed to - :meth:`.Select.correlate` after coercion to expression constructs. - - The correlation arguments take effect in such cases - as when :meth:`.Query.from_self` is used, or when - a subquery as returned by :meth:`.Query.subquery` is - embedded in another :func:`~.expression.select` construct. - - """ - - self._correlate = self._correlate.union( - _orm_selectable(s) - for s in args) - - @_generative() - def autoflush(self, setting): - """Return a Query with a specific 'autoflush' setting. - - Note that a Session with autoflush=False will - not autoflush, even if this flag is set to True at the - Query level. Therefore this flag is usually used only - to disable autoflush for a specific Query. - - """ - self._autoflush = setting - - @_generative() - def populate_existing(self): - """Return a :class:`.Query` that will expire and refresh all instances - as they are loaded, or reused from the current :class:`.Session`. - - :meth:`.populate_existing` does not improve behavior when - the ORM is used normally - the :class:`.Session` object's usual - behavior of maintaining a transaction and expiring all attributes - after rollback or commit handles object state automatically. - This method is not intended for general use. - - """ - self._populate_existing = True - - @_generative() - def _with_invoke_all_eagers(self, value): - """Set the 'invoke all eagers' flag which causes joined- and - subquery loaders to traverse into already-loaded related objects - and collections. - - Default is that of :attr:`.Query._invoke_all_eagers`. - - """ - self._invoke_all_eagers = value - - def with_parent(self, instance, property=None): - """Add filtering criterion that relates the given instance - to a child object or collection, using its attribute state - as well as an established :func:`.relationship()` - configuration. - - The method uses the :func:`.with_parent` function to generate - the clause, the result of which is passed to :meth:`.Query.filter`. - - Parameters are the same as :func:`.with_parent`, with the exception - that the given property can be None, in which case a search is - performed against this :class:`.Query` object's target mapper. 
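Exercising the `property=None` search described above, with the hypothetical `User`/`Address` mapping from earlier (which has exactly one relationship between the two classes):

```python
someuser = session.query(User).get(5)

# property omitted: the User -> Address relationship is located automatically
addresses = session.query(Address).with_parent(someuser).all()
```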
- - """ - - if property is None: - from sqlalchemy.orm import properties - mapper = object_mapper(instance) - - for prop in mapper.iterate_properties: - if isinstance(prop, properties.PropertyLoader) and \ - prop.mapper is self._mapper_zero(): - property = prop - break - else: - raise sa_exc.InvalidRequestError( - "Could not locate a property which relates instances " - "of class '%s' to instances of class '%s'" % - ( - self._mapper_zero().class_.__name__, - instance.__class__.__name__) - ) - - return self.filter(with_parent(instance, property)) - - @_generative() - def add_entity(self, entity, alias=None): - """add a mapped entity to the list of result columns - to be returned.""" - - if alias is not None: - entity = aliased(entity, alias) - - self._entities = list(self._entities) - m = _MapperEntity(self, entity) - self._setup_aliasizers([m]) - - @_generative() - def with_session(self, session): - """Return a :class:`Query` that will use the given :class:`.Session`. - - """ - - self.session = session - - def from_self(self, *entities): - """return a Query that selects from this Query's - SELECT statement. - - \*entities - optional list of entities which will replace - those being selected. - - """ - fromclause = self.with_labels().enable_eagerloads(False).\ - _enable_single_crit(False).\ - statement.correlate(None) - q = self._from_selectable(fromclause) - if entities: - q._set_entities(entities) - return q - - @_generative() - def _enable_single_crit(self, val): - self._enable_single_crit = val - - @_generative() - def _from_selectable(self, fromclause): - for attr in ( - '_statement', '_criterion', - '_order_by', '_group_by', - '_limit', '_offset', - '_joinpath', '_joinpoint', - '_distinct', '_having', - '_prefixes', - ): - self.__dict__.pop(attr, None) - self._set_select_from(fromclause) - - # this enables clause adaptation for non-ORM - # expressions. - self._orm_only_from_obj_alias = False - - old_entities = self._entities - self._entities = [] - for e in old_entities: - e.adapt_to_selectable(self, self._from_obj[0]) - - def values(self, *columns): - """Return an iterator yielding result tuples corresponding - to the given list of columns""" - - if not columns: - return iter(()) - q = self._clone() - q._set_entities(columns, entity_wrapper=_ColumnEntity) - if not q._yield_per: - q._yield_per = 10 - return iter(q) - _values = values - - def value(self, column): - """Return a scalar result corresponding to the given - column expression.""" - try: - # Py3K - #return self.values(column).__next__()[0] - # Py2K - return self.values(column).next()[0] - # end Py2K - except StopIteration: - return None - - @_generative() - def with_entities(self, *entities): - """Return a new :class:`.Query` replacing the SELECT list with the given - entities. - - e.g.:: - - # Users, filtered on some arbitrary criterion - # and then ordered by related email address - q = session.query(User).\\ - join(User.address).\\ - filter(User.name.like('%ed%')).\\ - order_by(Address.email) - - # given *only* User.id==5, Address.email, and 'q', what - # would the *next* User in the result be ? - subq = q.with_entities(Address.email).\\ - order_by(None).\\ - filter(User.id==5).\\ - subquery() - q = q.join((subq, subq.c.email < Address.email)).\\ - limit(1) - - .. 
versionadded:: 0.6.5 - - """ - self._set_entities(entities) - - - @_generative() - def add_columns(self, *column): - """Add one or more column expressions to the list - of result columns to be returned.""" - - self._entities = list(self._entities) - l = len(self._entities) - for c in column: - _ColumnEntity(self, c) - # _ColumnEntity may add many entities if the - # given arg is a FROM clause - self._setup_aliasizers(self._entities[l:]) - - @util.pending_deprecation("0.7", - ":meth:`.add_column` is superseded by :meth:`.add_columns`", - False) - def add_column(self, column): - """Add a column expression to the list of result columns to be returned. - - Pending deprecation: :meth:`.add_column` will be superseded by - :meth:`.add_columns`. - - """ - - return self.add_columns(column) - - def options(self, *args): - """Return a new Query object, applying the given list of - mapper options. - - Most supplied options regard changing how column- and - relationship-mapped attributes are loaded. See the sections - :ref:`deferred` and :ref:`loading_toplevel` for reference - documentation. - - """ - return self._options(False, *args) - - def _conditional_options(self, *args): - return self._options(True, *args) - - @_generative() - def _options(self, conditional, *args): - # most MapperOptions write to the '_attributes' dictionary, - # so copy that as well - self._attributes = self._attributes.copy() - opts = tuple(util.flatten_iterator(args)) - self._with_options = self._with_options + opts - if conditional: - for opt in opts: - opt.process_query_conditionally(self) - else: - for opt in opts: - opt.process_query(self) - - def with_transformation(self, fn): - """Return a new :class:`.Query` object transformed by - the given function. - - E.g.:: - - def filter_something(criterion): - def transform(q): - return q.filter(criterion) - return transform - - q = q.with_transformation(filter_something(x==5)) - - This allows ad-hoc recipes to be created for :class:`.Query` - objects. See the example at :ref:`hybrid_transformers`. - - .. versionadded:: 0.7.4 - - """ - return fn(self) - - @_generative() - def with_hint(self, selectable, text, dialect_name='*'): - """Add an indexing hint for the given entity or selectable to - this :class:`.Query`. - - Functionality is passed straight through to - :meth:`~sqlalchemy.sql.expression.Select.with_hint`, - with the addition that ``selectable`` can be a - :class:`.Table`, :class:`.Alias`, or ORM entity / mapped class - /etc. - """ - mapper, selectable, is_aliased_class = _entity_info(selectable) - - self._with_hints += ((selectable, text, dialect_name),) - - @_generative() - def execution_options(self, **kwargs): - """ Set non-SQL options which take effect during execution. - - The options are the same as those accepted by - :meth:`.Connection.execution_options`. - - Note that the ``stream_results`` execution option is enabled - automatically if the :meth:`~sqlalchemy.orm.query.Query.yield_per()` - method is used. - - """ - self._execution_options = self._execution_options.union(kwargs) - - @_generative() - def with_lockmode(self, mode): - """Return a new Query object with the specified locking mode. - - :param mode: a string representing the desired locking mode. A - corresponding value is passed to the ``for_update`` parameter of - :meth:`~sqlalchemy.sql.expression.select` when the query is - executed. 
Valid values are: - - ``'update'`` - passes ``for_update=True``, which translates to - ``FOR UPDATE`` (standard SQL, supported by most dialects) - - ``'update_nowait'`` - passes ``for_update='nowait'``, which - translates to ``FOR UPDATE NOWAIT`` (supported by Oracle, - PostgreSQL 8.1 upwards) - - ``'read'`` - passes ``for_update='read'``, which translates to - ``LOCK IN SHARE MODE`` (for MySQL), and ``FOR SHARE`` (for - PostgreSQL) - - ``'read_nowait'`` - passes ``for_update='read_nowait'``, which - translates to ``FOR SHARE NOWAIT`` (supported by PostgreSQL). - - .. versionadded:: 0.7.7 - ``FOR SHARE`` and ``FOR SHARE NOWAIT`` (PostgreSQL). - """ - - self._lockmode = mode - - @_generative() - def params(self, *args, **kwargs): - """add values for bind parameters which may have been - specified in filter(). - - parameters may be specified using \**kwargs, or optionally a single - dictionary as the first positional argument. The reason for both is - that \**kwargs is convenient, however some parameter dictionaries - contain unicode keys in which case \**kwargs cannot be used. - - """ - if len(args) == 1: - kwargs.update(args[0]) - elif len(args) > 0: - raise sa_exc.ArgumentError( - "params() takes zero or one positional argument, " - "which is a dictionary.") - self._params = self._params.copy() - self._params.update(kwargs) - - @_generative(_no_statement_condition, _no_limit_offset) - def filter(self, *criterion): - """apply the given filtering criterion to a copy - of this :class:`.Query`, using SQL expressions. - - e.g.:: - - session.query(MyClass).filter(MyClass.name == 'some name') - - Multiple criteria are joined together by AND:: - - session.query(MyClass).\\ - filter(MyClass.name == 'some name', MyClass.id > 5) - - The criterion is any SQL expression object applicable to the - WHERE clause of a select. String expressions are coerced - into SQL expression constructs via the :func:`.text` construct. - - .. versionchanged:: 0.7.5 - Multiple criteria joined by AND. - - See also: - - :meth:`.Query.filter_by` - filter on keyword expressions. - - """ - for criterion in list(criterion): - if isinstance(criterion, basestring): - criterion = sql.text(criterion) - - if criterion is not None and \ - not isinstance(criterion, sql.ClauseElement): - raise sa_exc.ArgumentError( - "filter() argument must be of type " - "sqlalchemy.sql.ClauseElement or string") - - criterion = self._adapt_clause(criterion, True, True) - - if self._criterion is not None: - self._criterion = self._criterion & criterion - else: - self._criterion = criterion - - def filter_by(self, **kwargs): - """apply the given filtering criterion to a copy - of this :class:`.Query`, using keyword expressions. - - e.g.:: - - session.query(MyClass).filter_by(name = 'some name') - - Multiple criteria are joined together by AND:: - - session.query(MyClass).\\ - filter_by(name = 'some name', id = 5) - - The keyword expressions are extracted from the primary - entity of the query, or the last entity that was the - target of a call to :meth:`.Query.join`. - - See also: - - :meth:`.Query.filter` - filter on SQL expressions. 
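The two filtering styles documented above, side by side on the hypothetical `User` mapping; both produce the same AND-joined WHERE clause:

```python
# keyword form: names resolve against the query's primary entity
q1 = session.query(User).filter_by(name='ed', id=5)

# expression form: full SQL expression objects
q2 = session.query(User).filter(User.name == 'ed', User.id == 5)
```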
- - """ - - clauses = [_entity_descriptor(self._joinpoint_zero(), key) == value - for key, value in kwargs.iteritems()] - return self.filter(sql.and_(*clauses)) - - @_generative(_no_statement_condition, _no_limit_offset) - def order_by(self, *criterion): - """apply one or more ORDER BY criterion to the query and return - the newly resulting ``Query`` - - All existing ORDER BY settings can be suppressed by - passing ``None`` - this will suppress any ORDER BY configured - on mappers as well. - - Alternatively, an existing ORDER BY setting on the Query - object can be entirely cancelled by passing ``False`` - as the value - use this before calling methods where - an ORDER BY is invalid. - - """ - - if len(criterion) == 1: - if criterion[0] is False: - if '_order_by' in self.__dict__: - del self._order_by - return - if criterion[0] is None: - self._order_by = None - return - - criterion = self._adapt_col_list(criterion) - - if self._order_by is False or self._order_by is None: - self._order_by = criterion - else: - self._order_by = self._order_by + criterion - - @_generative(_no_statement_condition, _no_limit_offset) - def group_by(self, *criterion): - """apply one or more GROUP BY criterion to the query and return - the newly resulting :class:`.Query`""" - - criterion = list(chain(*[_orm_columns(c) for c in criterion])) - - criterion = self._adapt_col_list(criterion) - - if self._group_by is False: - self._group_by = criterion - else: - self._group_by = self._group_by + criterion - - @_generative(_no_statement_condition, _no_limit_offset) - def having(self, criterion): - """apply a HAVING criterion to the query and return the - newly resulting :class:`.Query`. - - :meth:`having` is used in conjunction with :meth:`group_by`. - - HAVING criterion makes it possible to use filters on aggregate - functions like COUNT, SUM, AVG, MAX, and MIN, eg.:: - - q = session.query(User.id).\\ - join(User.addresses).\\ - group_by(User.id).\\ - having(func.count(Address.id) > 2) - - """ - - if isinstance(criterion, basestring): - criterion = sql.text(criterion) - - if criterion is not None and \ - not isinstance(criterion, sql.ClauseElement): - raise sa_exc.ArgumentError( - "having() argument must be of type " - "sqlalchemy.sql.ClauseElement or string") - - criterion = self._adapt_clause(criterion, True, True) - - if self._having is not None: - self._having = self._having & criterion - else: - self._having = criterion - - def union(self, *q): - """Produce a UNION of this Query against one or more queries. - - e.g.:: - - q1 = sess.query(SomeClass).filter(SomeClass.foo=='bar') - q2 = sess.query(SomeClass).filter(SomeClass.bar=='foo') - - q3 = q1.union(q2) - - The method accepts multiple Query objects so as to control - the level of nesting. A series of ``union()`` calls such as:: - - x.union(y).union(z).all() - - will nest on each ``union()``, and produces:: - - SELECT * FROM (SELECT * FROM (SELECT * FROM X UNION - SELECT * FROM y) UNION SELECT * FROM Z) - - Whereas:: - - x.union(y, z).all() - - produces:: - - SELECT * FROM (SELECT * FROM X UNION SELECT * FROM y UNION - SELECT * FROM Z) - - Note that many database backends do not allow ORDER BY to - be rendered on a query called within UNION, EXCEPT, etc. - To disable all ORDER BY clauses including those configured - on mappers, issue ``query.order_by(None)`` - the resulting - :class:`.Query` object will not render ORDER BY within - its SELECT statement. 
- - """ - - - return self._from_selectable( - expression.union(*([self]+ list(q)))) - - def union_all(self, *q): - """Produce a UNION ALL of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.union_all(*([self]+ list(q))) - ) - - def intersect(self, *q): - """Produce an INTERSECT of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.intersect(*([self]+ list(q))) - ) - - def intersect_all(self, *q): - """Produce an INTERSECT ALL of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.intersect_all(*([self]+ list(q))) - ) - - def except_(self, *q): - """Produce an EXCEPT of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.except_(*([self]+ list(q))) - ) - - def except_all(self, *q): - """Produce an EXCEPT ALL of this Query against one or more queries. - - Works the same way as :meth:`~sqlalchemy.orm.query.Query.union`. See - that method for usage examples. - - """ - return self._from_selectable( - expression.except_all(*([self]+ list(q))) - ) - - def join(self, *props, **kwargs): - """Create a SQL JOIN against this :class:`.Query` object's criterion - and apply generatively, returning the newly resulting :class:`.Query`. - - **Simple Relationship Joins** - - Consider a mapping between two classes ``User`` and ``Address``, - with a relationship ``User.addresses`` representing a collection - of ``Address`` objects associated with each ``User``. The most common - usage of :meth:`~.Query.join` is to create a JOIN along this - relationship, using the ``User.addresses`` attribute as an indicator - for how this should occur:: - - q = session.query(User).join(User.addresses) - - Where above, the call to :meth:`~.Query.join` along ``User.addresses`` - will result in SQL equivalent to:: - - SELECT user.* FROM user JOIN address ON user.id = address.user_id - - In the above example we refer to ``User.addresses`` as passed to - :meth:`~.Query.join` as the *on clause*, that is, it indicates - how the "ON" portion of the JOIN should be constructed. For a - single-entity query such as the one above (i.e. we start by selecting only from - ``User`` and nothing else), the relationship can also be specified by its - string name:: - - q = session.query(User).join("addresses") - - :meth:`~.Query.join` can also accommodate multiple - "on clause" arguments to produce a chain of joins, such as below - where a join across four related entities is constructed:: - - q = session.query(User).join("orders", "items", "keywords") - - The above would be shorthand for three separate calls to :meth:`~.Query.join`, - each using an explicit attribute to indicate the source entity:: - - q = session.query(User).\\ - join(User.orders).\\ - join(Order.items).\\ - join(Item.keywords) - - **Joins to a Target Entity or Selectable** - - A second form of :meth:`~.Query.join` allows any mapped entity - or core selectable construct as a target. 
In this usage, - :meth:`~.Query.join` will attempt - to create a JOIN along the natural foreign key relationship between - two entities:: - - q = session.query(User).join(Address) - - The above calling form of :meth:`.join` will raise an error if - either there are no foreign keys between the two entities, or if - there are multiple foreign key linkages between them. In the - above calling form, :meth:`~.Query.join` is called upon to - create the "on clause" automatically for us. The target can - be any mapped entity or selectable, such as a :class:`.Table`:: - - q = session.query(User).join(addresses_table) - - **Joins to a Target with an ON Clause** - - The third calling form allows both the target entity as well - as the ON clause to be passed explicitly. Suppose for - example we wanted to join to ``Address`` twice, using - an alias the second time. We use :func:`~sqlalchemy.orm.aliased` - to create a distinct alias of ``Address``, and join - to it using the ``target, onclause`` form, so that the - alias can be specified explicitly as the target along with - the relationship to instruct how the ON clause should proceed:: - - a_alias = aliased(Address) - - q = session.query(User).\\ - join(User.addresses).\\ - join(a_alias, User.addresses).\\ - filter(Address.email_address=='ed@foo.com').\\ - filter(a_alias.email_address=='ed@bar.com') - - Where above, the generated SQL would be similar to:: - - SELECT user.* FROM user - JOIN address ON user.id = address.user_id - JOIN address AS address_1 ON user.id=address_1.user_id - WHERE address.email_address = :email_address_1 - AND address_1.email_address = :email_address_2 - - The two-argument calling form of :meth:`~.Query.join` - also allows us to construct arbitrary joins with SQL-oriented - "on clause" expressions, not relying upon configured relationships - at all. Any SQL expression can be passed as the ON clause - when using the two-argument form, which should refer to the target - entity in some way as well as an applicable source entity:: - - q = session.query(User).join(Address, User.id==Address.user_id) - - .. versionchanged:: 0.7 - In SQLAlchemy 0.6 and earlier, the two argument form of - :meth:`~.Query.join` requires the usage of a tuple: - ``query(User).join((Address, User.id==Address.user_id))``\ . - This calling form is accepted in 0.7 and further, though - is not necessary unless multiple join conditions are passed to - a single :meth:`~.Query.join` call, which itself is also not - generally necessary as it is now equivalent to multiple - calls (this wasn't always the case). - - **Advanced Join Targeting and Adaption** - - There is a lot of flexibility in what the "target" can be when using - :meth:`~.Query.join`. As noted previously, it also accepts - :class:`.Table` constructs and other selectables such as :func:`.alias` - and :func:`.select` constructs, with either the one or two-argument forms:: - - addresses_q = select([Address.user_id]).\\ - where(Address.email_address.endswith("@bar.com")).\\ - alias() - - q = session.query(User).\\ - join(addresses_q, addresses_q.c.user_id==User.id) - - :meth:`~.Query.join` also features the ability to *adapt* a - :meth:`~sqlalchemy.orm.relationship` -driven ON clause to the target selectable. 
-
-        Below we construct a JOIN from ``User`` to a subquery against ``Address``, allowing
-        the relationship denoted by ``User.addresses`` to *adapt* itself
-        to the altered target::
-
-            address_subq = session.query(Address).\\
-                filter(Address.email_address == 'ed@foo.com').\\
-                subquery()
-
-            q = session.query(User).join(address_subq, User.addresses)
-
-        Producing SQL similar to::
-
-            SELECT user.* FROM user
-            JOIN (
-                SELECT address.id AS id,
-                    address.user_id AS user_id,
-                    address.email_address AS email_address
-                FROM address
-                WHERE address.email_address = :email_address_1
-            ) AS anon_1 ON user.id = anon_1.user_id
-
-        The above form allows one to fall back onto an explicit ON
-        clause at any time::
-
-            q = session.query(User).\\
-                join(address_subq, User.id==address_subq.c.user_id)
-
-        **Controlling what to Join From**
-
-        While :meth:`~.Query.join` exclusively deals with the "right"
-        side of the JOIN, we can also control the "left" side, in those
-        cases where it's needed, using :meth:`~.Query.select_from`.
-        Below we construct a query against ``Address`` but can still
-        make usage of ``User.addresses`` as our ON clause by instructing
-        the :class:`.Query` to select first from the ``User``
-        entity::
-
-            q = session.query(Address).select_from(User).\\
-                join(User.addresses).\\
-                filter(User.name == 'ed')
-
-        Which will produce SQL similar to::
-
-            SELECT address.* FROM user
-            JOIN address ON user.id=address.user_id
-            WHERE user.name = :name_1
-
-        **Constructing Aliases Anonymously**
-
-        :meth:`~.Query.join` can construct anonymous aliases
-        using the ``aliased=True`` flag. This feature is useful
-        when a query is being joined algorithmically, such as
-        when querying self-referentially to an arbitrary depth::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True)
-
-        When ``aliased=True`` is used, the actual "alias" construct
-        is not explicitly available. To work with it, methods such as
-        :meth:`.Query.filter` will adapt the incoming entity to
-        the last join point::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True).\\
-                filter(Node.name == 'grandchild 1')
-
-        When using automatic aliasing, the ``from_joinpoint=True``
-        argument can allow a multi-node join to be broken into
-        multiple calls to :meth:`~.Query.join`, so that
-        each path along the way can be further filtered::
-
-            q = session.query(Node).\\
-                join("children", aliased=True).\\
-                filter(Node.name == 'child 1').\\
-                join("children", aliased=True, from_joinpoint=True).\\
-                filter(Node.name == 'grandchild 1')
-
-        The filtering aliases above can then be reset back to the
-        original ``Node`` entity using :meth:`~.Query.reset_joinpoint`::
-
-            q = session.query(Node).\\
-                join("children", "children", aliased=True).\\
-                filter(Node.name == 'grandchild 1').\\
-                reset_joinpoint().\\
-                filter(Node.name == 'parent 1')
-
-        For an example of ``aliased=True``, see the distribution
-        example :ref:`examples_xmlpersistence` which illustrates
-        an XPath-like query system using algorithmic joins.
-
-        :param *props: A collection of one or more join conditions,
-          each consisting of a relationship-bound attribute or string
-          relationship name representing an "on clause", or a single
-          target entity, or a tuple in the form of ``(target, onclause)``.
-          A special two-argument calling form of the form ``target, onclause``
-          is also accepted.
-        :param aliased=False: If True, indicate that the JOIN target should be
-          anonymously aliased.
Subsequent calls to :class:`~.Query.filter` - and similar will adapt the incoming criterion to the target - alias, until :meth:`~.Query.reset_joinpoint` is called. - :param from_joinpoint=False: When using ``aliased=True``, a setting - of True here will cause the join to be from the most recent - joined target, rather than starting back from the original - FROM clauses of the query. - - See also: - - :ref:`ormtutorial_joins` in the ORM tutorial. - - :ref:`inheritance_toplevel` for details on how :meth:`~.Query.join` - is used for inheritance relationships. - - :func:`.orm.join` - a standalone ORM-level join function, - used internally by :meth:`.Query.join`, which in previous - SQLAlchemy versions was the primary ORM-level joining interface. - - """ - aliased, from_joinpoint = kwargs.pop('aliased', False),\ - kwargs.pop('from_joinpoint', False) - if kwargs: - raise TypeError("unknown arguments: %s" % - ','.join(kwargs.iterkeys())) - return self._join(props, - outerjoin=False, create_aliases=aliased, - from_joinpoint=from_joinpoint) - - def outerjoin(self, *props, **kwargs): - """Create a left outer join against this ``Query`` object's criterion - and apply generatively, returning the newly resulting ``Query``. - - Usage is the same as the ``join()`` method. - - """ - aliased, from_joinpoint = kwargs.pop('aliased', False), \ - kwargs.pop('from_joinpoint', False) - if kwargs: - raise TypeError("unknown arguments: %s" % - ','.join(kwargs.iterkeys())) - return self._join(props, - outerjoin=True, create_aliases=aliased, - from_joinpoint=from_joinpoint) - - def _update_joinpoint(self, jp): - self._joinpoint = jp - # copy backwards to the root of the _joinpath - # dict, so that no existing dict in the path is mutated - while 'prev' in jp: - f, prev = jp['prev'] - prev = prev.copy() - prev[f] = jp - jp['prev'] = (f, prev) - jp = prev - self._joinpath = jp - - @_generative(_no_statement_condition, _no_limit_offset) - def _join(self, keys, outerjoin, create_aliases, from_joinpoint): - """consumes arguments from join() or outerjoin(), places them into a - consistent format with which to form the actual JOIN constructs. - - """ - - if not from_joinpoint: - self._reset_joinpoint() - - if len(keys) == 2 and \ - isinstance(keys[0], (expression.FromClause, - type, AliasedClass)) and \ - isinstance(keys[1], (basestring, expression.ClauseElement, - interfaces.PropComparator)): - # detect 2-arg form of join and - # convert to a tuple. - keys = (keys,) - - for arg1 in util.to_list(keys): - if isinstance(arg1, tuple): - # "tuple" form of join, multiple - # tuples are accepted as well. The simpler - # "2-arg" form is preferred. May deprecate - # the "tuple" usage. - arg1, arg2 = arg1 - else: - arg2 = None - - # determine onclause/right_entity. there - # is a little bit of legacy behavior still at work here - # which means they might be in either order. may possibly - # lock this down to (right_entity, onclause) in 0.6. 
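The 2-arg detection implemented here corresponds to these equivalent call forms (hypothetical `User`/`Address` mapping from earlier):

```python
# preferred two-argument form: target, onclause
q = session.query(User).join(Address, User.id == Address.user_id)

# legacy tuple form, detected above and converted to the same thing
q = session.query(User).join((Address, User.id == Address.user_id))
```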
- if isinstance(arg1, (interfaces.PropComparator, basestring)): - right_entity, onclause = arg2, arg1 - else: - right_entity, onclause = arg1, arg2 - - left_entity = prop = None - - if isinstance(onclause, basestring): - left_entity = self._joinpoint_zero() - - descriptor = _entity_descriptor(left_entity, onclause) - onclause = descriptor - - # check for q.join(Class.propname, from_joinpoint=True) - # and Class is that of the current joinpoint - elif from_joinpoint and \ - isinstance(onclause, interfaces.PropComparator): - left_entity = onclause.parententity - - left_mapper, left_selectable, left_is_aliased = \ - _entity_info(self._joinpoint_zero()) - if left_mapper is left_entity: - left_entity = self._joinpoint_zero() - descriptor = _entity_descriptor(left_entity, - onclause.key) - onclause = descriptor - - if isinstance(onclause, interfaces.PropComparator): - if right_entity is None: - right_entity = onclause.property.mapper - of_type = getattr(onclause, '_of_type', None) - if of_type: - right_entity = of_type - else: - right_entity = onclause.property.mapper - - left_entity = onclause.parententity - - prop = onclause.property - if not isinstance(onclause, attributes.QueryableAttribute): - onclause = prop - - if not create_aliases: - # check for this path already present. - # don't render in that case. - edge = (left_entity, right_entity, prop.key) - if edge in self._joinpoint: - # The child's prev reference might be stale -- - # it could point to a parent older than the - # current joinpoint. If this is the case, - # then we need to update it and then fix the - # tree's spine with _update_joinpoint. Copy - # and then mutate the child, which might be - # shared by a different query object. - jp = self._joinpoint[edge].copy() - jp['prev'] = (edge, self._joinpoint) - self._update_joinpoint(jp) - continue - - elif onclause is not None and right_entity is None: - # TODO: no coverage here - raise NotImplementedError("query.join(a==b) not supported.") - - self._join_left_to_right( - left_entity, - right_entity, onclause, - outerjoin, create_aliases, prop) - - def _join_left_to_right(self, left, right, - onclause, outerjoin, create_aliases, prop): - """append a JOIN to the query's from clause.""" - - self._polymorphic_adapters = self._polymorphic_adapters.copy() - - if left is None: - if self._from_obj: - left = self._from_obj[0] - elif self._entities: - left = self._entities[0].entity_zero_or_selectable - - if left is right and \ - not create_aliases: - raise sa_exc.InvalidRequestError( - "Can't construct a join from %s to %s, they " - "are the same entity" % - (left, right)) - - right, right_is_aliased, onclause = self._prepare_right_side( - right, onclause, - outerjoin, create_aliases, - prop) - - # if joining on a MapperProperty path, - # track the path to prevent redundant joins - if not create_aliases and prop: - self._update_joinpoint({ - '_joinpoint_entity':right, - 'prev':((left, right, prop.key), self._joinpoint) - }) - else: - self._joinpoint = { - '_joinpoint_entity':right - } - - self._join_to_left(left, right, - right_is_aliased, - onclause, outerjoin) - - def _prepare_right_side(self, right, onclause, outerjoin, - create_aliases, prop): - right_mapper, right_selectable, right_is_aliased = _entity_info(right) - - if right_mapper: - self._join_entities += (right, ) - - if right_mapper and prop and \ - not right_mapper.common_parent(prop.mapper): - raise sa_exc.InvalidRequestError( - "Join target %s does not correspond to " - "the right side of join condition %s" % (right, 
-                    onclause)
-            )
-
-        if not right_mapper and prop:
-            right_mapper = prop.mapper
-
-        need_adapter = False
-
-        if right_mapper and right is right_selectable:
-            if not right_selectable.is_derived_from(
-                                    right_mapper.mapped_table):
-                raise sa_exc.InvalidRequestError(
-                    "Selectable '%s' is not derived from '%s'" %
-                    (right_selectable.description,
-                    right_mapper.mapped_table.description))
-
-            if not isinstance(right_selectable, expression.Alias):
-                right_selectable = right_selectable.alias()
-
-            right = aliased(right_mapper, right_selectable)
-            need_adapter = True
-
-        aliased_entity = right_mapper and \
-                            not right_is_aliased and \
-                            (
-                                right_mapper.with_polymorphic or
-                                isinstance(
-                                    right_mapper.mapped_table,
-                                    expression.Join)
-                            )
-
-        if not need_adapter and (create_aliases or aliased_entity):
-            right = aliased(right)
-            need_adapter = True
-
-        # if an alias() of the right side was generated here,
-        # apply an adapter to all subsequent filter() calls
-        # until reset_joinpoint() is called.
-        if need_adapter:
-            self._filter_aliases = ORMAdapter(right,
-                        equivalents=right_mapper and
-                            right_mapper._equivalent_columns or {},
-                        chain_to=self._filter_aliases)
-
-        # if the onclause is a ClauseElement, adapt it with any
-        # adapters that are in place right now
-        if isinstance(onclause, expression.ClauseElement):
-            onclause = self._adapt_clause(onclause, True, True)
-
-        # if an alias() on the right side was generated,
-        # which is intended to wrap the right side in a subquery,
-        # ensure that columns retrieved from this target in the result
-        # set are also adapted.
-        if aliased_entity and not create_aliases:
-            self._mapper_loads_polymorphically_with(
-                        right_mapper,
-                        ORMAdapter(
-                            right,
-                            equivalents=right_mapper._equivalent_columns
-                        )
-                    )
-
-        return right, right_is_aliased, onclause
-
-    def _join_to_left(self, left, right, right_is_aliased, onclause, outerjoin):
-        left_mapper, left_selectable, left_is_aliased = _entity_info(left)
-
-        # this is an overly broad assumption here, but there's a
-        # very wide variety of situations where we rely upon orm.join's
-        # adaption to glue clauses together, with joined-table inheritance's
-        # wide array of variables taking up most of the space.
-        # Setting the flag here is still a guess, so it is a bug
-        # that we don't have definitive criterion to determine when
-        # adaption should be enabled (or perhaps that we're even doing the
-        # whole thing the way we are here).
-        join_to_left = not right_is_aliased and not left_is_aliased
-
-        if self._from_obj and left_selectable is not None:
-            replace_clause_index, clause = sql_util.find_join_source(
-                                                    self._from_obj,
-                                                    left_selectable)
-            if clause is not None:
-                # the entire query's FROM clause is an alias of itself (i.e.
-                # from_self(), similar). if the left clause is that one,
-                # ensure it adapts to the left side.
-                if self._from_obj_alias and clause is self._from_obj[0]:
-                    join_to_left = True
-
-                # An exception case where adaption to the left edge is not
-                # desirable.  See above note on join_to_left.
-                if join_to_left and isinstance(clause, expression.Join) and \
-                    sql_util.clause_is_present(left_selectable, clause):
-                    join_to_left = False
-
-                try:
-                    clause = orm_join(clause,
-                                    right,
-                                    onclause, isouter=outerjoin,
-                                    join_to_left=join_to_left)
-                except sa_exc.ArgumentError, ae:
-                    raise sa_exc.InvalidRequestError(
-                            "Could not find a FROM clause to join from. 
" - "Tried joining to %s, but got: %s" % (right, ae)) - - self._from_obj = \ - self._from_obj[:replace_clause_index] + \ - (clause, ) + \ - self._from_obj[replace_clause_index + 1:] - return - - if left_mapper: - for ent in self._entities: - if ent.corresponds_to(left): - clause = ent.selectable - break - else: - clause = left - elif left_selectable is not None: - clause = left_selectable - else: - clause = None - - if clause is None: - raise sa_exc.InvalidRequestError( - "Could not find a FROM clause to join from") - - try: - clause = orm_join(clause, right, onclause, - isouter=outerjoin, join_to_left=join_to_left) - except sa_exc.ArgumentError, ae: - raise sa_exc.InvalidRequestError( - "Could not find a FROM clause to join from. " - "Tried joining to %s, but got: %s" % (right, ae)) - - self._from_obj = self._from_obj + (clause,) - - def _reset_joinpoint(self): - self._joinpoint = self._joinpath - self._filter_aliases = None - - @_generative(_no_statement_condition) - def reset_joinpoint(self): - """Return a new :class:`.Query`, where the "join point" has - been reset back to the base FROM entities of the query. - - This method is usually used in conjunction with the - ``aliased=True`` feature of the :meth:`~.Query.join` - method. See the example in :meth:`~.Query.join` for how - this is used. - - """ - self._reset_joinpoint() - - @_generative(_no_clauseelement_condition) - def select_from(self, *from_obj): - """Set the FROM clause of this :class:`.Query` explicitly. - - Sending a mapped class or entity here effectively replaces the - "left edge" of any calls to :meth:`~.Query.join`, when no - joinpoint is otherwise established - usually, the default "join - point" is the leftmost entity in the :class:`~.Query` object's - list of entities to be selected. - - Mapped entities or plain :class:`~.Table` or other selectables - can be sent here which will form the default FROM clause. - - See the example in :meth:`~.Query.join` for a typical - usage of :meth:`~.Query.select_from`. - - """ - obj = [] - for fo in from_obj: - if _is_mapped_class(fo): - mapper, selectable, is_aliased_class = _entity_info(fo) - self._select_from_entity = fo - obj.append(selectable) - elif not isinstance(fo, expression.FromClause): - raise sa_exc.ArgumentError( - "select_from() accepts FromClause objects only.") - else: - obj.append(fo) - - self._set_select_from(*obj) - - def __getitem__(self, item): - if isinstance(item, slice): - start, stop, step = util.decode_slice(item) - - if isinstance(stop, int) and \ - isinstance(start, int) and \ - stop - start <= 0: - return [] - - # perhaps we should execute a count() here so that we - # can still use LIMIT/OFFSET ? 
-            elif (isinstance(start, int) and start < 0) \
-                or (isinstance(stop, int) and stop < 0):
-                return list(self)[item]
-
-            res = self.slice(start, stop)
-            if step is not None:
-                return list(res)[None:None:item.step]
-            else:
-                return list(res)
-        else:
-            if item == -1:
-                return list(self)[-1]
-            else:
-                return list(self[item:item+1])[0]
-
-    @_generative(_no_statement_condition)
-    def slice(self, start, stop):
-        """apply LIMIT/OFFSET to the ``Query`` based on a range
-        and return the newly resulting ``Query``."""
-
-        if start is not None and stop is not None:
-            self._offset = (self._offset or 0) + start
-            self._limit = stop - start
-        elif start is None and stop is not None:
-            self._limit = stop
-        elif start is not None and stop is None:
-            self._offset = (self._offset or 0) + start
-
-        if self._offset == 0:
-            self._offset = None
-
-    @_generative(_no_statement_condition)
-    def limit(self, limit):
-        """Apply a ``LIMIT`` to the query and return the newly resulting
-        ``Query``.
-
-        """
-        self._limit = limit
-
-    @_generative(_no_statement_condition)
-    def offset(self, offset):
-        """Apply an ``OFFSET`` to the query and return the newly resulting
-        ``Query``.
-
-        """
-        self._offset = offset
-
-    @_generative(_no_statement_condition)
-    def distinct(self, *criterion):
-        """Apply a ``DISTINCT`` to the query and return the newly resulting
-        ``Query``.
-
-        :param \*criterion: optional column expressions.  When present,
-          the Postgresql dialect will render a ``DISTINCT ON (<expressions>)``
-          construct.
-
-        """
-        if not criterion:
-            self._distinct = True
-        else:
-            criterion = self._adapt_col_list(criterion)
-            if isinstance(self._distinct, list):
-                self._distinct += criterion
-            else:
-                self._distinct = criterion
-
-    @_generative()
-    def prefix_with(self, *prefixes):
-        """Apply the prefixes to the query and return the newly resulting
-        ``Query``.
-
-        :param \*prefixes: optional prefixes, typically strings,
-          not using any commas.  This is particularly useful for
-          MySQL keywords.
-
-        e.g.::
-
-            query = sess.query(User.name).\\
-                prefix_with('HIGH_PRIORITY').\\
-                prefix_with('SQL_SMALL_RESULT', 'ALL')
-
-        Would render::
-
-            SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL users.name AS users_name
-            FROM users
-
-        .. versionadded:: 0.7.7
-
-        """
-        if self._prefixes:
-            self._prefixes += prefixes
-        else:
-            self._prefixes = prefixes
-
-    def all(self):
-        """Return the results represented by this ``Query`` as a list.
-
-        This results in an execution of the underlying query.
-
-        """
-        return list(self)
-
-    @_generative(_no_clauseelement_condition)
-    def from_statement(self, statement):
-        """Execute the given SELECT statement and return results.
-
-        This method bypasses all internal statement compilation, and the
-        statement is executed without modification.
-
-        The statement argument is either a string, a ``select()`` construct,
-        or a ``text()`` construct, and should return the set of columns
-        appropriate to the entity class represented by this ``Query``.
-
-        """
-        if isinstance(statement, basestring):
-            statement = sql.text(statement)
-
-        if not isinstance(statement,
-                            (expression._TextClause,
-                            expression._SelectBase)):
-            raise sa_exc.ArgumentError(
-                            "from_statement accepts text(), select(), "
-                            "and union() objects only.")
-
-        self._statement = statement
-
-    def first(self):
-        """Return the first result of this ``Query`` or
-        None if the result doesn't contain any row.
-
-        first() applies a limit of one within the generated SQL, so that
-        only one primary entity row is generated on the server side
-        (note this may consist of multiple result rows if join-loaded
-        collections are present).
-
-        Calling ``first()`` results in an execution of the underlying query.
-
-        """
-        if self._statement is not None:
-            ret = list(self)[0:1]
-        else:
-            ret = list(self[0:1])
-        if len(ret) > 0:
-            return ret[0]
-        else:
-            return None
-
-    def one(self):
-        """Return exactly one result or raise an exception.
-
-        Raises ``sqlalchemy.orm.exc.NoResultFound`` if the query selects
-        no rows.  Raises ``sqlalchemy.orm.exc.MultipleResultsFound``
-        if multiple object identities are returned, or if multiple
-        rows are returned for a query that does not return object
-        identities.
-
-        Note that an entity query, that is, one which selects one or
-        more mapped classes as opposed to individual column attributes,
-        may ultimately represent many rows but only one row of
-        unique entity or entities - this is a successful result for one().
-
-        Calling ``one()`` results in an execution of the underlying query.
-
-        .. versionchanged:: 0.6
-            ``one()`` fully fetches all results instead of applying
-            any kind of limit, so that the "unique"-ing of entities does not
-            conceal multiple object identities.
-
-        """
-        ret = list(self)
-
-        l = len(ret)
-        if l == 1:
-            return ret[0]
-        elif l == 0:
-            raise orm_exc.NoResultFound("No row was found for one()")
-        else:
-            raise orm_exc.MultipleResultsFound(
-                "Multiple rows were found for one()")
-
-    def scalar(self):
-        """Return the first element of the first result or None
-        if no rows present.  If multiple rows are returned,
-        raises MultipleResultsFound.
-
-          >>> session.query(Item).scalar()
-          <Item>
-          >>> session.query(Item.id).scalar()
-          1
-          >>> session.query(Item.id).filter(Item.id < 0).scalar()
-          None
-          >>> session.query(Item.id, Item.name).scalar()
-          1
-          >>> session.query(func.count(Parent.id)).scalar()
-          20
-
-        This results in an execution of the underlying query.
-
-        """
-        try:
-            ret = self.one()
-            if not isinstance(ret, tuple):
-                return ret
-            return ret[0]
-        except orm_exc.NoResultFound:
-            return None
-
-    def __iter__(self):
-        context = self._compile_context()
-        context.statement.use_labels = True
-        if self._autoflush and not self._populate_existing:
-            self.session._autoflush()
-        return self._execute_and_instances(context)
-
-    def _connection_from_session(self, **kw):
-        conn = self.session.connection(
-                        **kw)
-        if self._execution_options:
-            conn = conn.execution_options(**self._execution_options)
-        return conn
-
-    def _execute_and_instances(self, querycontext):
-        conn = self._connection_from_session(
-                        mapper = self._mapper_zero_or_none(),
-                        clause = querycontext.statement,
-                        close_with_result=True)
-
-        result = conn.execute(querycontext.statement, self._params)
-        return self.instances(result, querycontext)
-
-    @property
-    def column_descriptions(self):
-        """Return metadata about the columns which would be
-        returned by this :class:`.Query`.
- - Format is a list of dictionaries:: - - user_alias = aliased(User, name='user2') - q = sess.query(User, User.id, user_alias) - - # this expression: - q.column_descriptions - - # would return: - [ - { - 'name':'User', - 'type':User, - 'aliased':False, - 'expr':User, - }, - { - 'name':'id', - 'type':Integer(), - 'aliased':False, - 'expr':User.id, - }, - { - 'name':'user2', - 'type':User, - 'aliased':True, - 'expr':user_alias - } - ] - - """ - return [ - { - 'name':ent._label_name, - 'type':ent.type, - 'aliased':getattr(ent, 'is_aliased_class', False), - 'expr':ent.expr - } - for ent in self._entities - ] - - def instances(self, cursor, __context=None): - """Given a ResultProxy cursor as returned by connection.execute(), - return an ORM result as an iterator. - - e.g.:: - - result = engine.execute("select * from users") - for u in session.query(User).instances(result): - print u - """ - session = self.session - - context = __context - if context is None: - context = QueryContext(self) - - context.runid = _new_runid() - - filter_fns = [ent.filter_fn - for ent in self._entities] - filtered = id in filter_fns - - single_entity = filtered and len(self._entities) == 1 - - if filtered: - if single_entity: - filter_fn = id - else: - def filter_fn(row): - return tuple(fn(x) for x, fn in zip(row, filter_fns)) - - custom_rows = single_entity and \ - self._entities[0].mapper.dispatch.append_result - - (process, labels) = \ - zip(*[ - query_entity.row_processor(self, context, custom_rows) - for query_entity in self._entities - ]) - - - while True: - context.progress = {} - context.partials = {} - - if self._yield_per: - fetch = cursor.fetchmany(self._yield_per) - if not fetch: - break - else: - fetch = cursor.fetchall() - - if custom_rows: - rows = [] - for row in fetch: - process[0](row, rows) - elif single_entity: - rows = [process[0](row, None) for row in fetch] - else: - rows = [util.NamedTuple([proc(row, None) for proc in process], - labels) for row in fetch] - - if filtered: - rows = util.unique_list(rows, filter_fn) - - if context.refresh_state and self._only_load_props \ - and context.refresh_state in context.progress: - context.refresh_state.commit( - context.refresh_state.dict, self._only_load_props) - context.progress.pop(context.refresh_state) - - session._finalize_loaded(context.progress) - - for ii, (dict_, attrs) in context.partials.iteritems(): - ii.commit(dict_, attrs) - - for row in rows: - yield row - - if not self._yield_per: - break - - def merge_result(self, iterator, load=True): - """Merge a result into this :class:`.Query` object's Session. - - Given an iterator returned by a :class:`.Query` of the same structure as this - one, return an identical iterator of results, with all mapped - instances merged into the session using :meth:`.Session.merge`. This is an - optimized method which will merge all mapped instances, preserving the - structure of the result rows and unmapped columns with less method - overhead than that of calling :meth:`.Session.merge` explicitly for each - value. - - The structure of the results is determined based on the column list of - this :class:`.Query` - if these do not correspond, unchecked errors will occur. - - The 'load' argument is the same as that of :meth:`.Session.merge`. - - For an example of how :meth:`~.Query.merge_result` is used, see - the source code for the example :ref:`examples_caching`, where - :meth:`~.Query.merge_result` is used to efficiently restore state - from a cache back into a target :class:`.Session`. 
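To make the caching pattern referenced above concrete, a small editor-added sketch along the lines of the :ref:`examples_caching` demo; ``_query_cache`` and ``cached_all`` are illustrative names, not part of this module::

    _query_cache = {}

    def cached_all(query, cache_key):
        # execute once and keep the (detached) result rows; later
        # calls merge the cached rows into the current session
        # without emitting SQL again
        if cache_key not in _query_cache:
            _query_cache[cache_key] = query.all()
        return query.merge_result(_query_cache[cache_key], load=False)

    users = list(cached_all(session.query(User), 'all_users'))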
- - """ - - session = self.session - if load: - # flush current contents if we expect to load data - session._autoflush() - - autoflush = session.autoflush - try: - session.autoflush = False - single_entity = len(self._entities) == 1 - if single_entity: - if isinstance(self._entities[0], _MapperEntity): - result = [session._merge( - attributes.instance_state(instance), - attributes.instance_dict(instance), - load=load, _recursive={}) - for instance in iterator] - else: - result = list(iterator) - else: - mapped_entities = [i for i, e in enumerate(self._entities) - if isinstance(e, _MapperEntity)] - result = [] - keys = [ent._label_name for ent in self._entities] - for row in iterator: - newrow = list(row) - for i in mapped_entities: - if newrow[i] is not None: - newrow[i] = session._merge( - attributes.instance_state(newrow[i]), - attributes.instance_dict(newrow[i]), - load=load, _recursive={}) - result.append(util.NamedTuple(newrow, keys)) - - return iter(result) - finally: - session.autoflush = autoflush - - @classmethod - def _get_from_identity(cls, session, key, passive): - """Look up the given key in the given session's identity map, - check the object for expired state if found. - - """ - instance = session.identity_map.get(key) - if instance is not None: - - state = attributes.instance_state(instance) - - # expired - ensure it still exists - if state.expired: - if passive is attributes.PASSIVE_NO_FETCH: - # TODO: no coverage here - return attributes.PASSIVE_NO_RESULT - elif passive is attributes.PASSIVE_NO_FETCH_RELATED: - # this mode is used within a flush and the instance's - # expired state will be checked soon enough, if necessary - return instance - try: - state(passive) - except orm_exc.ObjectDeletedError: - session._remove_newly_deleted(state) - return None - return instance - else: - return None - - def _load_on_ident(self, key, refresh_state=None, lockmode=None, - only_load_props=None): - """Load the given identity key from the database.""" - - lockmode = lockmode or self._lockmode - - if key is not None: - ident = key[1] - else: - ident = None - - if refresh_state is None: - q = self._clone() - q._get_condition() - else: - q = self._clone() - - if ident is not None: - mapper = self._mapper_zero() - - (_get_clause, _get_params) = mapper._get_clause - - # None present in ident - turn those comparisons - # into "IS NULL" - if None in ident: - nones = set([ - _get_params[col].key for col, value in - zip(mapper.primary_key, ident) if value is None - ]) - _get_clause = sql_util.adapt_criterion_to_null( - _get_clause, nones) - - _get_clause = q._adapt_clause(_get_clause, True, False) - q._criterion = _get_clause - - params = dict([ - (_get_params[primary_key].key, id_val) - for id_val, primary_key in zip(ident, mapper.primary_key) - ]) - - q._params = params - - if lockmode is not None: - q._lockmode = lockmode - q._get_options( - populate_existing=bool(refresh_state), - version_check=(lockmode is not None), - only_load_props=only_load_props, - refresh_state=refresh_state) - q._order_by = None - - try: - return q.one() - except orm_exc.NoResultFound: - return None - - @property - def _select_args(self): - return { - 'limit':self._limit, - 'offset':self._offset, - 'distinct':self._distinct, - 'prefixes':self._prefixes, - 'group_by':self._group_by or None, - 'having':self._having - } - - @property - def _should_nest_selectable(self): - kwargs = self._select_args - return (kwargs.get('limit') is not None or - kwargs.get('offset') is not None or - kwargs.get('distinct', False)) - - 
-    def count(self):
-        """Return a count of rows this Query would return.
-
-        This generates the SQL for this Query as follows::
-
-            SELECT count(1) AS count_1 FROM (
-                SELECT <rest of query follows...>
-            ) AS anon_1
-
-        .. versionchanged:: 0.7
-            The above scheme is newly refined as of 0.7b3.
-
-        For fine-grained control over specific columns
-        to count, to skip the usage of a subquery or
-        otherwise control the FROM clause,
-        or to use other aggregate functions,
-        use :attr:`~sqlalchemy.sql.expression.func` expressions in conjunction
-        with :meth:`~.Session.query`, i.e.::
-
-            from sqlalchemy import func
-
-            # count User records, without
-            # using a subquery.
-            session.query(func.count(User.id))
-
-            # return count of user "id" grouped
-            # by "name"
-            session.query(func.count(User.id)).\\
-                    group_by(User.name)
-
-            from sqlalchemy import distinct
-
-            # count distinct "name" values
-            session.query(func.count(distinct(User.name)))
-
-        """
-        col = sql.func.count(sql.literal_column('*'))
-        return self.from_self(col).scalar()
-
-    def delete(self, synchronize_session='evaluate'):
-        """Perform a bulk delete query.
-
-        Deletes rows matched by this query from the database.
-
-        :param synchronize_session: chooses the strategy for the removal of
-            matched objects from the session. Valid values are:
-
-            ``False`` - don't synchronize the session. This option is the most
-            efficient and is reliable once the session is expired, which
-            typically occurs after a commit(), or explicitly using
-            expire_all(). Before the expiration, objects may still remain in
-            the session which were in fact deleted which can lead to confusing
-            results if they are accessed via get() or already loaded
-            collections.
-
-            ``'fetch'`` - performs a select query before the delete to find
-            objects that are matched by the delete query and need to be
-            removed from the session. Matched objects are removed from the
-            session.
-
-            ``'evaluate'`` - Evaluate the query's criteria in Python straight on
-            the objects in the session. If evaluation of the criteria isn't
-            implemented, an error is raised.  In that case you probably
-            want to use the 'fetch' strategy as a fallback.
-
-            The expression evaluator currently doesn't account for differing
-            string collations between the database and Python.
-
-        Returns the number of rows deleted, excluding any cascades.
-
-        The method does *not* offer in-Python cascading of relationships - it
-        is assumed that ON DELETE CASCADE is configured for any foreign key
-        references which require it. The Session needs to be expired (occurs
-        automatically after commit(), or call expire_all()) in order for the
-        state of dependent objects subject to delete or delete-orphan cascade
-        to be correctly represented.
-
-        Note that the :meth:`.MapperEvents.before_delete` and
-        :meth:`.MapperEvents.after_delete`
-        events are **not** invoked from this method.  It instead
-        invokes :meth:`.SessionEvents.after_bulk_delete`.
-
-        """
-        #TODO: lots of duplication and ifs - probably needs to be
-        # refactored to strategies
-        #TODO: cascades need handling.
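Before the implementation, an editor-added usage sketch of the bulk delete; ``User`` is an assumed mapping::

    # bulk DELETE of all matching rows; 'fetch' first SELECTs the
    # matching primary keys so matched objects can also be removed
    # from the session
    deleted = session.query(User).\
        filter(User.name == 'ed').\
        delete(synchronize_session='fetch')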
- - if synchronize_session not in [False, 'evaluate', 'fetch']: - raise sa_exc.ArgumentError( - "Valid strategies for session " - "synchronization are False, 'evaluate' and " - "'fetch'") - self._no_select_modifiers("delete") - - self = self.enable_eagerloads(False) - - context = self._compile_context() - if len(context.statement.froms) != 1 or \ - not isinstance(context.statement.froms[0], schema.Table): - raise sa_exc.ArgumentError("Only deletion via a single table " - "query is currently supported") - primary_table = context.statement.froms[0] - - session = self.session - - if self._autoflush: - session._autoflush() - - if synchronize_session == 'evaluate': - try: - evaluator_compiler = evaluator.EvaluatorCompiler() - if self.whereclause is not None: - eval_condition = evaluator_compiler.process( - self.whereclause) - else: - def eval_condition(obj): - return True - - except evaluator.UnevaluatableError: - raise sa_exc.InvalidRequestError( - "Could not evaluate current criteria in Python. " - "Specify 'fetch' or False for the synchronize_session " - "parameter.") - - target_cls = self._mapper_zero().class_ - - #TODO: detect when the where clause is a trivial primary key match - objs_to_expunge = [ - obj for (cls, pk),obj in - session.identity_map.iteritems() - if issubclass(cls, target_cls) and - eval_condition(obj)] - - elif synchronize_session == 'fetch': - #TODO: use RETURNING when available - select_stmt = context.statement.with_only_columns( - primary_table.primary_key) - matched_rows = session.execute( - select_stmt, - params=self._params).fetchall() - - delete_stmt = sql.delete(primary_table, context.whereclause) - - result = session.execute(delete_stmt, params=self._params) - - if synchronize_session == 'evaluate': - for obj in objs_to_expunge: - session._remove_newly_deleted(attributes.instance_state(obj)) - elif synchronize_session == 'fetch': - target_mapper = self._mapper_zero() - for primary_key in matched_rows: - identity_key = target_mapper.identity_key_from_primary_key( - list(primary_key)) - if identity_key in session.identity_map: - session._remove_newly_deleted( - attributes.instance_state( - session.identity_map[identity_key] - ) - ) - - session.dispatch.after_bulk_delete(session, self, context, result) - - return result.rowcount - - def update(self, values, synchronize_session='evaluate'): - """Perform a bulk update query. - - Updates rows matched by this query in the database. - - :param values: a dictionary with attributes names as keys and literal - values or sql expressions as values. - - :param synchronize_session: chooses the strategy to update the - attributes on objects in the session. Valid values are: - - ``False`` - don't synchronize the session. This option is the most - efficient and is reliable once the session is expired, which - typically occurs after a commit(), or explicitly using - expire_all(). Before the expiration, updated objects may still - remain in the session with stale values on their attributes, which - can lead to confusing results. - - ``'fetch'`` - performs a select query before the update to find - objects that are matched by the update query. The updated - attributes are expired on matched objects. - - ``'evaluate'`` - Evaluate the Query's criteria in Python straight on - the objects in the session. If evaluation of the criteria isn't - implemented, an exception is raised. - - The expression evaluator currently doesn't account for differing - string collations between the database and Python. 
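To ground the three strategies described above, an editor-added sketch; ``User`` is an assumed mapping::

    # bulk UPDATE; 'evaluate' applies the criterion and the new value
    # in Python to matching User objects already in the session
    updated = session.query(User).\
        filter(User.name == 'jack').\
        update({User.name: 'ed'}, synchronize_session='evaluate')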
-
-        Returns the number of rows matched by the update.
-
-        The method does *not* offer in-Python cascading of relationships - it
-        is assumed that ON UPDATE CASCADE is configured for any foreign key
-        references which require it.
-
-        The Session needs to be expired (occurs automatically after commit(),
-        or call expire_all()) in order for the state of dependent objects
-        subject to foreign key cascade to be correctly represented.
-
-        Note that the :meth:`.MapperEvents.before_update` and
-        :meth:`.MapperEvents.after_update`
-        events are **not** invoked from this method.  It instead
-        invokes :meth:`.SessionEvents.after_bulk_update`.
-
-        """
-
-        #TODO: value keys need to be mapped to corresponding sql cols and
-        # instr.attr.s to string keys
-        #TODO: updates of manytoone relationships need to be converted to
-        # fk assignments
-        #TODO: cascades need handling.
-
-        if synchronize_session == 'expire':
-            util.warn_deprecated("The 'expire' value as applied to "
-                                    "the synchronize_session argument of "
-                                    "query.update() is now called 'fetch'")
-            synchronize_session = 'fetch'
-
-        if synchronize_session not in [False, 'evaluate', 'fetch']:
-            raise sa_exc.ArgumentError(
-                            "Valid strategies for session synchronization "
-                            "are False, 'evaluate' and 'fetch'")
-        self._no_select_modifiers("update")
-
-        self = self.enable_eagerloads(False)
-
-        context = self._compile_context()
-        if len(context.statement.froms) != 1 or \
-                    not isinstance(context.statement.froms[0], schema.Table):
-            raise sa_exc.ArgumentError(
-                            "Only update via a single table query is "
-                            "currently supported")
-        primary_table = context.statement.froms[0]
-
-        session = self.session
-
-        if self._autoflush:
-            session._autoflush()
-
-        if synchronize_session == 'evaluate':
-            try:
-                evaluator_compiler = evaluator.EvaluatorCompiler()
-                if self.whereclause is not None:
-                    eval_condition = evaluator_compiler.process(
-                                            self.whereclause)
-                else:
-                    def eval_condition(obj):
-                        return True
-
-                value_evaluators = {}
-                for key, value in values.iteritems():
-                    key = _attr_as_key(key)
-                    value_evaluators[key] = evaluator_compiler.process(
-                                expression._literal_as_binds(value))
-            except evaluator.UnevaluatableError:
-                raise sa_exc.InvalidRequestError(
-                        "Could not evaluate current criteria in Python. "
-                        "Specify 'fetch' or False for the "
-                        "synchronize_session parameter.")
-            target_cls = self._mapper_zero().class_
-            matched_objects = []
-            for (cls, pk), obj in session.identity_map.iteritems():
-                evaluated_keys = value_evaluators.keys()
-
-                if issubclass(cls, target_cls) and eval_condition(obj):
-                    matched_objects.append(obj)
-
-        elif synchronize_session == 'fetch':
-            select_stmt = context.statement.with_only_columns(
-                                    primary_table.primary_key)
-            matched_rows = session.execute(
-                                select_stmt,
-                                params=self._params).fetchall()
-
-        update_stmt = sql.update(primary_table, context.whereclause, values)
-
-        result = session.execute(update_stmt, params=self._params)
-
-        if synchronize_session == 'evaluate':
-            target_cls = self._mapper_zero().class_
-
-            for obj in matched_objects:
-                state, dict_ = attributes.instance_state(obj),\
-                                    attributes.instance_dict(obj)
-
-                # only evaluate unmodified attributes
-                to_evaluate = state.unmodified.intersection(
-                                                        evaluated_keys)
-                for key in to_evaluate:
-                    dict_[key] = value_evaluators[key](obj)
-
-                state.commit(dict_, list(to_evaluate))
-
-                # expire attributes with pending changes
-                # (there was no autoflush, so they are overwritten)
-                state.expire_attributes(dict_,
-                                set(evaluated_keys).
- difference(to_evaluate)) - - elif synchronize_session == 'fetch': - target_mapper = self._mapper_zero() - - for primary_key in matched_rows: - identity_key = target_mapper.identity_key_from_primary_key( - list(primary_key)) - if identity_key in session.identity_map: - session.expire( - session.identity_map[identity_key], - [_attr_as_key(k) for k in values] - ) - - session.dispatch.after_bulk_update(session, self, context, result) - - return result.rowcount - - def _compile_context(self, labels=True): - context = QueryContext(self) - - if context.statement is not None: - return context - - if self._lockmode: - try: - for_update = {'read': 'read', - 'read_nowait': 'read_nowait', - 'update': True, - 'update_nowait': 'nowait', - None: False}[self._lockmode] - except KeyError: - raise sa_exc.ArgumentError( - "Unknown lockmode %r" % self._lockmode) - else: - for_update = False - - for entity in self._entities: - entity.setup_context(self, context) - - for rec in context.create_eager_joins: - strategy = rec[0] - strategy(*rec[1:]) - - eager_joins = context.eager_joins.values() - - if context.from_clause: - # "load from explicit FROMs" mode, - # i.e. when select_from() or join() is used - froms = list(context.from_clause) - else: - # "load from discrete FROMs" mode, - # i.e. when each _MappedEntity has its own FROM - froms = context.froms - - if self._enable_single_crit: - self._adjust_for_single_inheritance(context) - - if not context.primary_columns: - if self._only_load_props: - raise sa_exc.InvalidRequestError( - "No column-based properties specified for " - "refresh operation. Use session.expire() " - "to reload collections and related items.") - else: - raise sa_exc.InvalidRequestError( - "Query contains no columns with which to " - "SELECT from.") - - if context.multi_row_eager_loaders and self._should_nest_selectable: - # for eager joins present and LIMIT/OFFSET/DISTINCT, - # wrap the query inside a select, - # then append eager joins onto that - - if context.order_by: - order_by_col_expr = list( - chain(*[ - sql_util.unwrap_order_by(o) - for o in context.order_by - ]) - ) - else: - context.order_by = None - order_by_col_expr = [] - - inner = sql.select( - context.primary_columns + order_by_col_expr, - context.whereclause, - from_obj=froms, - use_labels=labels, - correlate=False, - # TODO: this order_by is only needed if - # LIMIT/OFFSET is present in self._select_args, - # else the application on the outside is enough - order_by=context.order_by, - **self._select_args - ) - - for hint in self._with_hints: - inner = inner.with_hint(*hint) - - if self._correlate: - inner = inner.correlate(*self._correlate) - - inner = inner.alias() - - equivs = self.__all_equivs() - - context.adapter = sql_util.ColumnAdapter(inner, equivs) - - statement = sql.select( - [inner] + context.secondary_columns, - for_update=for_update, - use_labels=labels) - - from_clause = inner - for eager_join in eager_joins: - # EagerLoader places a 'stop_on' attribute on the join, - # giving us a marker as to where the "splice point" of - # the join should be - from_clause = sql_util.splice_joins( - from_clause, - eager_join, eager_join.stop_on) - - statement.append_from(from_clause) - - if context.order_by: - statement.append_order_by( - *context.adapter.copy_and_process( - context.order_by - ) - ) - - statement.append_order_by(*context.eager_order_by) - else: - if not context.order_by: - context.order_by = None - - if self._distinct and context.order_by: - order_by_col_expr = list( - chain(*[ - 
sql_util.unwrap_order_by(o) - for o in context.order_by - ]) - ) - context.primary_columns += order_by_col_expr - - froms += tuple(context.eager_joins.values()) - - statement = sql.select( - context.primary_columns + - context.secondary_columns, - context.whereclause, - from_obj=froms, - use_labels=labels, - for_update=for_update, - correlate=False, - order_by=context.order_by, - **self._select_args - ) - - for hint in self._with_hints: - statement = statement.with_hint(*hint) - - if self._correlate: - statement = statement.correlate(*self._correlate) - - if context.eager_order_by: - statement.append_order_by(*context.eager_order_by) - - context.statement = statement - - return context - - def _adjust_for_single_inheritance(self, context): - """Apply single-table-inheritance filtering. - - For all distinct single-table-inheritance mappers represented in the - columns clause of this query, add criterion to the WHERE clause of the - given QueryContext such that only the appropriate subtypes are - selected from the total results. - - """ - for entity, (mapper, adapter, s, i, w) in \ - self._mapper_adapter_map.iteritems(): - if entity in self._join_entities: - continue - single_crit = mapper._single_table_criterion - if single_crit is not None: - if adapter: - single_crit = adapter.traverse(single_crit) - single_crit = self._adapt_clause(single_crit, False, False) - context.whereclause = sql.and_( - context.whereclause, single_crit) - - def __str__(self): - return str(self._compile_context().statement) - - -class _QueryEntity(object): - """represent an entity column returned within a Query result.""" - - def __new__(cls, *args, **kwargs): - if cls is _QueryEntity: - entity = args[1] - if not isinstance(entity, basestring) and \ - _is_mapped_class(entity): - cls = _MapperEntity - else: - cls = _ColumnEntity - return object.__new__(cls) - - def _clone(self): - q = self.__class__.__new__(self.__class__) - q.__dict__ = self.__dict__.copy() - return q - -class _MapperEntity(_QueryEntity): - """mapper/class/AliasedClass entity""" - - def __init__(self, query, entity): - self.primary_entity = not query._entities - query._entities.append(self) - - self.entities = [entity] - self.entity_zero = self.expr = entity - - def setup_entity(self, entity, mapper, adapter, - from_obj, is_aliased_class, with_polymorphic): - self.mapper = mapper - self.adapter = adapter - self.selectable = from_obj - self._with_polymorphic = with_polymorphic - self._polymorphic_discriminator = None - self.is_aliased_class = is_aliased_class - if is_aliased_class: - self.path_entity = self.entity_zero = entity - self._path = (entity,) - self._label_name = self.entity_zero._sa_label_name - self._reduced_path = (self.path_entity, ) - else: - self.path_entity = mapper - self._path = (mapper,) - self._reduced_path = (mapper.base_mapper, ) - self.entity_zero = mapper - self._label_name = self.mapper.class_.__name__ - - - def set_with_polymorphic(self, query, cls_or_mappers, - selectable, discriminator): - if cls_or_mappers is None: - query._reset_polymorphic_adapter(self.mapper) - return - - mappers, from_obj = self.mapper._with_polymorphic_args( - cls_or_mappers, selectable) - self._with_polymorphic = mappers - self._polymorphic_discriminator = discriminator - - # TODO: do the wrapped thing here too so that - # with_polymorphic() can be applied to aliases - if not self.is_aliased_class: - self.selectable = from_obj - query._mapper_loads_polymorphically_with(self.mapper, - sql_util.ColumnAdapter(from_obj, - 
self.mapper._equivalent_columns)) - - filter_fn = id - - @property - def type(self): - return self.mapper.class_ - - @property - def entity_zero_or_selectable(self): - return self.entity_zero - - def corresponds_to(self, entity): - if _is_aliased_class(entity) or self.is_aliased_class: - return entity is self.path_entity - else: - return entity.common_parent(self.path_entity) - - def adapt_to_selectable(self, query, sel): - query._entities.append(self) - - def _get_entity_clauses(self, query, context): - - adapter = None - if not self.is_aliased_class and query._polymorphic_adapters: - adapter = query._polymorphic_adapters.get(self.mapper, None) - - if not adapter and self.adapter: - adapter = self.adapter - - if adapter: - if query._from_obj_alias: - ret = adapter.wrap(query._from_obj_alias) - else: - ret = adapter - else: - ret = query._from_obj_alias - - return ret - - def row_processor(self, query, context, custom_rows): - adapter = self._get_entity_clauses(query, context) - - if context.adapter and adapter: - adapter = adapter.wrap(context.adapter) - elif not adapter: - adapter = context.adapter - - # polymorphic mappers which have concrete tables in - # their hierarchy usually - # require row aliasing unconditionally. - if not adapter and self.mapper._requires_row_aliasing: - adapter = sql_util.ColumnAdapter( - self.selectable, - self.mapper._equivalent_columns) - - if self.primary_entity: - _instance = self.mapper._instance_processor( - context, - self._path, - self._reduced_path, - adapter, - only_load_props=query._only_load_props, - refresh_state=context.refresh_state, - polymorphic_discriminator= - self._polymorphic_discriminator - ) - else: - _instance = self.mapper._instance_processor( - context, - self._path, - self._reduced_path, - adapter, - polymorphic_discriminator= - self._polymorphic_discriminator) - - return _instance, self._label_name - - def setup_context(self, query, context): - adapter = self._get_entity_clauses(query, context) - - context.froms += (self.selectable,) - - if context.order_by is False and self.mapper.order_by: - context.order_by = self.mapper.order_by - - # apply adaptation to the mapper's order_by if needed. 
- if adapter: - context.order_by = adapter.adapt_list( - util.to_list( - context.order_by - ) - ) - - if self._with_polymorphic: - poly_properties = self.mapper._iterate_polymorphic_properties( - self._with_polymorphic) - else: - poly_properties = self.mapper._polymorphic_properties - - for value in poly_properties: - if query._only_load_props and \ - value.key not in query._only_load_props: - continue - value.setup( - context, - self, - self._path, - self._reduced_path, - adapter, - only_load_props=query._only_load_props, - column_collection=context.primary_columns - ) - - if self._polymorphic_discriminator is not None: - if adapter: - pd = adapter.columns[self._polymorphic_discriminator] - else: - pd = self._polymorphic_discriminator - context.primary_columns.append(pd) - - def __str__(self): - return str(self.mapper) - -class _ColumnEntity(_QueryEntity): - """Column/expression based entity.""" - - def __init__(self, query, column, namespace=None): - self.expr = column - self.namespace = namespace - - if isinstance(column, basestring): - column = sql.literal_column(column) - self._label_name = column.name - elif isinstance(column, ( - attributes.QueryableAttribute, - interfaces.PropComparator - )): - self._label_name = column.key - column = column.__clause_element__() - else: - self._label_name = getattr(column, 'key', None) - - if not isinstance(column, expression.ColumnElement) and \ - hasattr(column, '_select_iterable'): - for c in column._select_iterable: - if c is column: - break - _ColumnEntity(query, c, namespace=column) - - if c is not column: - return - - if not isinstance(column, sql.ColumnElement): - raise sa_exc.InvalidRequestError( - "SQL expression, column, or mapped entity " - "expected - got '%r'" % (column, ) - ) - - # If the Column is unnamed, give it a - # label() so that mutable column expressions - # can be located in the result even - # if the expression's identity has been changed - # due to adaption. - if not column._label: - column = column.label(None) - - query._entities.append(self) - - self.column = column - self.froms = set() - - # look for ORM entities represented within the - # given expression. 
Try to count only entities - # for columns whose FROM object is in the actual list - # of FROMs for the overall expression - this helps - # subqueries which were built from ORM constructs from - # leaking out their entities into the main select construct - self.actual_froms = actual_froms = set(column._from_objects) - - self.entities = util.OrderedSet( - elem._annotations['parententity'] - for elem in visitors.iterate(column, {}) - if 'parententity' in elem._annotations - and actual_froms.intersection(elem._from_objects) - ) - - if self.entities: - self.entity_zero = list(self.entities)[0] - elif self.namespace is not None: - self.entity_zero = self.namespace - else: - self.entity_zero = None - - @property - def entity_zero_or_selectable(self): - if self.entity_zero is not None: - return self.entity_zero - elif self.actual_froms: - return list(self.actual_froms)[0] - else: - return None - - @property - def type(self): - return self.column.type - - def filter_fn(self, item): - return item - - def adapt_to_selectable(self, query, sel): - c = _ColumnEntity(query, sel.corresponding_column(self.column)) - c._label_name = self._label_name - c.entity_zero = self.entity_zero - c.entities = self.entities - - def setup_entity(self, entity, mapper, adapter, from_obj, - is_aliased_class, with_polymorphic): - if 'selectable' not in self.__dict__: - self.selectable = from_obj - self.froms.add(from_obj) - - def corresponds_to(self, entity): - if self.entity_zero is None: - return False - elif _is_aliased_class(entity): - return entity is self.entity_zero - else: - return not _is_aliased_class(self.entity_zero) and \ - entity.common_parent(self.entity_zero) - - def _resolve_expr_against_query_aliases(self, query, expr, context): - return query._adapt_clause(expr, False, True) - - def row_processor(self, query, context, custom_rows): - column = self._resolve_expr_against_query_aliases( - query, self.column, context) - - if context.adapter: - column = context.adapter.columns[column] - - def proc(row, result): - return row[column] - - return proc, self._label_name - - def setup_context(self, query, context): - column = self._resolve_expr_against_query_aliases( - query, self.column, context) - context.froms += tuple(self.froms) - context.primary_columns.append(column) - - def __str__(self): - return str(self.column) - -log.class_logger(Query) - -class QueryContext(object): - multi_row_eager_loaders = False - adapter = None - froms = () - - def __init__(self, query): - - if query._statement is not None: - if isinstance(query._statement, expression._SelectBase) and \ - not query._statement.use_labels: - self.statement = query._statement.apply_labels() - else: - self.statement = query._statement - else: - self.statement = None - self.from_clause = query._from_obj - self.whereclause = query._criterion - self.order_by = query._order_by - - self.query = query - self.session = query.session - self.populate_existing = query._populate_existing - self.invoke_all_eagers = query._invoke_all_eagers - self.version_check = query._version_check - self.refresh_state = query._refresh_state - self.primary_columns = [] - self.secondary_columns = [] - self.eager_order_by = [] - self.eager_joins = {} - self.create_eager_joins = [] - self.propagate_options = set(o for o in query._with_options if - o.propagate_to_loaders) - self.attributes = query._attributes.copy() - -class AliasOption(interfaces.MapperOption): - - def __init__(self, alias): - self.alias = alias - - def process_query(self, query): - if isinstance(self.alias, 
basestring): - alias = query._mapper_zero().mapped_table.alias(self.alias) - else: - alias = self.alias - query._from_obj_alias = sql_util.ColumnAdapter(alias) - - -_new_runid = util.counter() diff --git a/libs/sqlalchemy/orm/scoping.py b/libs/sqlalchemy/orm/scoping.py deleted file mode 100644 index b5bd65b2..00000000 --- a/libs/sqlalchemy/orm/scoping.py +++ /dev/null @@ -1,135 +0,0 @@ -# orm/scoping.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy import exc as sa_exc -from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, warn -from sqlalchemy.orm import class_mapper -from sqlalchemy.orm import exc as orm_exc -from sqlalchemy.orm.session import Session - - -__all__ = ['ScopedSession'] - - -class ScopedSession(object): - """Provides thread-local management of Sessions. - - Typical invocation is via the :func:`.scoped_session` - function:: - - Session = scoped_session(sessionmaker()) - - The internal registry is accessible, - and by default is an instance of :class:`.ThreadLocalRegistry`. - - See also: :ref:`unitofwork_contextual`. - - """ - - def __init__(self, session_factory, scopefunc=None): - self.session_factory = session_factory - if scopefunc: - self.registry = ScopedRegistry(session_factory, scopefunc) - else: - self.registry = ThreadLocalRegistry(session_factory) - - def __call__(self, **kwargs): - if kwargs: - scope = kwargs.pop('scope', False) - if scope is not None: - if self.registry.has(): - raise sa_exc.InvalidRequestError( - "Scoped session is already present; " - "no new arguments may be specified.") - else: - sess = self.session_factory(**kwargs) - self.registry.set(sess) - return sess - else: - return self.session_factory(**kwargs) - else: - return self.registry() - - def remove(self): - """Dispose of the current contextual session.""" - - if self.registry.has(): - self.registry().close() - self.registry.clear() - - def configure(self, **kwargs): - """reconfigure the sessionmaker used by this ScopedSession.""" - - if self.registry.has(): - warn('At least one scoped session is already present. ' - ' configure() can not affect sessions that have ' - 'already been created.') - - self.session_factory.configure(**kwargs) - - def query_property(self, query_cls=None): - """return a class property which produces a `Query` object - against the class when called. - - e.g.:: - - Session = scoped_session(sessionmaker()) - - class MyClass(object): - query = Session.query_property() - - # after mappers are defined - result = MyClass.query.filter(MyClass.name=='foo').all() - - Produces instances of the session's configured query class by - default. To override and use a custom implementation, provide - a ``query_cls`` callable. The callable will be invoked with - the class's mapper as a positional argument and a session - keyword argument. - - There is no limit to the number of query properties placed on - a class. 
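A slightly fuller editor-added sketch of the pattern; ``engine`` and a declarative ``Base`` are assumed to exist::

    from sqlalchemy.orm import scoped_session, sessionmaker

    Session = scoped_session(sessionmaker(bind=engine))

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String(50))
        query = Session.query_property()

    # a thread-locally scoped Query against User
    ed = User.query.filter(User.name == 'ed').first()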
- - """ - class query(object): - def __get__(s, instance, owner): - try: - mapper = class_mapper(owner) - if mapper: - if query_cls: - # custom query class - return query_cls(mapper, session=self.registry()) - else: - # session's configured query class - return self.registry().query(mapper) - except orm_exc.UnmappedClassError: - return None - return query() - -def instrument(name): - def do(self, *args, **kwargs): - return getattr(self.registry(), name)(*args, **kwargs) - return do -for meth in Session.public_methods: - setattr(ScopedSession, meth, instrument(meth)) - -def makeprop(name): - def set(self, attr): - setattr(self.registry(), name, attr) - def get(self): - return getattr(self.registry(), name) - return property(get, set) -for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map', - 'is_active', 'autoflush', 'no_autoflush'): - setattr(ScopedSession, prop, makeprop(prop)) - -def clslevel(name): - def do(cls, *args, **kwargs): - return getattr(Session, name)(*args, **kwargs) - return classmethod(do) -for prop in ('close_all', 'object_session', 'identity_key'): - setattr(ScopedSession, prop, clslevel(prop)) - diff --git a/libs/sqlalchemy/orm/session.py b/libs/sqlalchemy/orm/session.py deleted file mode 100644 index 8994a339..00000000 --- a/libs/sqlalchemy/orm/session.py +++ /dev/null @@ -1,2103 +0,0 @@ -# orm/session.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Provides the Session class and related utilities.""" - -import weakref -from itertools import chain -from sqlalchemy import util, sql, engine, log, exc as sa_exc -from sqlalchemy.sql import util as sql_util, expression -from sqlalchemy.orm import ( - SessionExtension, attributes, exc, query, unitofwork, util as mapperutil, state - ) -from sqlalchemy.orm.util import object_mapper as _object_mapper -from sqlalchemy.orm.util import class_mapper as _class_mapper -from sqlalchemy.orm.util import ( - _class_to_mapper, _state_mapper, - ) -from sqlalchemy.orm.mapper import Mapper, _none_set -from sqlalchemy.orm.unitofwork import UOWTransaction -from sqlalchemy.orm import identity -from sqlalchemy import event -from sqlalchemy.orm.events import SessionEvents - -import sys - -__all__ = ['Session', 'SessionTransaction', 'SessionExtension'] - - -def sessionmaker(bind=None, class_=None, autoflush=True, autocommit=False, - expire_on_commit=True, **kwargs): - """Generate a custom-configured :class:`.Session` class. - - The returned object is a subclass of :class:`.Session`, which, when instantiated - with no arguments, uses the keyword arguments configured here as its - constructor arguments. - - It is intended that the :func:`.sessionmaker()` function be called within the - global scope of an application, and the returned class be made available - to the rest of the application as the single class used to instantiate - sessions. - - e.g.:: - - # global scope - Session = sessionmaker(autoflush=False) - - # later, in a local scope, create and use a session: - sess = Session() - - Any keyword arguments sent to the constructor itself will override the - "configured" keywords:: - - Session = sessionmaker() - - # bind an individual session to a connection - sess = Session(bind=connection) - - The class also includes a special classmethod ``configure()``, which - allows additional configurational options to take place after the custom - ``Session`` class has been generated. 
This is useful particularly for - defining the specific ``Engine`` (or engines) to which new instances of - ``Session`` should be bound:: - - Session = sessionmaker() - Session.configure(bind=create_engine('sqlite:///foo.db')) - - sess = Session() - - For options, see the constructor options for :class:`.Session`. - - """ - kwargs['bind'] = bind - kwargs['autoflush'] = autoflush - kwargs['autocommit'] = autocommit - kwargs['expire_on_commit'] = expire_on_commit - - if class_ is None: - class_ = Session - - class Sess(object): - def __init__(self, **local_kwargs): - for k in kwargs: - local_kwargs.setdefault(k, kwargs[k]) - super(Sess, self).__init__(**local_kwargs) - - @classmethod - def configure(self, **new_kwargs): - """(Re)configure the arguments for this sessionmaker. - - e.g.:: - - Session = sessionmaker() - - Session.configure(bind=create_engine('sqlite://')) - """ - kwargs.update(new_kwargs) - - - return type("SessionMaker", (Sess, class_), {}) - - -class SessionTransaction(object): - """A :class:`.Session`-level transaction. - - :class:`.SessionTransaction` is a mostly behind-the-scenes object - not normally referenced directly by application code. It coordinates - among multiple :class:`.Connection` objects, maintaining a database - transaction for each one individually, committing or rolling them - back all at once. It also provides optional two-phase commit behavior - which can augment this coordination operation. - - The :attr:`.Session.transaction` attribute of :class:`.Session` refers to the - current :class:`.SessionTransaction` object in use, if any. - - - A :class:`.SessionTransaction` is associated with a :class:`.Session` - in its default mode of ``autocommit=False`` immediately, associated - with no database connections. As the :class:`.Session` is called upon - to emit SQL on behalf of various :class:`.Engine` or :class:`.Connection` - objects, a corresponding :class:`.Connection` and associated :class:`.Transaction` - is added to a collection within the :class:`.SessionTransaction` object, - becoming one of the connection/transaction pairs maintained by the - :class:`.SessionTransaction`. - - The lifespan of the :class:`.SessionTransaction` ends when the - :meth:`.Session.commit`, :meth:`.Session.rollback` or :meth:`.Session.close` - methods are called. At this point, the :class:`.SessionTransaction` removes - its association with its parent :class:`.Session`. A :class:`.Session` - that is in ``autocommit=False`` mode will create a new - :class:`.SessionTransaction` to replace it immediately, whereas a - :class:`.Session` that's in ``autocommit=True`` - mode will remain without a :class:`.SessionTransaction` until the - :meth:`.Session.begin` method is called. - - Another detail of :class:`.SessionTransaction` behavior is that it is - capable of "nesting". This means that the :meth:`.begin` method can - be called while an existing :class:`.SessionTransaction` is already present, - producing a new :class:`.SessionTransaction` that temporarily replaces - the parent :class:`.SessionTransaction`. When a :class:`.SessionTransaction` - is produced as nested, it assigns itself to the :attr:`.Session.transaction` - attribute. When it is ended via :meth:`.Session.commit` or :meth:`.Session.rollback`, - it restores its parent :class:`.SessionTransaction` back onto the - :attr:`.Session.transaction` attribute. The - behavior is effectively a stack, where :attr:`.Session.transaction` refers - to the current head of the stack. 
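To make the stack behavior concrete, an editor-added sketch; ``Session`` is an assumed configured :func:`.sessionmaker` and ``a1``/``a2`` assumed mapped instances::

    session = Session()
    session.add(a1)

    session.begin_nested()   # pushes a SessionTransaction emitting SAVEPOINT
    session.add(a2)
    session.rollback()       # rolls back to the SAVEPOINT; a1 is unaffected

    session.commit()         # commits the outermost transaction (a1)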
- - The purpose of this stack is to allow nesting of :meth:`.rollback` or - :meth:`.commit` calls in context with various flavors of :meth:`.begin`. - This nesting behavior applies when :meth:`.Session.begin_nested` - is used to emit a SAVEPOINT transaction, and is also used to produce - a so-called "subtransaction" which allows a block of code to use a - begin/rollback/commit sequence regardless of whether or not its enclosing - code block has begun a transaction. The :meth:`.flush` method, whether called - explicitly or via autoflush, is the primary consumer of the "subtransaction" - feature, in that it wishes to guarantee that it works within a transaction block - regardless of whether or not the :class:`.Session` is in transactional mode - when the method is called. - - See also: - - :meth:`.Session.rollback` - - :meth:`.Session.commit` - - :meth:`.Session.begin` - - :meth:`.Session.begin_nested` - - :attr:`.Session.is_active` - - :meth:`.SessionEvents.after_commit` - - :meth:`.SessionEvents.after_rollback` - - :meth:`.SessionEvents.after_soft_rollback` - - """ - - _rollback_exception = None - - def __init__(self, session, parent=None, nested=False): - self.session = session - self._connections = {} - self._parent = parent - self.nested = nested - self._active = True - self._prepared = False - if not parent and nested: - raise sa_exc.InvalidRequestError( - "Can't start a SAVEPOINT transaction when no existing " - "transaction is in progress") - - if self.session._enable_transaction_accounting: - self._take_snapshot() - - @property - def is_active(self): - return self.session is not None and self._active - - def _assert_is_active(self): - self._assert_is_open() - if not self._active: - if self._rollback_exception: - raise sa_exc.InvalidRequestError( - "This Session's transaction has been rolled back " - "due to a previous exception during flush." - " To begin a new transaction with this Session, " - "first issue Session.rollback()." - " Original exception was: %s" - % self._rollback_exception - ) - else: - raise sa_exc.InvalidRequestError( - "This Session's transaction has been rolled back " - "by a nested rollback() call. To begin a new " - "transaction, issue Session.rollback() first."
- ) - - def _assert_is_open(self, error_msg="The transaction is closed"): - if self.session is None: - raise sa_exc.ResourceClosedError(error_msg) - - @property - def _is_transaction_boundary(self): - return self.nested or not self._parent - - def connection(self, bindkey, **kwargs): - self._assert_is_active() - engine = self.session.get_bind(bindkey, **kwargs) - return self._connection_for_bind(engine) - - def _begin(self, nested=False): - self._assert_is_active() - return SessionTransaction( - self.session, self, nested=nested) - - def _iterate_parents(self, upto=None): - if self._parent is upto: - return (self,) - else: - if self._parent is None: - raise sa_exc.InvalidRequestError( - "Transaction %s is not on the active transaction list" % ( - upto)) - return (self,) + self._parent._iterate_parents(upto) - - def _take_snapshot(self): - if not self._is_transaction_boundary: - self._new = self._parent._new - self._deleted = self._parent._deleted - self._key_switches = self._parent._key_switches - return - - if not self.session._flushing: - self.session.flush() - - self._new = weakref.WeakKeyDictionary() - self._deleted = weakref.WeakKeyDictionary() - self._key_switches = weakref.WeakKeyDictionary() - - def _restore_snapshot(self): - assert self._is_transaction_boundary - - for s in set(self._new).union(self.session._new): - self.session._expunge_state(s) - if s.key: - del s.key - - for s, (oldkey, newkey) in self._key_switches.items(): - self.session.identity_map.discard(s) - s.key = oldkey - self.session.identity_map.replace(s) - - for s in set(self._deleted).union(self.session._deleted): - if s.deleted: - #assert s in self._deleted - del s.deleted - self.session._update_impl(s, discard_existing=True) - - assert not self.session._deleted - - for s in self.session.identity_map.all_states(): - s.expire(s.dict, self.session.identity_map._modified) - - def _remove_snapshot(self): - assert self._is_transaction_boundary - - if not self.nested and self.session.expire_on_commit: - for s in self.session.identity_map.all_states(): - s.expire(s.dict, self.session.identity_map._modified) - - def _connection_for_bind(self, bind): - self._assert_is_active() - - if bind in self._connections: - return self._connections[bind][0] - - if self._parent: - conn = self._parent._connection_for_bind(bind) - if not self.nested: - return conn - else: - if isinstance(bind, engine.Connection): - conn = bind - if conn.engine in self._connections: - raise sa_exc.InvalidRequestError( - "Session already has a Connection associated for the " - "given Connection's Engine") - else: - conn = bind.contextual_connect() - - if self.session.twophase and self._parent is None: - transaction = conn.begin_twophase() - elif self.nested: - transaction = conn.begin_nested() - else: - transaction = conn.begin() - - self._connections[conn] = self._connections[conn.engine] = \ - (conn, transaction, conn is not bind) - self.session.dispatch.after_begin(self.session, self, conn) - return conn - - def prepare(self): - if self._parent is not None or not self.session.twophase: - raise sa_exc.InvalidRequestError( - "Only root two phase transactions can be prepared") - self._prepare_impl() - - def _prepare_impl(self): - self._assert_is_active() - if self._parent is None or self.nested: - self.session.dispatch.before_commit(self.session) - - stx = self.session.transaction - if stx is not self: - for subtransaction in stx._iterate_parents(upto=self): - subtransaction.commit() - - if not self.session._flushing: - for _flush_guard in
xrange(100): - if self.session._is_clean(): - break - self.session.flush() - else: - raise exc.FlushError( - "Over 100 subsequent flushes have occurred within " - "session.commit() - is an after_flush() hook " - "creating new objects?") - - if self._parent is None and self.session.twophase: - try: - for t in set(self._connections.values()): - t[1].prepare() - except: - self.rollback() - raise - - self._deactivate() - self._prepared = True - - def commit(self): - self._assert_is_open() - if not self._prepared: - self._prepare_impl() - - if self._parent is None or self.nested: - for t in set(self._connections.values()): - t[1].commit() - - self.session.dispatch.after_commit(self.session) - - if self.session._enable_transaction_accounting: - self._remove_snapshot() - - self.close() - return self._parent - - def rollback(self, _capture_exception=False): - self._assert_is_open() - - stx = self.session.transaction - if stx is not self: - for subtransaction in stx._iterate_parents(upto=self): - subtransaction.close() - - if self.is_active or self._prepared: - for transaction in self._iterate_parents(): - if transaction._parent is None or transaction.nested: - transaction._rollback_impl() - transaction._deactivate() - break - else: - transaction._deactivate() - - sess = self.session - - if self.session._enable_transaction_accounting and \ - not sess._is_clean(): - # if items were added, deleted, or mutated - # here, we need to re-restore the snapshot - util.warn( - "Session's state has been changed on " - "a non-active transaction - this state " - "will be discarded.") - self._restore_snapshot() - - self.close() - if self._parent and _capture_exception: - self._parent._rollback_exception = sys.exc_info()[1] - - sess.dispatch.after_soft_rollback(sess, self) - - return self._parent - - def _rollback_impl(self): - for t in set(self._connections.values()): - t[1].rollback() - - if self.session._enable_transaction_accounting: - self._restore_snapshot() - - self.session.dispatch.after_rollback(self.session) - - def _deactivate(self): - self._active = False - - def close(self): - self.session.transaction = self._parent - if self._parent is None: - for connection, transaction, autoclose in \ - set(self._connections.values()): - if autoclose: - connection.close() - else: - transaction.close() - if not self.session.autocommit: - self.session.begin() - self._deactivate() - self.session = None - self._connections = None - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self._assert_is_open("Cannot end transaction context. The transaction " - "was closed from within the context") - if self.session.transaction is None: - return - if type is None: - try: - self.commit() - except: - self.rollback() - raise - else: - self.rollback() - -class Session(object): - """Manages persistence operations for ORM-mapped objects. - - The Session's usage paradigm is described at :ref:`session_toplevel`. - - - """ - - public_methods = ( - '__contains__', '__iter__', 'add', 'add_all', 'begin', 'begin_nested', - 'close', 'commit', 'connection', 'delete', 'execute', 'expire', - 'expire_all', 'expunge', 'expunge_all', 'flush', 'get_bind', - 'is_modified', - 'merge', 'query', 'refresh', 'rollback', - 'scalar') - - - def __init__(self, bind=None, autoflush=True, expire_on_commit=True, - _enable_transaction_accounting=True, - autocommit=False, twophase=False, - weak_identity_map=True, binds=None, extension=None, - query_cls=query.Query): - """Construct a new Session. 
- - See also the :func:`.sessionmaker` function which is used to - generate a :class:`.Session`-producing callable with a given - set of arguments. - - :param autocommit: Defaults to ``False``. When ``True``, the ``Session`` - does not keep a persistent transaction running, and will acquire - connections from the engine on an as-needed basis, returning them - immediately after their use. Flushes will begin and commit (or possibly - rollback) their own transaction if no transaction is present. When using - this mode, the `session.begin()` method may be used to begin a - transaction explicitly. - - Leaving it on its default value of ``False`` means that the ``Session`` - will acquire a connection and begin a transaction the first time it is - used, which it will maintain persistently until ``rollback()``, - ``commit()``, or ``close()`` is called. When the transaction is released - by any of these methods, the ``Session`` is ready for the next usage, - which will again acquire and maintain a new connection/transaction. - - :param autoflush: When ``True``, all query operations will issue a - ``flush()`` call to this ``Session`` before proceeding. This is a - convenience feature so that ``flush()`` need not be called repeatedly - in order for database queries to retrieve results. It's typical that - ``autoflush`` is used in conjunction with ``autocommit=False``. In this - scenario, explicit calls to ``flush()`` are rarely needed; you usually - only need to call ``commit()`` (which flushes) to finalize changes. - - :param bind: An optional ``Engine`` or ``Connection`` to which this - ``Session`` should be bound. When specified, all SQL operations - performed by this session will execute via this connectable. - - :param binds: An optional dictionary which contains more granular "bind" - information than the ``bind`` parameter provides. This dictionary can - map individual ``Table`` instances as well as ``Mapper`` instances to - individual ``Engine`` or ``Connection`` objects. Operations which - proceed relative to a particular ``Mapper`` will consult this - dictionary for the direct ``Mapper`` instance as well as the mapper's - ``mapped_table`` attribute in order to locate a connectable to use. - The full resolution is described in the ``get_bind()`` method of - ``Session``. Usage looks like:: - - Session = sessionmaker(binds={ - SomeMappedClass: create_engine('postgresql://engine1'), - somemapper: create_engine('postgresql://engine2'), - some_table: create_engine('postgresql://engine3'), - }) - - Also see the :meth:`.Session.bind_mapper` and :meth:`.Session.bind_table` methods. - - :param \class_: Specify an alternate class other than - ``sqlalchemy.orm.session.Session`` which should be used by the returned - class. This is the only argument that is local to the - ``sessionmaker()`` function, and is not sent directly to the - constructor for ``Session``. - - :param _enable_transaction_accounting: Defaults to ``True``. A - legacy-only flag which when ``False`` disables *all* 0.5-style object - accounting on transaction boundaries, including auto-expiry of - instances on rollback and commit, maintenance of the "new" and - "deleted" lists upon rollback, and autoflush of pending changes upon - begin(), all of which are interdependent. - - :param expire_on_commit: Defaults to ``True``. When ``True``, all - instances will be fully expired after each ``commit()``, so that all - attribute/object access subsequent to a completed transaction will load - from the most recent database state.
- - :param extension: An optional - :class:`~.SessionExtension` instance, or a list - of such instances, which will receive pre- and post- commit and flush - events, as well as a post-rollback event. **Deprecated.** - Please see :class:`.SessionEvents`. - - :param query_cls: Class which should be used to create new Query objects, - as returned by the ``query()`` method. Defaults to - :class:`~sqlalchemy.orm.query.Query`. - - :param twophase: When ``True``, all transactions will be started as - a "two phase" transaction, i.e. using the "two phase" semantics - of the database in use along with an XID. During a ``commit()``, - after ``flush()`` has been issued for all attached databases, the - ``prepare()`` method on each database's ``TwoPhaseTransaction`` will - be called. This allows each database to roll back the entire - transaction, before each transaction is committed. - - :param weak_identity_map: Defaults to ``True`` - when set to - ``False``, objects placed in the :class:`.Session` will be - strongly referenced until explicitly removed or the - :class:`.Session` is closed. **Deprecated** - this option - is obsolete. - - """ - - if weak_identity_map: - self._identity_cls = identity.WeakInstanceDict - else: - util.warn_deprecated("weak_identity_map=False is deprecated. " - "This feature is not needed.") - self._identity_cls = identity.StrongInstanceDict - self.identity_map = self._identity_cls() - - self._new = {} # InstanceState->object, strong refs object - self._deleted = {} # same - self.bind = bind - self.__binds = {} - self._flushing = False - self.transaction = None - self.hash_key = _new_sessionid() - self.autoflush = autoflush - self.autocommit = autocommit - self.expire_on_commit = expire_on_commit - self._enable_transaction_accounting = _enable_transaction_accounting - self.twophase = twophase - self._query_cls = query_cls - - if extension: - for ext in util.to_list(extension): - SessionExtension._adapt_listener(self, ext) - - if binds is not None: - for mapperortable, bind in binds.iteritems(): - if isinstance(mapperortable, (type, Mapper)): - self.bind_mapper(mapperortable, bind) - else: - self.bind_table(mapperortable, bind) - - if not self.autocommit: - self.begin() - _sessions[self.hash_key] = self - - dispatch = event.dispatcher(SessionEvents) - - connection_callable = None - - transaction = None - """The current active or inactive :class:`.SessionTransaction`.""" - - def begin(self, subtransactions=False, nested=False): - """Begin a transaction on this Session. - - If this Session is already within a transaction, either a plain - transaction or nested transaction, an error is raised, unless - ``subtransactions=True`` or ``nested=True`` is specified. - - The ``subtransactions=True`` flag indicates that this :meth:`~.Session.begin` - can create a subtransaction if a transaction is already in progress. - For documentation on subtransactions, please see :ref:`session_subtransactions`. - - The ``nested`` flag begins a SAVEPOINT transaction and is equivalent - to calling :meth:`~.Session.begin_nested`. For documentation on SAVEPOINT - transactions, please see :ref:`session_begin_nested`. - - """ - if self.transaction is not None: - if subtransactions or nested: - self.transaction = self.transaction._begin( - nested=nested) - else: - raise sa_exc.InvalidRequestError( - "A transaction is already begun. 
Use subtransactions=True " - "to allow subtransactions.") - else: - self.transaction = SessionTransaction( - self, nested=nested) - return self.transaction # needed for __enter__/__exit__ hook - - def begin_nested(self): - """Begin a `nested` transaction on this Session. - - The target database(s) must support SQL SAVEPOINTs or a - SQLAlchemy-supported vendor implementation of the idea. - - For documentation on SAVEPOINT - transactions, please see :ref:`session_begin_nested`. - - """ - return self.begin(nested=True) - - def rollback(self): - """Rollback the current transaction in progress. - - If no transaction is in progress, this method is a pass-through. - - This method rolls back the current transaction or nested transaction - regardless of subtransactions being in effect. All subtransactions up - to the first real transaction are closed. Subtransactions occur when - begin() is called multiple times. - - """ - if self.transaction is None: - pass - else: - self.transaction.rollback() - - def commit(self): - """Flush pending changes and commit the current transaction. - - If no transaction is in progress, this method raises an - InvalidRequestError. - - By default, the :class:`.Session` also expires all database - loaded state on all ORM-managed attributes after transaction commit. - This is so that subsequent operations load the most recent - data from the database. This behavior can be disabled using - the ``expire_on_commit=False`` option to :func:`.sessionmaker` or - the :class:`.Session` constructor. - - If a subtransaction is in effect (which occurs when begin() is called - multiple times), the subtransaction will be closed, and the next call - to ``commit()`` will operate on the enclosing transaction. - - For a session configured with autocommit=False, a new transaction will - be begun immediately after the commit, but note that the newly begun - transaction does *not* use any connection resources until the first - SQL is actually emitted. - - """ - if self.transaction is None: - if not self.autocommit: - self.begin() - else: - raise sa_exc.InvalidRequestError("No transaction is begun.") - - self.transaction.commit() - - def prepare(self): - """Prepare the current transaction in progress for two phase commit. - - If no transaction is in progress, this method raises an - InvalidRequestError. - - Only root transactions of two phase sessions can be prepared. If the - current transaction is not such, an InvalidRequestError is raised. - - """ - if self.transaction is None: - if not self.autocommit: - self.begin() - else: - raise sa_exc.InvalidRequestError("No transaction is begun.") - - self.transaction.prepare() - - def connection(self, mapper=None, clause=None, - bind=None, - close_with_result=False, - **kw): - """Return a :class:`.Connection` object corresponding to this - :class:`.Session` object's transactional state. - - If this :class:`.Session` is configured with ``autocommit=False``, - either the :class:`.Connection` corresponding to the current transaction - is returned, or if no transaction is in progress, a new one is begun - and the :class:`.Connection` returned (note that no transactional state - is established with the DBAPI until the first SQL statement is emitted). - - Alternatively, if this :class:`.Session` is configured with ``autocommit=True``, - an ad-hoc :class:`.Connection` is returned using :meth:`.Engine.contextual_connect` - on the underlying :class:`.Engine`.
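E.g., a minimal sketch of typical usage (assuming a ``Session`` class produced by :func:`.sessionmaker` and bound to an :class:`.Engine`)::

    session = Session()
    connection = session.connection()        # participates in the session's transaction
    result = connection.execute("select 1")  # plain-string SQL, as with Connection.execute()
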
- - Ambiguity in multi-bind or unbound :class:`.Session` objects can be resolved through - any of the optional keyword arguments. This ultimately makes usage of the - :meth:`.get_bind` method for resolution. - - :param bind: - Optional :class:`.Engine` to be used as the bind. If - this engine is already involved in an ongoing transaction, - that connection will be used. This argument takes precedence - over ``mapper``, ``clause``. - - :param mapper: - Optional :func:`.mapper` mapped class, used to identify - the appropriate bind. This argument takes precedence over - ``clause``. - - :param clause: - A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, - :func:`~.sql.expression.text`, - etc.) which will be used to locate a bind, if a bind - cannot otherwise be identified. - - :param close_with_result: Passed to :meth:`Engine.connect`, indicating - the :class:`.Connection` should be considered "single use", automatically - closing when the first result set is closed. This flag only has - an effect if this :class:`.Session` is configured with ``autocommit=True`` - and does not already have a transaction in progress. - - :param \**kw: - Additional keyword arguments are sent to :meth:`get_bind()`, - allowing additional arguments to be passed to custom - implementations of :meth:`get_bind`. - - """ - if bind is None: - bind = self.get_bind(mapper, clause=clause, **kw) - - return self._connection_for_bind(bind, - close_with_result=close_with_result) - - def _connection_for_bind(self, engine, **kwargs): - if self.transaction is not None: - return self.transaction._connection_for_bind(engine) - else: - return engine.contextual_connect(**kwargs) - - def execute(self, clause, params=None, mapper=None, bind=None, **kw): - """Execute a SQL expression construct or string statement within - the current transaction. - - Returns a :class:`.ResultProxy` representing - results of the statement execution, in the same manner as that of an - :class:`.Engine` or - :class:`.Connection`. - - E.g.:: - - result = session.execute( - user_table.select().where(user_table.c.id == 5) - ) - - :meth:`~.Session.execute` accepts any executable clause construct, such - as :func:`~.sql.expression.select`, - :func:`~.sql.expression.insert`, - :func:`~.sql.expression.update`, - :func:`~.sql.expression.delete`, and - :func:`~.sql.expression.text`. Plain SQL strings can be passed - as well, which in the case of :meth:`.Session.execute` only - will be interpreted the same as if it were passed via a :func:`~.expression.text` - construct. That is, the following usage:: - - result = session.execute( - "SELECT * FROM user WHERE id=:param", - {"param":5} - ) - - is equivalent to:: - - from sqlalchemy import text - result = session.execute( - text("SELECT * FROM user WHERE id=:param"), - {"param":5} - ) - - The second positional argument to :meth:`.Session.execute` is an - optional parameter set. Similar to that of :meth:`.Connection.execute`, whether this - is passed as a single dictionary, or a list of dictionaries, determines - whether the DBAPI cursor's ``execute()`` or ``executemany()`` is used to execute the - statement. 
An INSERT construct may be invoked for a single row:: - - result = session.execute(users.insert(), {"id": 7, "name": "somename"}) - - or for multiple rows:: - - result = session.execute(users.insert(), [ - {"id": 7, "name": "somename7"}, - {"id": 8, "name": "somename8"}, - {"id": 9, "name": "somename9"} - ]) - - The statement is executed within the current transactional context of - this :class:`.Session`. The :class:`.Connection` which is used - to execute the statement can also be acquired directly by - calling the :meth:`.Session.connection` method. Both methods use - a rule-based resolution scheme in order to determine the - :class:`.Connection`, which in the average case is derived directly - from the "bind" of the :class:`.Session` itself, and in other cases - can be based on the :func:`.mapper` - and :class:`.Table` objects passed to the method; see the documentation - for :meth:`.Session.get_bind` for a full description of this scheme. - - The :meth:`.Session.execute` method does *not* invoke autoflush. - - The :class:`.ResultProxy` returned by the :meth:`.Session.execute` - method is returned with the "close_with_result" flag set to true; - the significance of this flag is that if this :class:`.Session` is - autocommitting and does not have a transaction-dedicated :class:`.Connection` - available, a temporary :class:`.Connection` is established for the - statement execution, which is closed (meaning, returned to the connection - pool) when the :class:`.ResultProxy` has consumed all available data. - This applies *only* when the :class:`.Session` is configured with - autocommit=True and no transaction has been started. - - :param clause: - An executable statement (i.e. an :class:`.Executable` expression - such as :func:`.expression.select`) or string SQL statement - to be executed. - - :param params: - Optional dictionary, or list of dictionaries, containing - bound parameter values. If a single dictionary, single-row - execution occurs; if a list of dictionaries, an - "executemany" will be invoked. The keys in each dictionary - must correspond to parameter names present in the statement. - - :param mapper: - Optional :func:`.mapper` or mapped class, used to identify - the appropriate bind. This argument takes precedence over - ``clause`` when locating a bind. See :meth:`.Session.get_bind` - for more details. - - :param bind: - Optional :class:`.Engine` to be used as the bind. If - this engine is already involved in an ongoing transaction, - that connection will be used. This argument takes - precedence over ``mapper`` and ``clause`` when locating - a bind. - - :param \**kw: - Additional keyword arguments are sent to :meth:`.Session.get_bind()` - to allow extensibility of "bind" schemes. - - .. seealso:: - - :ref:`sqlexpression_toplevel` - Tutorial on using Core SQL - constructs. - - :ref:`connections_toplevel` - Further information on direct - statement execution. - - :meth:`.Connection.execute` - core level statement execution - method, which :meth:`.Session.execute` ultimately uses - in order to execute the statement.
- - """ - clause = expression._literal_as_text(clause) - - if bind is None: - bind = self.get_bind(mapper, clause=clause, **kw) - - return self._connection_for_bind(bind, close_with_result=True).execute( - clause, params or {}) - - def scalar(self, clause, params=None, mapper=None, bind=None, **kw): - """Like :meth:`~.Session.execute` but return a scalar result.""" - - return self.execute(clause, params=params, mapper=mapper, bind=bind, **kw).scalar() - - def close(self): - """Close this Session. - - This clears all items and ends any transaction in progress. - - If this session were created with ``autocommit=False``, a new - transaction is immediately begun. Note that this new transaction does - not use any connection resources until they are first needed. - - """ - self.expunge_all() - if self.transaction is not None: - for transaction in self.transaction._iterate_parents(): - transaction.close() - - @classmethod - def close_all(cls): - """Close *all* sessions in memory.""" - - for sess in _sessions.values(): - sess.close() - - def expunge_all(self): - """Remove all object instances from this ``Session``. - - This is equivalent to calling ``expunge(obj)`` on all objects in this - ``Session``. - - """ - for state in self.identity_map.all_states() + list(self._new): - state.detach() - - self.identity_map = self._identity_cls() - self._new = {} - self._deleted = {} - - # TODO: need much more test coverage for bind_mapper() and similar ! - # TODO: + crystalize + document resolution order vis. bind_mapper/bind_table - - def bind_mapper(self, mapper, bind): - """Bind operations for a mapper to a Connectable. - - mapper - A mapper instance or mapped class - - bind - Any Connectable: a ``Engine`` or ``Connection``. - - All subsequent operations involving this mapper will use the given - `bind`. - - """ - if isinstance(mapper, type): - mapper = _class_mapper(mapper) - - self.__binds[mapper.base_mapper] = bind - for t in mapper._all_tables: - self.__binds[t] = bind - - def bind_table(self, table, bind): - """Bind operations on a Table to a Connectable. - - table - A ``Table`` instance - - bind - Any Connectable: a ``Engine`` or ``Connection``. - - All subsequent operations involving this ``Table`` will use the - given `bind`. - - """ - self.__binds[table] = bind - - def get_bind(self, mapper=None, clause=None): - """Return a "bind" to which this :class:`.Session` is bound. - - The "bind" is usually an instance of :class:`.Engine`, - except in the case where the :class:`.Session` has been - explicitly bound directly to a :class:`.Connection`. - - For a multiply-bound or unbound :class:`.Session`, the - ``mapper`` or ``clause`` arguments are used to determine the - appropriate bind to return. - - Note that the "mapper" argument is usually present - when :meth:`.Session.get_bind` is called via an ORM - operation such as a :meth:`.Session.query`, each - individual INSERT/UPDATE/DELETE operation within a - :meth:`.Session.flush`, call, etc. - - The order of resolution is: - - 1. if mapper given and session.binds is present, - locate a bind based on mapper. - 2. if clause given and session.binds is present, - locate a bind based on :class:`.Table` objects - found in the given clause present in session.binds. - 3. if session.bind is present, return that. - 4. if clause given, attempt to return a bind - linked to the :class:`.MetaData` ultimately - associated with the clause. - 5. 
if mapper given, attempt to return a bind - linked to the :class:`.MetaData` ultimately - associated with the :class:`.Table` or other - selectable to which the mapper is mapped. - 6. No bind can be found, :class:`.UnboundExecutionError` - is raised. - - :param mapper: - Optional :func:`.mapper` mapped class or instance of - :class:`.Mapper`. The bind can be derived from a :class:`.Mapper` - first by consulting the "binds" map associated with this - :class:`.Session`, and secondly by consulting the :class:`.MetaData` - associated with the :class:`.Table` to which the :class:`.Mapper` - is mapped for a bind. - - :param clause: - A :class:`.ClauseElement` (i.e. :func:`~.sql.expression.select`, - :func:`~.sql.expression.text`, - etc.). If the ``mapper`` argument is not present or could not produce - a bind, the given expression construct will be searched for a bound - element, typically a :class:`.Table` associated with bound - :class:`.MetaData`. - - """ - if mapper is clause is None: - if self.bind: - return self.bind - else: - raise sa_exc.UnboundExecutionError( - "This session is not bound to a single Engine or " - "Connection, and no context was provided to locate " - "a binding.") - - c_mapper = mapper is not None and _class_to_mapper(mapper) or None - - # manually bound? - if self.__binds: - if c_mapper: - if c_mapper.base_mapper in self.__binds: - return self.__binds[c_mapper.base_mapper] - elif c_mapper.mapped_table in self.__binds: - return self.__binds[c_mapper.mapped_table] - if clause is not None: - for t in sql_util.find_tables(clause, include_crud=True): - if t in self.__binds: - return self.__binds[t] - - if self.bind: - return self.bind - - if isinstance(clause, sql.expression.ClauseElement) and clause.bind: - return clause.bind - - if c_mapper and c_mapper.mapped_table.bind: - return c_mapper.mapped_table.bind - - context = [] - if mapper is not None: - context.append('mapper %s' % c_mapper) - if clause is not None: - context.append('SQL expression') - - raise sa_exc.UnboundExecutionError( - "Could not locate a bind configured on %s or this Session" % ( - ', '.join(context))) - - def query(self, *entities, **kwargs): - """Return a new ``Query`` object corresponding to this ``Session``.""" - - return self._query_cls(entities, self, **kwargs) - - @property - @util.contextmanager - def no_autoflush(self): - """Return a context manager that disables autoflush. - - e.g.:: - - with session.no_autoflush: - - some_object = SomeClass() - session.add(some_object) - # won't autoflush - some_object.related_thing = session.query(SomeRelated).first() - - Operations that proceed within the ``with:`` block - will not be subject to flushes occurring upon query - access. This is useful when initializing a series - of objects which involve existing database queries, - where the uncompleted object should not yet be flushed. - - .. versionadded:: 0.7.6 - - """ - autoflush = self.autoflush - self.autoflush = False - yield self - self.autoflush = autoflush - - def _autoflush(self): - if self.autoflush and not self._flushing: - self.flush() - - def _finalize_loaded(self, states): - for state, dict_ in states.items(): - state.commit_all(dict_, self.identity_map) - - def refresh(self, instance, attribute_names=None, lockmode=None): - """Expire and refresh the attributes on the given instance. - - A query will be issued to the database and all attributes will be - refreshed with their current database value. 
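E.g., a brief sketch (``some_user`` is a placeholder persistent instance)::

    session.refresh(some_user)             # reload all attributes
    session.refresh(some_user, ['name'])   # reload only the 'name' attribute
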
- - Lazy-loaded relational attributes will remain lazily loaded, so that - the instance-wide refresh operation will be followed immediately by - the lazy load of that attribute. - - Eagerly-loaded relational attributes will eagerly load within the - single refresh operation. - - Note that a highly isolated transaction will return the same values as - were previously read in that same transaction, regardless of changes - in database state outside of that transaction - usage of - :meth:`~Session.refresh` usually only makes sense if non-ORM SQL - statements were emitted in the ongoing transaction, or if autocommit - mode is turned on. - - :param attribute_names: optional. An iterable collection of - string attribute names indicating a subset of attributes to - be refreshed. - - :param lockmode: Passed to the :class:`~sqlalchemy.orm.query.Query` - as used by :meth:`~sqlalchemy.orm.query.Query.with_lockmode`. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - self._expire_state(state, attribute_names) - - if self.query(_object_mapper(instance))._load_on_ident( - state.key, refresh_state=state, - lockmode=lockmode, - only_load_props=attribute_names) is None: - raise sa_exc.InvalidRequestError( - "Could not refresh instance '%s'" % - mapperutil.instance_str(instance)) - - def expire_all(self): - """Expires all persistent instances within this Session. - - When any attributes on a persistent instance are next accessed, - a query will be issued using the - :class:`.Session` object's current transactional context in order to - load all expired attributes for the given instance. Note that - a highly isolated transaction will return the same values as were - previously read in that same transaction, regardless of changes - in database state outside of that transaction. - - To expire individual objects and individual attributes - on those objects, use :meth:`Session.expire`. - - The :class:`.Session` object's default behavior is to - expire all state whenever the :meth:`Session.rollback` - or :meth:`Session.commit` methods are called, so that new - state can be loaded for the new transaction. For this reason, - calling :meth:`Session.expire_all` should not be needed when - autocommit is ``False``, assuming the transaction is isolated. - - """ - for state in self.identity_map.all_states(): - state.expire(state.dict, self.identity_map._modified) - - def expire(self, instance, attribute_names=None): - """Expire the attributes on an instance. - - Marks the attributes of an instance as out of date. When an expired - attribute is next accessed, a query will be issued to the - :class:`.Session` object's current transactional context in order to - load all expired attributes for the given instance. Note that - a highly isolated transaction will return the same values as were - previously read in that same transaction, regardless of changes - in database state outside of that transaction. - - To expire all objects in the :class:`.Session` simultaneously, - use :meth:`Session.expire_all`. - - The :class:`.Session` object's default behavior is to - expire all state whenever the :meth:`Session.rollback` - or :meth:`Session.commit` methods are called, so that new - state can be loaded for the new transaction. For this reason, - calling :meth:`Session.expire` only makes sense for the specific - case that a non-ORM SQL statement was emitted in the current - transaction. - - :param instance: The instance to be refreshed.
- :param attribute_names: optional list of string attribute names - indicating a subset of attributes to be expired. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - self._expire_state(state, attribute_names) - - def _expire_state(self, state, attribute_names): - self._validate_persistent(state) - if attribute_names: - state.expire_attributes(state.dict, attribute_names) - else: - # pre-fetch the full cascade since the expire is going to - # remove associations - cascaded = list(state.manager.mapper.cascade_iterator( - 'refresh-expire', state)) - self._conditional_expire(state) - for o, m, st_, dct_ in cascaded: - self._conditional_expire(st_) - - def _conditional_expire(self, state): - """Expire a state if persistent, else expunge if pending""" - - if state.key: - state.expire(state.dict, self.identity_map._modified) - elif state in self._new: - self._new.pop(state) - state.detach() - - @util.deprecated("0.7", "The non-weak-referencing identity map " - "feature is no longer needed.") - def prune(self): - """Remove unreferenced instances cached in the identity map. - - Note that this method is only meaningful if "weak_identity_map" is set - to False. The default weak identity map is self-pruning. - - Removes any object in this Session's identity map that is not - referenced in user code, modified, new or scheduled for deletion. - Returns the number of objects pruned. - - """ - return self.identity_map.prune() - - def expunge(self, instance): - """Remove the `instance` from this ``Session``. - - This will free all internal references to the instance. Cascading - will be applied according to the *expunge* cascade rule. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - if state.session_id is not self.hash_key: - raise sa_exc.InvalidRequestError( - "Instance %s is not present in this Session" % - mapperutil.state_str(state)) - - cascaded = list(state.manager.mapper.cascade_iterator( - 'expunge', state)) - self._expunge_state(state) - for o, m, st_, dct_ in cascaded: - self._expunge_state(st_) - - def _expunge_state(self, state): - if state in self._new: - self._new.pop(state) - state.detach() - elif self.identity_map.contains_state(state): - self.identity_map.discard(state) - self._deleted.pop(state, None) - state.detach() - elif self.transaction: - self.transaction._deleted.pop(state, None) - - def _register_newly_persistent(self, state): - mapper = _state_mapper(state) - - # prevent against last minute dereferences of the object - obj = state.obj() - if obj is not None: - - instance_key = mapper._identity_key_from_state(state) - - if _none_set.issubset(instance_key[1]) and \ - not mapper.allow_partial_pks or \ - _none_set.issuperset(instance_key[1]): - raise exc.FlushError( - "Instance %s has a NULL identity key. If this is an " - "auto-generated value, check that the database table " - "allows generation of new primary key values, and that " - "the mapped Column object is configured to expect these " - "generated values. Ensure also that this flush() is " - "not occurring at an inappropriate time, such as within " - "a load() event." % mapperutil.state_str(state) - ) - - if state.key is None: - state.key = instance_key - elif state.key != instance_key: - # primary key switch. 
use discard() in case another - # state has already replaced this one in the identity - # map (see test/orm/test_naturalpks.py ReversePKsTest) - self.identity_map.discard(state) - if state in self.transaction._key_switches: - orig_key = self.transaction._key_switches[state][0] - else: - orig_key = state.key - self.transaction._key_switches[state] = (orig_key, instance_key) - state.key = instance_key - - self.identity_map.replace(state) - state.commit_all(state.dict, self.identity_map) - - # remove from new last, might be the last strong ref - if state in self._new: - if self._enable_transaction_accounting and self.transaction: - self.transaction._new[state] = True - self._new.pop(state) - - def _remove_newly_deleted(self, state): - if self._enable_transaction_accounting and self.transaction: - self.transaction._deleted[state] = True - - self.identity_map.discard(state) - self._deleted.pop(state, None) - state.deleted = True - - def add(self, instance): - """Place an object in the ``Session``. - - Its state will be persisted to the database on the next flush - operation. - - Repeated calls to ``add()`` will be ignored. The opposite of ``add()`` - is ``expunge()``. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - self._save_or_update_state(state) - - def add_all(self, instances): - """Add the given collection of instances to this ``Session``.""" - - for instance in instances: - self.add(instance) - - def _save_or_update_state(self, state): - self._save_or_update_impl(state) - - mapper = _state_mapper(state) - for o, m, st_, dct_ in mapper.cascade_iterator( - 'save-update', - state, - halt_on=self._contains_state): - self._save_or_update_impl(st_) - - def delete(self, instance): - """Mark an instance as deleted. - - The database delete operation occurs upon ``flush()``. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - if state.key is None: - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persisted" % - mapperutil.state_str(state)) - - if state in self._deleted: - return - - # ensure object is attached to allow the - # cascade operation to load deferred attributes - # and collections - self._attach(state) - - # grab the cascades before adding the item to the deleted list - # so that autoflush does not delete the item - # the strong reference to the instance itself is significant here - cascade_states = list(state.manager.mapper.cascade_iterator( - 'delete', state)) - - self._deleted[state] = state.obj() - self.identity_map.add(state) - - for o, m, st_, dct_ in cascade_states: - self._delete_impl(st_) - - def merge(self, instance, load=True, **kw): - """Copy the state of a given instance into a corresponding instance - within this :class:`.Session`. - - :meth:`.Session.merge` examines the primary key attributes of the - source instance, and attempts to reconcile it with an instance of the - same primary key in the session. If not found locally, it attempts - to load the object from the database based on primary key, and if - none can be located, creates a new instance. The state of each attribute - on the source instance is then copied to the target instance. - The resulting target instance is then returned by the method; the - original source instance is left unmodified, and un-associated with the - :class:`.Session` if not already. 
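E.g., a minimal sketch (``User`` is a placeholder mapped class with primary key ``id``)::

    detached = User(id=5, name='updated name')
    persistent = session.merge(detached)   # locates/loads row 5 and copies state onto it
    session.commit()                       # 'detached' itself remains unmodified
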
- - This operation cascades to associated instances if the association is - mapped with ``cascade="merge"``. - - See :ref:`unitofwork_merging` for a detailed discussion of merging. - - :param instance: Instance to be merged. - :param load: Boolean, when False, :meth:`.merge` switches into - a "high performance" mode which causes it to forego emitting history - events as well as all database access. This flag is used for - cases such as transferring graphs of objects into a :class:`.Session` - from a second level cache, or to transfer just-loaded objects - into the :class:`.Session` owned by a worker thread or process - without re-querying the database. - - The ``load=False`` use case adds the caveat that the given - object has to be in a "clean" state, that is, has no pending changes - to be flushed - even if the incoming object is detached from any - :class:`.Session`. This is so that when - the merge operation populates local attributes and - cascades to related objects and - collections, the values can be "stamped" onto the - target object as is, without generating any history or attribute - events, and without the need to reconcile the incoming data with - any existing related objects or collections that might not - be loaded. The resulting objects from ``load=False`` are always - produced as "clean", so it is only appropriate that the given objects - should be "clean" as well, else this suggests a mis-use of the method. - - """ - if 'dont_load' in kw: - load = not kw['dont_load'] - util.warn_deprecated('dont_load=True has been renamed to ' - 'load=False.') - - _recursive = {} - - if load: - # flush current contents if we expect to load data - self._autoflush() - - _object_mapper(instance) # verify mapped - autoflush = self.autoflush - try: - self.autoflush = False - return self._merge( - attributes.instance_state(instance), - attributes.instance_dict(instance), - load=load, _recursive=_recursive) - finally: - self.autoflush = autoflush - - def _merge(self, state, state_dict, load=True, _recursive=None): - mapper = _state_mapper(state) - if state in _recursive: - return _recursive[state] - - new_instance = False - key = state.key - - if key is None: - if not load: - raise sa_exc.InvalidRequestError( - "merge() with load=False option does not support " - "transient (i.e. unpersisted) objects. flush() " - "all changes on mapped instances before merging with " - "load=False.") - key = mapper._identity_key_from_state(state) - - if key in self.identity_map: - merged = self.identity_map[key] - - elif not load: - if state.modified: - raise sa_exc.InvalidRequestError( - "merge() with load=False option does not support " - "objects marked as 'dirty'. 
flush() all changes on " - "mapped instances before merging with load=False.") - merged = mapper.class_manager.new_instance() - merged_state = attributes.instance_state(merged) - merged_state.key = key - self._update_impl(merged_state) - new_instance = True - - elif not _none_set.issubset(key[1]) or \ - (mapper.allow_partial_pks and - not _none_set.issuperset(key[1])): - merged = self.query(mapper.class_).get(key[1]) - else: - merged = None - - if merged is None: - merged = mapper.class_manager.new_instance() - merged_state = attributes.instance_state(merged) - merged_dict = attributes.instance_dict(merged) - new_instance = True - self._save_or_update_state(merged_state) - else: - merged_state = attributes.instance_state(merged) - merged_dict = attributes.instance_dict(merged) - - _recursive[state] = merged - - # check that we didn't just pull the exact same - # state out. - if state is not merged_state: - # version check if applicable - if mapper.version_id_col is not None: - existing_version = mapper._get_state_attr_by_column( - state, - state_dict, - mapper.version_id_col, - passive=attributes.PASSIVE_NO_INITIALIZE) - - merged_version = mapper._get_state_attr_by_column( - merged_state, - merged_dict, - mapper.version_id_col, - passive=attributes.PASSIVE_NO_INITIALIZE) - - if existing_version is not attributes.PASSIVE_NO_RESULT and \ - merged_version is not attributes.PASSIVE_NO_RESULT and \ - existing_version != merged_version: - raise exc.StaleDataError( - "Version id '%s' on merged state %s " - "does not match existing version '%s'. " - "Leave the version attribute unset when " - "merging to update the most recent version." - % ( - existing_version, - mapperutil.state_str(merged_state), - merged_version - )) - - merged_state.load_path = state.load_path - merged_state.load_options = state.load_options - - for prop in mapper.iterate_properties: - prop.merge(self, state, state_dict, - merged_state, merged_dict, - load, _recursive) - - if not load: - # remove any history - merged_state.commit_all(merged_dict, self.identity_map) - - if new_instance: - merged_state.manager.dispatch.load(merged_state, None) - return merged - - @classmethod - def identity_key(cls, *args, **kwargs): - return mapperutil.identity_key(*args, **kwargs) - - @classmethod - def object_session(cls, instance): - """Return the ``Session`` to which an object belongs.""" - - return object_session(instance) - - def _validate_persistent(self, state): - if not self.identity_map.contains_state(state): - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persistent within this Session" % - mapperutil.state_str(state)) - - def _save_impl(self, state): - if state.key is not None: - raise sa_exc.InvalidRequestError( - "Object '%s' already has an identity - it can't be registered " - "as pending" % mapperutil.state_str(state)) - - self._attach(state) - if state not in self._new: - self._new[state] = state.obj() - state.insert_order = len(self._new) - - def _update_impl(self, state, discard_existing=False): - if (self.identity_map.contains_state(state) and - state not in self._deleted): - return - - if state.key is None: - raise sa_exc.InvalidRequestError( - "Instance '%s' is not persisted" % - mapperutil.state_str(state)) - - if state.deleted: - raise sa_exc.InvalidRequestError( - "Instance '%s' has been deleted. Use the make_transient() " - "function to send this object back to the transient state." 
% - mapperutil.state_str(state) - ) - if discard_existing: - existing = self.identity_map.get(state.key) - if existing is not None: - self.identity_map.discard(attributes.instance_state(existing)) - self._attach(state) - self._deleted.pop(state, None) - self.identity_map.add(state) - - def _save_or_update_impl(self, state): - if state.key is None: - self._save_impl(state) - else: - self._update_impl(state) - - def _delete_impl(self, state): - if state in self._deleted: - return - - if state.key is None: - return - - self._attach(state) - self._deleted[state] = state.obj() - self.identity_map.add(state) - - def _attach(self, state): - if state.key and \ - state.key in self.identity_map and \ - not self.identity_map.contains_state(state): - raise sa_exc.InvalidRequestError("Can't attach instance " - "%s; another instance with key %s is already " - "present in this session." - % (mapperutil.state_str(state), state.key)) - - if state.session_id and \ - state.session_id is not self.hash_key and \ - state.session_id in _sessions: - raise sa_exc.InvalidRequestError( - "Object '%s' is already attached to session '%s' " - "(this is '%s')" % (mapperutil.state_str(state), - state.session_id, self.hash_key)) - - if state.session_id != self.hash_key: - state.session_id = self.hash_key - if self.dispatch.after_attach: - self.dispatch.after_attach(self, state.obj()) - - def __contains__(self, instance): - """Return True if the instance is associated with this session. - - The instance may be pending or persistent within the Session for a - result of True. - - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - return self._contains_state(state) - - def __iter__(self): - """Iterate over all pending or persistent instances within this Session.""" - - return iter(list(self._new.values()) + self.identity_map.values()) - - def _contains_state(self, state): - return state in self._new or self.identity_map.contains_state(state) - - def flush(self, objects=None): - """Flush all the object changes to the database. - - Writes out all pending object creations, deletions and modifications - to the database as INSERTs, DELETEs, UPDATEs, etc. Operations are - automatically ordered by the Session's unit of work dependency - solver. - - Database operations will be issued in the current transactional - context and do not affect the state of the transaction, unless an - error occurs, in which case the entire transaction is rolled back. - You may flush() as often as you like within a transaction to move - changes from Python to the database's transaction buffer. - - For ``autocommit`` Sessions with no active manual transaction, flush() - will create a transaction on the fly that surrounds the entire set of - operations in the flush. - - :param objects: Optional; restricts the flush operation to operate - only on elements that are in the given collection. - - This feature is for an extremely narrow set of use cases where - particular objects may need to be operated upon before the - full flush() occurs. It is not intended for general use.
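E.g., a minimal sketch (``User`` is a placeholder mapped class)::

    user = User(name='somename')
    session.add(user)
    session.flush()     # the INSERT is emitted; user's primary key is now available
    session.commit()    # the enclosing transaction commits
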
- - """ - - if self._flushing: - raise sa_exc.InvalidRequestError("Session is already flushing") - - if self._is_clean(): - return - try: - self._flushing = True - self._flush(objects) - finally: - self._flushing = False - - def _is_clean(self): - return not self.identity_map.check_modified() and \ - not self._deleted and \ - not self._new - - def _flush(self, objects=None): - - dirty = self._dirty_states - if not dirty and not self._deleted and not self._new: - self.identity_map._modified.clear() - return - - flush_context = UOWTransaction(self) - - if self.dispatch.before_flush: - self.dispatch.before_flush(self, flush_context, objects) - # re-establish "dirty states" in case the listeners - # added - dirty = self._dirty_states - - deleted = set(self._deleted) - new = set(self._new) - - dirty = set(dirty).difference(deleted) - - # create the set of all objects we want to operate upon - if objects: - # specific list passed in - objset = set() - for o in objects: - try: - state = attributes.instance_state(o) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(o) - objset.add(state) - else: - objset = None - - # store objects whose fate has been decided - processed = set() - - # put all saves/updates into the flush context. detect top-level - # orphans and throw them into deleted. - if objset: - proc = new.union(dirty).intersection(objset).difference(deleted) - else: - proc = new.union(dirty).difference(deleted) - - for state in proc: - is_orphan = _state_mapper(state)._is_orphan(state) and state.has_identity - flush_context.register_object(state, isdelete=is_orphan) - processed.add(state) - - # put all remaining deletes into the flush context. - if objset: - proc = deleted.intersection(objset).difference(processed) - else: - proc = deleted.difference(processed) - for state in proc: - flush_context.register_object(state, isdelete=True) - - if not flush_context.has_work: - return - - flush_context.transaction = transaction = self.begin( - subtransactions=True) - try: - flush_context.execute() - - self.dispatch.after_flush(self, flush_context) - - flush_context.finalize_flush_changes() - - # useful assertions: - #if not objects: - # assert not self.identity_map._modified - #else: - # assert self.identity_map._modified == \ - # self.identity_map._modified.difference(objects) - - self.dispatch.after_flush_postexec(self, flush_context) - - transaction.commit() - - except: - transaction.rollback(_capture_exception=True) - raise - - - def is_modified(self, instance, include_collections=True, - passive=attributes.PASSIVE_OFF): - """Return ``True`` if the given instance has locally - modified attributes. - - This method retrieves the history for each instrumented - attribute on the instance and performs a comparison of the current - value to its previously committed value, if any. - - It is in effect a more expensive and accurate - version of checking for the given instance in the - :attr:`.Session.dirty` collection; a full test for - each attribute's net "dirty" status is performed. - - E.g.:: - - return session.is_modified(someobject, passive=True) - - .. versionchanged:: 0.8 - In SQLAlchemy 0.7 and earlier, the ``passive`` - flag should **always** be explicitly set to ``True``. 
- The current default value of :data:`.attributes.PASSIVE_OFF` - for this flag is incorrect, in that it loads unloaded - collections and attributes which by definition - have no modified state, and furthermore trips off - autoflush which then causes all subsequent, possibly - modified attributes to lose their modified state. - The default value of the flag will be changed in 0.8. - - A few caveats to this method apply: - - * Instances present in the :attr:`.Session.dirty` collection may report - ``False`` when tested with this method. This is because - the object may have received change events via attribute - mutation, thus placing it in :attr:`.Session.dirty`, - but ultimately the state is the same as that loaded from - the database, resulting in no net change here. - * Scalar attributes may not have recorded the previously set - value when a new value was applied, if the attribute was not loaded, - or was expired, at the time the new value was received - in these - cases, the attribute is assumed to have a change, even if there is - ultimately no net change against its database value. SQLAlchemy in - most cases does not need the "old" value when a set event occurs, so - it skips the expense of a SQL call if the old value isn't present, - based on the assumption that an UPDATE of the scalar value is - usually needed, and in those few cases where it isn't, is less - expensive on average than issuing a defensive SELECT. - - The "old" value is fetched unconditionally only if the attribute - container has the ``active_history`` flag set to ``True``. This flag - is set typically for primary key attributes and scalar object references - that are not a simple many-to-one. To set this flag for - any arbitrary mapped column, use the ``active_history`` argument - with :func:`.column_property`. - - :param instance: mapped instance to be tested for pending changes. - :param include_collections: Indicates if multivalued collections should be - included in the operation. Setting this to ``False`` is a way to detect - only local-column based properties (i.e. scalar columns or many-to-one - foreign keys) that would result in an UPDATE for this instance upon - flush. - :param passive: Indicates if unloaded attributes and - collections should be loaded in the course of performing - this test. If set to ``False``, or left at its default - value of :data:`.PASSIVE_OFF`, unloaded attributes - will be loaded. If set to ``True`` or - :data:`.PASSIVE_NO_INITIALIZE`, unloaded - collections and attributes will remain unloaded. As - noted previously, the existence of this flag here - is a bug, as unloaded attributes by definition have - no changes, and the load operation also triggers an - autoflush which then cancels out subsequent changes. - This flag should **always be set to True**. - - .. versionchanged:: 0.8 - The flag will be deprecated and the default - set to ``True``. 
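E.g., a brief sketch of the recommended usage (``someobject`` is a placeholder persistent instance)::

    if session.is_modified(someobject, passive=True):
        print("someobject has a net change on at least one attribute")
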
- - """ - try: - state = attributes.instance_state(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - dict_ = state.dict - - if passive is True: - passive = attributes.PASSIVE_NO_INITIALIZE - elif passive is False: - passive = attributes.PASSIVE_OFF - - for attr in state.manager.attributes: - if \ - ( - not include_collections and - hasattr(attr.impl, 'get_collection') - ) or not hasattr(attr.impl, 'get_history'): - continue - - (added, unchanged, deleted) = \ - attr.impl.get_history(state, dict_, passive=passive) - - if added or deleted: - return True - return False - - @property - def is_active(self): - """True if this :class:`.Session` is in "transaction mode" and - is not in "partial rollback" state. - - The :class:`.Session` in its default mode of ``autocommit=False`` - is essentially always in "transaction mode", in that a - :class:`.SessionTransaction` is associated with it as soon as - it is instantiated. This :class:`.SessionTransaction` is immediately - replaced with a new one as soon as it is ended, due to a rollback, - commit, or close operation. - - "Transaction mode" does *not* indicate whether - or not actual database connection resources are in use; the - :class:`.SessionTransaction` object coordinates among zero or more - actual database transactions, and starts out with none, accumulating - individual DBAPI connections as different data sources are used - within its scope. The best way to track when a particular - :class:`.Session` has actually begun to use DBAPI resources is to - implement a listener using the :meth:`.SessionEvents.after_begin` - method, which will deliver both the :class:`.Session` as well as the - target :class:`.Connection` to a user-defined event listener. - - The "partial rollback" state refers to when an "inner" transaction, - typically used during a flush, encounters an error and emits - a rollback of the DBAPI connection. At this point, the :class:`.Session` - is in "partial rollback" and awaits for the user to call :meth:`.rollback`, - in order to close out the transaction stack. It is in this "partial - rollback" period that the :attr:`.is_active` flag returns False. After - the call to :meth:`.rollback`, the :class:`.SessionTransaction` is replaced - with a new one and :attr:`.is_active` returns ``True`` again. - - When a :class:`.Session` is used in ``autocommit=True`` mode, the - :class:`.SessionTransaction` is only instantiated within the scope - of a flush call, or when :meth:`.Session.begin` is called. So - :attr:`.is_active` will always be ``False`` outside of a flush or - :meth:`.begin` block in this mode, and will be ``True`` within the - :meth:`.begin` block as long as it doesn't enter "partial rollback" - state. - - From all the above, it follows that the only purpose to this flag is - for application frameworks that wish to detect is a "rollback" is - necessary within a generic error handling routine, for :class:`.Session` - objects that would otherwise be in "partial rollback" mode. In - a typical integration case, this is also not necessary as it is standard - practice to emit :meth:`.Session.rollback` unconditionally within the - outermost exception catch. - - To track the transactional state of a :class:`.Session` fully, - use event listeners, primarily the :meth:`.SessionEvents.after_begin`, - :meth:`.SessionEvents.after_commit`, :meth:`.SessionEvents.after_rollback` - and related events. 
- - """ - return self.transaction and self.transaction.is_active - - identity_map = None - """A mapping of object identities to objects themselves. - - Iterating through ``Session.identity_map.values()`` provides - access to the full set of persistent objects (i.e., those - that have row identity) currently in the session. - - See also: - - :func:`.identity_key` - operations involving identity keys. - - """ - - @property - def _dirty_states(self): - """The set of all persistent states considered dirty. - - This method returns all states that were modified including - those that were possibly deleted. - - """ - return self.identity_map._dirty_states() - - @property - def dirty(self): - """The set of all persistent instances considered dirty. - - E.g.:: - - some_mapped_object in session.dirty - - Instances are considered dirty when they were modified but not - deleted. - - Note that this 'dirty' calculation is 'optimistic'; most - attribute-setting or collection modification operations will - mark an instance as 'dirty' and place it in this set, even if - there is no net change to the attribute's value. At flush - time, the value of each attribute is compared to its - previously saved value, and if there's no net change, no SQL - operation will occur (this is a more expensive operation so - it's only done at flush time). - - To check if an instance has actionable net changes to its - attributes, use the :meth:`.Session.is_modified` method. - - """ - return util.IdentitySet( - [state.obj() - for state in self._dirty_states - if state not in self._deleted]) - - @property - def deleted(self): - "The set of all instances marked as 'deleted' within this ``Session``" - - return util.IdentitySet(self._deleted.values()) - - @property - def new(self): - "The set of all instances marked as 'new' within this ``Session``." - - return util.IdentitySet(self._new.values()) - -_sessions = weakref.WeakValueDictionary() - -def make_transient(instance): - """Make the given instance 'transient'. - - This will remove its association with any - session and additionally will remove its "identity key", - such that it's as though the object were newly constructed, - except retaining its values. It also resets the - "deleted" flag on the state if this object - had been explicitly deleted by its session. - - Attributes which were "expired" or deferred at the - instance level are reverted to undefined, and - will not trigger any loads. - - """ - state = attributes.instance_state(instance) - s = _state_session(state) - if s: - s._expunge_state(state) - - # remove expired state and - # deferred callables - state.callables.clear() - if state.key: - del state.key - if state.deleted: - del state.deleted - -def object_session(instance): - """Return the ``Session`` to which instance belongs. - - If the instance is not a mapped instance, an error is raised. 
- - """ - - try: - return _state_session(attributes.instance_state(instance)) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - - -def _state_session(state): - if state.session_id: - try: - return _sessions[state.session_id] - except KeyError: - pass - return None - -_new_sessionid = util.counter() diff --git a/libs/sqlalchemy/orm/shard.py b/libs/sqlalchemy/orm/shard.py deleted file mode 100644 index 93bc7a6b..00000000 --- a/libs/sqlalchemy/orm/shard.py +++ /dev/null @@ -1,15 +0,0 @@ -# orm/shard.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy import util - -util.warn_deprecated( - "Horizontal sharding is now importable via " - "'import sqlalchemy.ext.horizontal_shard" -) - -from sqlalchemy.ext.horizontal_shard import * - diff --git a/libs/sqlalchemy/orm/state.py b/libs/sqlalchemy/orm/state.py deleted file mode 100644 index b9a9c463..00000000 --- a/libs/sqlalchemy/orm/state.py +++ /dev/null @@ -1,567 +0,0 @@ -# orm/state.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines instrumentation of instances. - -This module is usually not directly visible to user applications, but -defines a large part of the ORM's interactivity. - -""" - -from sqlalchemy.util import EMPTY_SET -import weakref -from sqlalchemy import util - -from sqlalchemy.orm import exc as orm_exc, attributes, interfaces,\ - util as orm_util -from sqlalchemy.orm.attributes import PASSIVE_OFF, PASSIVE_NO_RESULT, \ - PASSIVE_NO_FETCH, NEVER_SET, ATTR_WAS_SET, NO_VALUE - -mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") - -import sys - -class InstanceState(object): - """tracks state information at the instance level.""" - - session_id = None - key = None - runid = None - load_options = EMPTY_SET - load_path = () - insert_order = None - mutable_dict = None - _strong_obj = None - modified = False - expired = False - deleted = False - - def __init__(self, obj, manager): - self.class_ = obj.__class__ - self.manager = manager - self.obj = weakref.ref(obj, self._cleanup) - self.callables = {} - self.committed_state = {} - - @util.memoized_property - def parents(self): - return {} - - @util.memoized_property - def pending(self): - return {} - - @property - def has_identity(self): - return bool(self.key) - - def detach(self): - self.session_id = None - - def dispose(self): - self.detach() - del self.obj - - def _cleanup(self, ref): - instance_dict = self._instance_dict() - if instance_dict: - instance_dict.discard(self) - - self.callables = {} - self.session_id = None - del self.obj - - def obj(self): - return None - - @property - def dict(self): - o = self.obj() - if o is not None: - return attributes.instance_dict(o) - else: - return {} - - def initialize_instance(*mixed, **kwargs): - self, instance, args = mixed[0], mixed[1], mixed[2:] - manager = self.manager - - manager.dispatch.init(self, args, kwargs) - - #if manager.mutable_attributes: - # assert self.__class__ is MutableAttrInstanceState - - try: - return manager.original_init(*mixed[1:], **kwargs) - except: - manager.dispatch.init_failure(self, args, kwargs) - raise - - def get_history(self, key, passive): - return self.manager[key].impl.get_history(self, self.dict, passive) - - def get_impl(self, key): - return 
self.manager[key].impl - - def get_pending(self, key): - if key not in self.pending: - self.pending[key] = PendingCollection() - return self.pending[key] - - def value_as_iterable(self, dict_, key, passive=PASSIVE_OFF): - """Return a list of tuples (state, obj) for the given - key. - - returns an empty list if the value is None/empty/PASSIVE_NO_RESULT - """ - - impl = self.manager[key].impl - x = impl.get(self, dict_, passive=passive) - if x is PASSIVE_NO_RESULT or x is None: - return [] - elif hasattr(impl, 'get_collection'): - return [ - (attributes.instance_state(o), o) for o in - impl.get_collection(self, dict_, x, passive=passive) - ] - else: - return [(attributes.instance_state(x), x)] - - def __getstate__(self): - d = {'instance':self.obj()} - d.update( - (k, self.__dict__[k]) for k in ( - 'committed_state', 'pending', 'modified', 'expired', - 'callables', 'key', 'parents', 'load_options', 'mutable_dict', - 'class_', - ) if k in self.__dict__ - ) - if self.load_path: - d['load_path'] = interfaces.serialize_path(self.load_path) - - self.manager.dispatch.pickle(self, d) - - return d - - def __setstate__(self, state): - from sqlalchemy.orm import instrumentation - inst = state['instance'] - if inst is not None: - self.obj = weakref.ref(inst, self._cleanup) - self.class_ = inst.__class__ - else: - # None being possible here generally new as of 0.7.4 - # due to storage of state in "parents". "class_" - # also new. - self.obj = None - self.class_ = state['class_'] - self.manager = manager = instrumentation.manager_of_class(self.class_) - if manager is None: - raise orm_exc.UnmappedInstanceError( - inst, - "Cannot deserialize object of type %r - no mapper() has" - " been configured for this class within the current Python process!" % - self.class_) - elif manager.is_mapped and not manager.mapper.configured: - mapperlib.configure_mappers() - - self.committed_state = state.get('committed_state', {}) - self.pending = state.get('pending', {}) - self.parents = state.get('parents', {}) - self.modified = state.get('modified', False) - self.expired = state.get('expired', False) - self.callables = state.get('callables', {}) - - if self.modified: - self._strong_obj = inst - - self.__dict__.update([ - (k, state[k]) for k in ( - 'key', 'load_options', 'mutable_dict' - ) if k in state - ]) - - if 'load_path' in state: - self.load_path = interfaces.deserialize_path(state['load_path']) - - # setup _sa_instance_state ahead of time so that - # unpickle events can access the object normally. - # see [ticket:2362] - manager.setup_instance(inst, self) - manager.dispatch.unpickle(self, state) - - def initialize(self, key): - """Set this attribute to an empty value or collection, - based on the AttributeImpl in use.""" - - self.manager.get_impl(key).initialize(self, self.dict) - - def reset(self, dict_, key): - """Remove the given attribute and any - callables associated with it.""" - - dict_.pop(key, None) - self.callables.pop(key, None) - - def expire_attribute_pre_commit(self, dict_, key): - """a fast expire that can be called by column loaders during a load. - - The additional bookkeeping is finished up in commit_all(). - - This method is actually called a lot with joined-table - loading, when the second table isn't present in the result. 
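        The ``__getstate__`` / ``__setstate__`` hooks above are what make
        mapped instances picklable; a sketch, assuming a hypothetical mapped
        ``User`` class that is also configured in the unpickling process::

            import pickle

            u = session.query(User).first()
            data = pickle.dumps(u)        # __getstate__ captures committed_state,
                                          # pending, key, load_path, etc.

            u2 = pickle.loads(data)       # __setstate__ re-resolves the ClassManager;
                                          # raises UnmappedInstanceError if no mapper
                                          # is configured for User in this process
            u2 = session.merge(u2, load=False)   # reattach without re-querying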
- - """ - dict_.pop(key, None) - self.callables[key] = self - - def set_callable(self, dict_, key, callable_): - """Remove the given attribute and set the given callable - as a loader.""" - - dict_.pop(key, None) - self.callables[key] = callable_ - - def expire(self, dict_, modified_set): - self.expired = True - if self.modified: - modified_set.discard(self) - - self.modified = False - - self.committed_state.clear() - - self.__dict__.pop('pending', None) - self.__dict__.pop('mutable_dict', None) - - # clear out 'parents' collection. not - # entirely clear how we can best determine - # which to remove, or not. - self.__dict__.pop('parents', None) - - for key in self.manager: - impl = self.manager[key].impl - if impl.accepts_scalar_loader and \ - (impl.expire_missing or key in dict_): - self.callables[key] = self - dict_.pop(key, None) - - self.manager.dispatch.expire(self, None) - - def expire_attributes(self, dict_, attribute_names): - pending = self.__dict__.get('pending', None) - mutable_dict = self.mutable_dict - - for key in attribute_names: - impl = self.manager[key].impl - if impl.accepts_scalar_loader: - self.callables[key] = self - dict_.pop(key, None) - - self.committed_state.pop(key, None) - if mutable_dict: - mutable_dict.pop(key, None) - if pending: - pending.pop(key, None) - - self.manager.dispatch.expire(self, attribute_names) - - def __call__(self, passive): - """__call__ allows the InstanceState to act as a deferred - callable for loading expired attributes, which is also - serializable (picklable). - - """ - - if passive is PASSIVE_NO_FETCH: - return PASSIVE_NO_RESULT - - toload = self.expired_attributes.\ - intersection(self.unmodified) - - self.manager.deferred_scalar_loader(self, toload) - - # if the loader failed, or this - # instance state didn't have an identity, - # the attributes still might be in the callables - # dict. ensure they are removed. - for k in toload.intersection(self.callables): - del self.callables[k] - - return ATTR_WAS_SET - - @property - def unmodified(self): - """Return the set of keys which have no uncommitted changes""" - - return set(self.manager).difference(self.committed_state) - - def unmodified_intersection(self, keys): - """Return self.unmodified.intersection(keys).""" - - return set(keys).intersection(self.manager).\ - difference(self.committed_state) - - - @property - def unloaded(self): - """Return the set of keys which do not have a loaded value. - - This includes expired attributes and any other attribute that - was never populated or modified. - - """ - return set(self.manager).\ - difference(self.committed_state).\ - difference(self.dict) - - @property - def expired_attributes(self): - """Return the set of keys which are 'expired' to be loaded by - the manager's deferred scalar loader, assuming no pending - changes. - - see also the ``unmodified`` collection which is intersected - against this set when a refresh operation occurs. - - """ - return set([k for k, v in self.callables.items() if v is self]) - - def _instance_dict(self): - return None - - def _is_really_none(self): - return self.obj() - - def modified_event(self, dict_, attr, previous, collection=False): - if attr.key not in self.committed_state: - if collection: - if previous is NEVER_SET: - if attr.key in dict_: - previous = dict_[attr.key] - - if previous not in (None, NO_VALUE, NEVER_SET): - previous = attr.copy(previous) - - self.committed_state[attr.key] = previous - - # the "or not self.modified" is defensive at - # this point. 
The assertion below is expected - # to be True: - # assert self._strong_obj is None or self.modified - - if self._strong_obj is None or not self.modified: - instance_dict = self._instance_dict() - if instance_dict: - instance_dict._modified.add(self) - - self._strong_obj = self.obj() - if self._strong_obj is None: - raise orm_exc.ObjectDereferencedError( - "Can't emit change event for attribute '%s' - " - "parent object of type %s has been garbage " - "collected." - % ( - self.manager[attr.key], - orm_util.state_class_str(self) - )) - self.modified = True - - def commit(self, dict_, keys): - """Commit attributes. - - This is used by a partial-attribute load operation to mark committed - those attributes which were refreshed from the database. - - Attributes marked as "expired" can potentially remain "expired" after - this step if a value was not populated in state.dict. - - """ - class_manager = self.manager - if class_manager.mutable_attributes: - for key in keys: - if key in dict_ and key in class_manager.mutable_attributes: - self.committed_state[key] = self.manager[key].impl.copy(dict_[key]) - else: - self.committed_state.pop(key, None) - else: - for key in keys: - self.committed_state.pop(key, None) - - self.expired = False - - for key in set(self.callables).\ - intersection(keys).\ - intersection(dict_): - del self.callables[key] - - def commit_all(self, dict_, instance_dict=None): - """commit all attributes unconditionally. - - This is used after a flush() or a full load/refresh - to remove all pending state from the instance. - - - all attributes are marked as "committed" - - the "strong dirty reference" is removed - - the "modified" flag is set to False - - any "expired" markers/callables for attributes loaded are removed. - - Attributes marked as "expired" can potentially remain "expired" after this step - if a value was not populated in state.dict. - - """ - - self.committed_state.clear() - self.__dict__.pop('pending', None) - - callables = self.callables - for key in list(callables): - if key in dict_ and callables[key] is self: - del callables[key] - - for key in self.manager.mutable_attributes: - if key in dict_: - self.committed_state[key] = self.manager[key].impl.copy(dict_[key]) - - if instance_dict and self.modified: - instance_dict._modified.discard(self) - - self.modified = self.expired = False - self._strong_obj = None - -class MutableAttrInstanceState(InstanceState): - """InstanceState implementation for objects that reference 'mutable' - attributes. - - Has a more involved "cleanup" handler that checks mutable attributes - for changes upon dereference, resurrecting if needed. 
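    The expire/commit bookkeeping above drives the familiar
    refresh-on-access pattern; schematically (hypothetical ``User``,
    ``session`` assumed)::

        u = session.query(User).first()
        session.expire(u)     # InstanceState.expire(): attributes are popped
                              # from u.__dict__ and self-callables installed

        u.name                # attribute access invokes InstanceState.__call__,
                              # which loads the expired, unmodified attributes
                              # and removes the callables it satisfied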
- - """ - - @util.memoized_property - def mutable_dict(self): - return {} - - def _get_modified(self, dict_=None): - if self.__dict__.get('modified', False): - return True - else: - if dict_ is None: - dict_ = self.dict - for key in self.manager.mutable_attributes: - if self.manager[key].impl.check_mutable_modified(self, dict_): - return True - else: - return False - - def _set_modified(self, value): - self.__dict__['modified'] = value - - modified = property(_get_modified, _set_modified) - - @property - def unmodified(self): - """a set of keys which have no uncommitted changes""" - - dict_ = self.dict - - return set([ - key for key in self.manager - if (key not in self.committed_state or - (key in self.manager.mutable_attributes and - not self.manager[key].impl.check_mutable_modified(self, dict_)))]) - - def unmodified_intersection(self, keys): - """Return self.unmodified.intersection(keys).""" - - dict_ = self.dict - - return set([ - key for key in keys - if (key not in self.committed_state or - (key in self.manager.mutable_attributes and - not self.manager[key].impl.check_mutable_modified(self, dict_)))]) - - - def _is_really_none(self): - """do a check modified/resurrect. - - This would be called in the extremely rare - race condition that the weakref returned None but - the cleanup handler had not yet established the - __resurrect callable as its replacement. - - """ - if self.modified: - self.obj = self.__resurrect - return self.obj() - else: - return None - - def reset(self, dict_, key): - self.mutable_dict.pop(key, None) - InstanceState.reset(self, dict_, key) - - def _cleanup(self, ref): - """weakref callback. - - This method may be called by an asynchronous - gc. - - If the state shows pending changes, the weakref - is replaced by the __resurrect callable which will - re-establish an object reference on next access, - else removes this InstanceState from the owning - identity map, if any. - - """ - if self._get_modified(self.mutable_dict): - self.obj = self.__resurrect - else: - instance_dict = self._instance_dict() - if instance_dict: - instance_dict.discard(self) - self.dispose() - - def __resurrect(self): - """A substitute for the obj() weakref function which resurrects.""" - - # store strong ref'ed version of the object; will revert - # to weakref when changes are persisted - obj = self.manager.new_instance(state=self) - self.obj = weakref.ref(obj, self._cleanup) - self._strong_obj = obj - obj.__dict__.update(self.mutable_dict) - - # re-establishes identity attributes from the key - self.manager.dispatch.resurrect(self) - - return obj - -class PendingCollection(object): - """A writable placeholder for an unloaded collection. - - Stores items appended to and removed from a collection that has not yet - been loaded. When the collection is loaded, the changes stored in - PendingCollection are applied to it to produce the final result. 
- - """ - def __init__(self): - self.deleted_items = util.IdentitySet() - self.added_items = util.OrderedIdentitySet() - - def append(self, value): - if value in self.deleted_items: - self.deleted_items.remove(value) - else: - self.added_items.add(value) - - def remove(self, value): - if value in self.added_items: - self.added_items.remove(value) - else: - self.deleted_items.add(value) - diff --git a/libs/sqlalchemy/orm/strategies.py b/libs/sqlalchemy/orm/strategies.py deleted file mode 100644 index 2cde3f67..00000000 --- a/libs/sqlalchemy/orm/strategies.py +++ /dev/null @@ -1,1432 +0,0 @@ -# orm/strategies.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""sqlalchemy.orm.interfaces.LoaderStrategy - implementations, and related MapperOptions.""" - -from sqlalchemy import exc as sa_exc -from sqlalchemy import sql, util, log, event -from sqlalchemy.sql import util as sql_util -from sqlalchemy.sql import visitors -from sqlalchemy.orm import attributes, interfaces, exc as orm_exc -from sqlalchemy.orm.mapper import _none_set -from sqlalchemy.orm.interfaces import ( - LoaderStrategy, StrategizedOption, MapperOption, PropertyOption, - StrategizedProperty - ) -from sqlalchemy.orm import session as sessionlib, unitofwork -from sqlalchemy.orm import util as mapperutil -from sqlalchemy.orm.query import Query -import itertools - -def _register_attribute(strategy, mapper, useobject, - compare_function=None, - typecallable=None, - copy_function=None, - mutable_scalars=False, - uselist=False, - callable_=None, - proxy_property=None, - active_history=False, - impl_class=None, - **kw -): - - prop = strategy.parent_property - - attribute_ext = list(util.to_list(prop.extension, default=[])) - - listen_hooks = [] - - if useobject and prop.single_parent: - listen_hooks.append(single_parent_validator) - - if prop.key in prop.parent.validators: - fn, include_removes = prop.parent.validators[prop.key] - listen_hooks.append( - lambda desc, prop: mapperutil._validator_events(desc, - prop.key, fn, include_removes) - ) - - if useobject: - listen_hooks.append(unitofwork.track_cascade_events) - - # need to assemble backref listeners - # after the singleparentvalidator, mapper validator - backref = kw.pop('backref', None) - if backref: - listen_hooks.append( - lambda desc, prop: attributes.backref_listeners(desc, - backref, - uselist) - ) - - for m in mapper.self_and_descendants: - if prop is m._props.get(prop.key): - - desc = attributes.register_attribute_impl( - m.class_, - prop.key, - parent_token=prop, - mutable_scalars=mutable_scalars, - uselist=uselist, - copy_function=copy_function, - compare_function=compare_function, - useobject=useobject, - extension=attribute_ext, - trackparent=useobject and (prop.single_parent or prop.direction is interfaces.ONETOMANY), - typecallable=typecallable, - callable_=callable_, - active_history=active_history, - impl_class=impl_class, - doc=prop.doc, - **kw - ) - - for hook in listen_hooks: - hook(desc, prop) - -class UninstrumentedColumnLoader(LoaderStrategy): - """Represent the a non-instrumented MapperProperty. - - The polymorphic_on argument of mapper() often results in this, - if the argument is against the with_polymorphic selectable. 
- - """ - def init(self): - self.columns = self.parent_property.columns - - def setup_query(self, context, entity, path, reduced_path, adapter, - column_collection=None, **kwargs): - for c in self.columns: - if adapter: - c = adapter.columns[c] - column_collection.append(c) - - def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): - return None, None, None - -class ColumnLoader(LoaderStrategy): - """Provide loading behavior for a :class:`.ColumnProperty`.""" - - def init(self): - self.columns = self.parent_property.columns - self.is_composite = hasattr(self.parent_property, 'composite_class') - - def setup_query(self, context, entity, path, reduced_path, - adapter, column_collection, **kwargs): - for c in self.columns: - if adapter: - c = adapter.columns[c] - column_collection.append(c) - - def init_class_attribute(self, mapper): - self.is_class_level = True - coltype = self.columns[0].type - # TODO: check all columns ? check for foreign key as well? - active_history = self.parent_property.active_history or \ - self.columns[0].primary_key - - _register_attribute(self, mapper, useobject=False, - compare_function=coltype.compare_values, - copy_function=coltype.copy_value, - mutable_scalars=self.columns[0].type.is_mutable(), - active_history = active_history - ) - - def create_row_processor(self, context, path, reduced_path, - mapper, row, adapter): - key = self.key - # look through list of columns represented here - # to see which, if any, is present in the row. - for col in self.columns: - if adapter: - col = adapter.columns[col] - if col is not None and col in row: - def fetch_col(state, dict_, row): - dict_[key] = row[col] - return fetch_col, None, None - else: - def expire_for_non_present_col(state, dict_, row): - state.expire_attribute_pre_commit(dict_, key) - return expire_for_non_present_col, None, None - -log.class_logger(ColumnLoader) - -class DeferredColumnLoader(LoaderStrategy): - """Provide loading behavior for a deferred :class:`.ColumnProperty`.""" - - def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): - col = self.columns[0] - if adapter: - col = adapter.columns[col] - - key = self.key - if col in row: - return self.parent_property._get_strategy(ColumnLoader).\ - create_row_processor( - context, path, reduced_path, mapper, row, adapter) - - elif not self.is_class_level: - def set_deferred_for_local_state(state, dict_, row): - state.set_callable(dict_, key, LoadDeferredColumns(state, key)) - return set_deferred_for_local_state, None, None - else: - def reset_col_for_deferred(state, dict_, row): - # reset state on the key so that deferred callables - # fire off on next access. 
-                state.reset(dict_, key)
-            return reset_col_for_deferred, None, None
-
-    def init(self):
-        if hasattr(self.parent_property, 'composite_class'):
-            raise NotImplementedError("Deferred loading for composite "
-                                    "types not implemented yet")
-        self.columns = self.parent_property.columns
-        self.group = self.parent_property.group
-
-    def init_class_attribute(self, mapper):
-        self.is_class_level = True
-
-        _register_attribute(self, mapper, useobject=False,
-             compare_function=self.columns[0].type.compare_values,
-             copy_function=self.columns[0].type.copy_value,
-             mutable_scalars=self.columns[0].type.is_mutable(),
-             callable_=self._load_for_state,
-             expire_missing=False
-        )
-
-    def setup_query(self, context, entity, path, reduced_path, adapter,
-                                only_load_props=None, **kwargs):
-        if (
-                self.group is not None and
-                context.attributes.get(('undefer', self.group), False)
-            ) or (only_load_props and self.key in only_load_props):
-            self.parent_property._get_strategy(ColumnLoader).\
-                            setup_query(context, entity,
-                                        path, reduced_path, adapter, **kwargs)
-
-    def _load_for_state(self, state, passive):
-        if not state.key:
-            return attributes.ATTR_EMPTY
-
-        if passive is attributes.PASSIVE_NO_FETCH:
-            return attributes.PASSIVE_NO_RESULT
-
-        localparent = state.manager.mapper
-
-        if self.group:
-            toload = [
-                    p.key for p in
-                    localparent.iterate_properties
-                    if isinstance(p, StrategizedProperty) and
-                        isinstance(p.strategy, DeferredColumnLoader) and
-                        p.group==self.group
-                    ]
-        else:
-            toload = [self.key]
-
-        # narrow the keys down to just those which have no history
-        group = [k for k in toload if k in state.unmodified]
-
-        session = sessionlib._state_session(state)
-        if session is None:
-            raise orm_exc.DetachedInstanceError(
-                "Parent instance %s is not bound to a Session; "
-                "deferred load operation of attribute '%s' cannot proceed" %
-                (mapperutil.state_str(state), self.key)
-            )
-
-        query = session.query(localparent)
-        if query._load_on_ident(state.key,
-                    only_load_props=group, refresh_state=state) is None:
-            raise orm_exc.ObjectDeletedError(state)
-
-        return attributes.ATTR_WAS_SET
-
-log.class_logger(DeferredColumnLoader)
-
-class LoadDeferredColumns(object):
-    """serializable loader object used by DeferredColumnLoader"""
-
-    def __init__(self, state, key):
-        self.state = state
-        self.key = key
-
-    def __call__(self, passive=attributes.PASSIVE_OFF):
-        state, key = self.state, self.key
-
-        localparent = state.manager.mapper
-        prop = localparent._props[key]
-        strategy = prop._strategies[DeferredColumnLoader]
-        return strategy._load_for_state(state, passive)
-
-class DeferredOption(StrategizedOption):
-    propagate_to_loaders = True
-
-    def __init__(self, key, defer=False):
-        super(DeferredOption, self).__init__(key)
-        self.defer = defer
-
-    def get_strategy_class(self):
-        if self.defer:
-            return DeferredColumnLoader
-        else:
-            return ColumnLoader
-
-class UndeferGroupOption(MapperOption):
-    propagate_to_loaders = True
-
-    def __init__(self, group):
-        self.group = group
-
-    def process_query(self, query):
-        query._attributes[('undefer', self.group)] = True
-
-class AbstractRelationshipLoader(LoaderStrategy):
-    """LoaderStrategies which deal with related objects."""
-
-    def init(self):
-        self.mapper = self.parent_property.mapper
-        self.target = self.parent_property.target
-        self.uselist = self.parent_property.uselist
-
-class NoLoader(AbstractRelationshipLoader):
-    """Provide loading behavior for a :class:`.RelationshipProperty`
-    with "lazy=None".
- - """ - - def init_class_attribute(self, mapper): - self.is_class_level = True - - _register_attribute(self, mapper, - useobject=True, - uselist=self.parent_property.uselist, - typecallable = self.parent_property.collection_class, - ) - - def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): - def invoke_no_load(state, dict_, row): - state.initialize(self.key) - return invoke_no_load, None, None - -log.class_logger(NoLoader) - -class LazyLoader(AbstractRelationshipLoader): - """Provide loading behavior for a :class:`.RelationshipProperty` - with "lazy=True", that is loads when first accessed. - - """ - - def init(self): - super(LazyLoader, self).init() - self._lazywhere, \ - self._bind_to_col, \ - self._equated_columns = self._create_lazy_clause(self.parent_property) - - self._rev_lazywhere, \ - self._rev_bind_to_col, \ - self._rev_equated_columns = self._create_lazy_clause( - self.parent_property, - reverse_direction=True) - - self.logger.info("%s lazy loading clause %s", self, self._lazywhere) - - # determine if our "lazywhere" clause is the same as the mapper's - # get() clause. then we can just use mapper.get() - #from sqlalchemy.orm import query - self.use_get = not self.uselist and \ - self.mapper._get_clause[0].compare( - self._lazywhere, - use_proxies=True, - equivalents=self.mapper._equivalent_columns - ) - - if self.use_get: - for col in self._equated_columns.keys(): - if col in self.mapper._equivalent_columns: - for c in self.mapper._equivalent_columns[col]: - self._equated_columns[c] = self._equated_columns[col] - - self.logger.info("%s will use query.get() to " - "optimize instance loads" % self) - - def init_class_attribute(self, mapper): - self.is_class_level = True - - # MANYTOONE currently only needs the - # "old" value for delete-orphan - # cascades. the required _SingleParentValidator - # will enable active_history - # in that case. otherwise we don't need the - # "old" value during backref operations. - _register_attribute(self, - mapper, - useobject=True, - callable_=self._load_for_state, - uselist = self.parent_property.uselist, - backref = self.parent_property.back_populates, - typecallable = self.parent_property.collection_class, - active_history = \ - self.parent_property.active_history or \ - self.parent_property.direction is not \ - interfaces.MANYTOONE or \ - not self.use_get, - ) - - def lazy_clause(self, state, reverse_direction=False, - alias_secondary=False, - adapt_source=None): - if state is None: - return self._lazy_none_clause( - reverse_direction, - adapt_source=adapt_source) - - if not reverse_direction: - criterion, bind_to_col, rev = \ - self._lazywhere, \ - self._bind_to_col, \ - self._equated_columns - else: - criterion, bind_to_col, rev = \ - self._rev_lazywhere, \ - self._rev_bind_to_col, \ - self._rev_equated_columns - - if reverse_direction: - mapper = self.parent_property.mapper - else: - mapper = self.parent_property.parent - - o = state.obj() # strong ref - dict_ = attributes.instance_dict(o) - - # use the "committed state" only if we're in a flush - # for this state. 
- - sess = sessionlib._state_session(state) - if sess is not None and sess._flushing: - def visit_bindparam(bindparam): - if bindparam._identifying_key in bind_to_col: - bindparam.callable = \ - lambda: mapper._get_committed_state_attr_by_column( - state, dict_, - bind_to_col[bindparam._identifying_key]) - else: - def visit_bindparam(bindparam): - if bindparam._identifying_key in bind_to_col: - bindparam.callable = \ - lambda: mapper._get_state_attr_by_column( - state, dict_, - bind_to_col[bindparam._identifying_key]) - - - if self.parent_property.secondary is not None and alias_secondary: - criterion = sql_util.ClauseAdapter( - self.parent_property.secondary.alias()).\ - traverse(criterion) - - criterion = visitors.cloned_traverse( - criterion, {}, {'bindparam':visit_bindparam}) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion - - def _lazy_none_clause(self, reverse_direction=False, adapt_source=None): - if not reverse_direction: - criterion, bind_to_col, rev = \ - self._lazywhere, \ - self._bind_to_col,\ - self._equated_columns - else: - criterion, bind_to_col, rev = \ - self._rev_lazywhere, \ - self._rev_bind_to_col, \ - self._rev_equated_columns - - criterion = sql_util.adapt_criterion_to_null(criterion, bind_to_col) - - if adapt_source: - criterion = adapt_source(criterion) - return criterion - - def _load_for_state(self, state, passive): - if not state.key and \ - (not self.parent_property.load_on_pending or not state.session_id): - return attributes.ATTR_EMPTY - - pending = not state.key - ident_key = None - - if ( - (passive is attributes.PASSIVE_NO_FETCH or \ - passive is attributes.PASSIVE_NO_FETCH_RELATED) and - not self.use_get - ) or ( - passive is attributes.PASSIVE_ONLY_PERSISTENT and - pending - ): - return attributes.PASSIVE_NO_RESULT - - session = sessionlib._state_session(state) - if not session: - raise orm_exc.DetachedInstanceError( - "Parent instance %s is not bound to a Session; " - "lazy load operation of attribute '%s' cannot proceed" % - (mapperutil.state_str(state), self.key) - ) - - # if we have a simple primary key load, check the - # identity map without generating a Query at all - if self.use_get: - ident = self._get_ident_for_use_get( - session, - state, - passive - ) - if attributes.PASSIVE_NO_RESULT in ident: - return attributes.PASSIVE_NO_RESULT - elif attributes.NEVER_SET in ident: - return attributes.NEVER_SET - - if _none_set.issuperset(ident): - return None - - ident_key = self.mapper.identity_key_from_primary_key(ident) - instance = Query._get_from_identity(session, ident_key, passive) - if instance is not None: - return instance - elif passive is attributes.PASSIVE_NO_FETCH or \ - passive is attributes.PASSIVE_NO_FETCH_RELATED: - return attributes.PASSIVE_NO_RESULT - - return self._emit_lazyload(session, state, ident_key) - - def _get_ident_for_use_get(self, session, state, passive): - instance_mapper = state.manager.mapper - - if session._flushing: - get_attr = instance_mapper._get_committed_state_attr_by_column - else: - get_attr = instance_mapper._get_state_attr_by_column - - dict_ = state.dict - - if passive is attributes.PASSIVE_NO_FETCH_RELATED: - attr_passive = attributes.PASSIVE_OFF - else: - attr_passive = passive - - return [ - get_attr( - state, - dict_, - self._equated_columns[pk], - passive=attr_passive) - for pk in self.mapper.primary_key - ] - - def _emit_lazyload(self, session, state, ident_key): - q = session.query(self.mapper)._adapt_all_clauses() - - q = q._with_invoke_all_eagers(False) - - pending = 
not state.key - - # don't autoflush on pending - if pending: - q = q.autoflush(False) - - if state.load_path: - q = q._with_current_path(state.load_path + (self.key,)) - - if state.load_options: - q = q._conditional_options(*state.load_options) - - if self.use_get: - return q._load_on_ident(ident_key) - - if self.parent_property.order_by: - q = q.order_by(*util.to_list(self.parent_property.order_by)) - - for rev in self.parent_property._reverse_property: - # reverse props that are MANYTOONE are loading *this* - # object from get(), so don't need to eager out to those. - if rev.direction is interfaces.MANYTOONE and \ - rev._use_get and \ - not isinstance(rev.strategy, LazyLoader): - q = q.options(EagerLazyOption((rev.key,), lazy='select')) - - lazy_clause = self.lazy_clause(state) - - if pending: - bind_values = sql_util.bind_values(lazy_clause) - if None in bind_values: - return None - - q = q.filter(lazy_clause) - - result = q.all() - if self.uselist: - return result - else: - l = len(result) - if l: - if l > 1: - util.warn( - "Multiple rows returned with " - "uselist=False for lazily-loaded attribute '%s' " - % self.parent_property) - - return result[0] - else: - return None - - - def create_row_processor(self, context, path, reduced_path, - mapper, row, adapter): - key = self.key - if not self.is_class_level: - def set_lazy_callable(state, dict_, row): - # we are not the primary manager for this attribute - # on this class - set up a - # per-instance lazyloader, which will override the - # class-level behavior. - # this currently only happens when using a - # "lazyload" option on a "no load" - # attribute - "eager" attributes always have a - # class-level lazyloader installed. - state.set_callable(dict_, key, LoadLazyAttribute(state, key)) - return set_lazy_callable, None, None - else: - def reset_for_lazy_callable(state, dict_, row): - # we are the primary manager for this attribute on - # this class - reset its - # per-instance attribute state, so that the class-level - # lazy loader is - # executed when next referenced on this instance. - # this is needed in - # populate_existing() types of scenarios to reset - # any existing state. 
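        # The result[0] branch above is where the well-known warning
        # originates when a scalar relationship matches several rows; e.g.
        # with a hypothetical, underconstrained one-to-one::
        #
        #     # Parent.child is uselist=False, but two child rows share
        #     # the same parent_id:
        #     child = parent.child
        #     # SAWarning: Multiple rows returned with uselist=False
        #     #            for lazily-loaded attribute 'Parent.child'
        #     # the load still succeeds, returning the first row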
- state.reset(dict_, key) - - return reset_for_lazy_callable, None, None - - @classmethod - def _create_lazy_clause(cls, prop, reverse_direction=False): - binds = util.column_dict() - lookup = util.column_dict() - equated_columns = util.column_dict() - - if reverse_direction and prop.secondaryjoin is None: - for l, r in prop.local_remote_pairs: - _list = lookup.setdefault(r, []) - _list.append((r, l)) - equated_columns[l] = r - else: - for l, r in prop.local_remote_pairs: - _list = lookup.setdefault(l, []) - _list.append((l, r)) - equated_columns[r] = l - - def col_to_bind(col): - if col in lookup: - for tobind, equated in lookup[col]: - if equated in binds: - return None - if col not in binds: - binds[col] = sql.bindparam(None, None, type_=col.type, unique=True) - return binds[col] - return None - - lazywhere = prop.primaryjoin - - if prop.secondaryjoin is None or not reverse_direction: - lazywhere = visitors.replacement_traverse( - lazywhere, {}, col_to_bind) - - if prop.secondaryjoin is not None: - secondaryjoin = prop.secondaryjoin - if reverse_direction: - secondaryjoin = visitors.replacement_traverse( - secondaryjoin, {}, col_to_bind) - lazywhere = sql.and_(lazywhere, secondaryjoin) - - bind_to_col = dict((binds[col].key, col) for col in binds) - - return lazywhere, bind_to_col, equated_columns - -log.class_logger(LazyLoader) - -class LoadLazyAttribute(object): - """serializable loader object used by LazyLoader""" - - def __init__(self, state, key): - self.state = state - self.key = key - - def __call__(self, passive=attributes.PASSIVE_OFF): - state, key = self.state, self.key - instance_mapper = state.manager.mapper - prop = instance_mapper._props[key] - strategy = prop._strategies[LazyLoader] - - return strategy._load_for_state(state, passive) - - -class ImmediateLoader(AbstractRelationshipLoader): - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy(LazyLoader).\ - init_class_attribute(mapper) - - def setup_query(self, context, entity, - path, reduced_path, adapter, column_collection=None, - parentmapper=None, **kwargs): - pass - - def create_row_processor(self, context, path, reduced_path, - mapper, row, adapter): - def load_immediate(state, dict_, row): - state.get_impl(self.key).get(state, dict_) - - return None, None, load_immediate - -class SubqueryLoader(AbstractRelationshipLoader): - def init(self): - super(SubqueryLoader, self).init() - self.join_depth = self.parent_property.join_depth - - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy(LazyLoader).\ - init_class_attribute(mapper) - - def setup_query(self, context, entity, - path, reduced_path, adapter, - column_collection=None, - parentmapper=None, **kwargs): - - if not context.query._enable_eagerloads: - return - - path = path + (self.key, ) - reduced_path = reduced_path + (self.key, ) - - # build up a path indicating the path from the leftmost - # entity to the thing we're subquery loading. 
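        # SubqueryLoader is the strategy behind subqueryload() /
        # lazy='subquery'; typical use (hypothetical User.addresses)::
        #
        #     from sqlalchemy.orm import subqueryload
        #
        #     # one SELECT for the users plus one per subquery-loaded
        #     # relationship, instead of one lazy SELECT per parent row
        #     users = session.query(User).\
        #                 options(subqueryload(User.addresses)).\
        #                 all()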
- subq_path = context.attributes.get(('subquery_path', None), ()) - - subq_path = subq_path + path - - # join-depth / recursion check - if ("loaderstrategy", reduced_path) not in context.attributes: - if self.join_depth: - if len(path) / 2 > self.join_depth: - return - else: - if self.mapper.base_mapper in \ - interfaces._reduce_path(subq_path): - return - - subq_mapper, leftmost_mapper, leftmost_attr = \ - self._get_leftmost(subq_path) - - orig_query = context.attributes.get( - ("orig_query", SubqueryLoader), - context.query) - - # generate a new Query from the original, then - # produce a subquery from it. - left_alias = self._generate_from_original_query( - orig_query, leftmost_mapper, - leftmost_attr, subq_path - ) - - # generate another Query that will join the - # left alias to the target relationships. - # basically doing a longhand - # "from_self()". (from_self() itself not quite industrial - # strength enough for all contingencies...but very close) - q = orig_query.session.query(self.mapper) - q._attributes = { - ("orig_query", SubqueryLoader): orig_query, - ('subquery_path', None) : subq_path - } - q = q._enable_single_crit(False) - - to_join, local_attr, parent_alias = \ - self._prep_for_joins(left_alias, subq_path) - q = q.order_by(*local_attr) - q = q.add_columns(*local_attr) - - q = self._apply_joins(q, to_join, left_alias, parent_alias) - - q = self._setup_options(q, subq_path, orig_query) - q = self._setup_outermost_orderby(q) - - # add new query to attributes to be picked up - # by create_row_processor - context.attributes[('subquery', reduced_path)] = q - - def _get_leftmost(self, subq_path): - subq_mapper = mapperutil._class_to_mapper(subq_path[0]) - - # determine attributes of the leftmost mapper - if self.parent.isa(subq_mapper) and self.key==subq_path[1]: - leftmost_mapper, leftmost_prop = \ - self.parent, self.parent_property - else: - leftmost_mapper, leftmost_prop = \ - subq_mapper, \ - subq_mapper._props[subq_path[1]] - leftmost_cols, remote_cols = self._local_remote_columns(leftmost_prop) - - leftmost_attr = [ - leftmost_mapper._columntoproperty[c].class_attribute - for c in leftmost_cols - ] - return subq_mapper, leftmost_mapper, leftmost_attr - - def _generate_from_original_query(self, - orig_query, leftmost_mapper, - leftmost_attr, subq_path - ): - # reformat the original query - # to look only for significant columns - q = orig_query._clone() - - # TODO: why does polymporphic etc. require hardcoding - # into _adapt_col_list ? Does query.add_columns(...) work - # with polymorphic loading ? - q._set_entities(q._adapt_col_list(leftmost_attr)) - - if q._order_by is False: - q._order_by = leftmost_mapper.order_by - - # don't need ORDER BY if no limit/offset - if q._limit is None and q._offset is None: - q._order_by = None - - # the original query now becomes a subquery - # which we'll join onto. - embed_q = q.with_labels().subquery() - left_alias = mapperutil.AliasedClass(leftmost_mapper, embed_q) - return left_alias - - - def _prep_for_joins(self, left_alias, subq_path): - # figure out what's being joined. a.k.a. the fun part - to_join = [ - (subq_path[i], subq_path[i+1]) - for i in xrange(0, len(subq_path), 2) - ] - - # determine the immediate parent class we are joining from, - # which needs to be aliased. - - if len(to_join) < 2: - # in the case of a one level eager load, this is the - # leftmost "left_alias". - parent_alias = left_alias - elif subq_path[-2].isa(self.parent): - # In the case of multiple levels, retrieve - # it from subq_path[-2]. 
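        # Because the original query becomes the subquery joined against,
        # its LIMIT/OFFSET is preserved and the eager load matches exactly
        # the parent rows returned; roughly (shape illustrative, not
        # verbatim SQL; hypothetical User.addresses)::
        #
        #     q = session.query(User).\
        #             order_by(User.id).limit(5).\
        #             options(subqueryload(User.addresses))
        #     # emits approximately:
        #     #   SELECT ... FROM users ORDER BY users.id LIMIT 5
        #     #   SELECT addresses.*, anon_1.users_id FROM
        #     #       (SELECT users.id AS users_id FROM users
        #     #        ORDER BY users.id LIMIT 5) AS anon_1
        #     #       JOIN addresses ON anon_1.users_id = addresses.user_id
        #     #   ORDER BY anon_1.users_id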
This is the same as self.parent - # in the vast majority of cases, and [ticket:2014] - # illustrates a case where sub_path[-2] is a subclass - # of self.parent - parent_alias = mapperutil.AliasedClass(subq_path[-2]) - else: - # if of_type() were used leading to this relationship, - # self.parent is more specific than subq_path[-2] - parent_alias = mapperutil.AliasedClass(self.parent) - - local_cols, remote_cols = \ - self._local_remote_columns(self.parent_property) - - local_attr = [ - getattr(parent_alias, self.parent._columntoproperty[c].key) - for c in local_cols - ] - - return to_join, local_attr, parent_alias - - def _apply_joins(self, q, to_join, left_alias, parent_alias): - for i, (mapper, key) in enumerate(to_join): - - # we need to use query.join() as opposed to - # orm.join() here because of the - # rich behavior it brings when dealing with - # "with_polymorphic" mappers. "aliased" - # and "from_joinpoint" take care of most of - # the chaining and aliasing for us. - - first = i == 0 - middle = i < len(to_join) - 1 - second_to_last = i == len(to_join) - 2 - - if first: - attr = getattr(left_alias, key) - else: - attr = key - - if second_to_last: - q = q.join(parent_alias, attr, from_joinpoint=True) - else: - q = q.join(attr, aliased=middle, from_joinpoint=True) - return q - - def _local_remote_columns(self, prop): - if prop.secondary is None: - return zip(*prop.local_remote_pairs) - else: - return \ - [p[0] for p in prop.synchronize_pairs],\ - [ - p[0] for p in prop. - secondary_synchronize_pairs - ] - - def _setup_options(self, q, subq_path, orig_query): - # propagate loader options etc. to the new query. - # these will fire relative to subq_path. - q = q._with_current_path(subq_path) - q = q._conditional_options(*orig_query._with_options) - if orig_query._populate_existing: - q._populate_existing = orig_query._populate_existing - return q - - def _setup_outermost_orderby(self, q): - if self.parent_property.order_by: - # if there's an ORDER BY, alias it the same - # way joinedloader does, but we have to pull out - # the "eagerjoin" from the query. - # this really only picks up the "secondary" table - # right now. - eagerjoin = q._from_obj[0] - eager_order_by = \ - eagerjoin._target_adapter.\ - copy_and_process( - util.to_list( - self.parent_property.order_by - ) - ) - q = q.order_by(*eager_order_by) - return q - - def create_row_processor(self, context, path, reduced_path, - mapper, row, adapter): - if not self.parent.class_manager[self.key].impl.supports_population: - raise sa_exc.InvalidRequestError( - "'%s' does not support object " - "population - eager loading cannot be applied." 
% - self) - - reduced_path = reduced_path + (self.key,) - - if ('subquery', reduced_path) not in context.attributes: - return None, None, None - - local_cols, remote_cols = self._local_remote_columns(self.parent_property) - - q = context.attributes[('subquery', reduced_path)] - - # cache the loaded collections in the context - # so that inheriting mappers don't re-load when they - # call upon create_row_processor again - if ('collections', reduced_path) in context.attributes: - collections = context.attributes[('collections', reduced_path)] - else: - collections = context.attributes[('collections', reduced_path)] = dict( - (k, [v[0] for v in v]) - for k, v in itertools.groupby( - q, - lambda x:x[1:] - )) - - if adapter: - local_cols = [adapter.columns[c] for c in local_cols] - - if self.uselist: - return self._create_collection_loader(collections, local_cols) - else: - return self._create_scalar_loader(collections, local_cols) - - def _create_collection_loader(self, collections, local_cols): - def load_collection_from_subq(state, dict_, row): - collection = collections.get( - tuple([row[col] for col in local_cols]), - () - ) - state.get_impl(self.key).\ - set_committed_value(state, dict_, collection) - - return load_collection_from_subq, None, None - - def _create_scalar_loader(self, collections, local_cols): - def load_scalar_from_subq(state, dict_, row): - collection = collections.get( - tuple([row[col] for col in local_cols]), - (None,) - ) - if len(collection) > 1: - util.warn( - "Multiple rows returned with " - "uselist=False for eagerly-loaded attribute '%s' " - % self) - - scalar = collection[0] - state.get_impl(self.key).\ - set_committed_value(state, dict_, scalar) - - return load_scalar_from_subq, None, None - -log.class_logger(SubqueryLoader) - -class JoinedLoader(AbstractRelationshipLoader): - """Provide loading behavior for a :class:`.RelationshipProperty` - using joined eager loading. - - """ - def init(self): - super(JoinedLoader, self).init() - self.join_depth = self.parent_property.join_depth - - def init_class_attribute(self, mapper): - self.parent_property.\ - _get_strategy(LazyLoader).init_class_attribute(mapper) - - def setup_query(self, context, entity, path, reduced_path, adapter, \ - column_collection=None, parentmapper=None, - allow_innerjoin=True, - **kwargs): - """Add a left outer join to the statement thats being constructed.""" - - - if not context.query._enable_eagerloads: - return - - path = path + (self.key,) - reduced_path = reduced_path + (self.key,) - - if ("user_defined_eager_row_processor", reduced_path) in\ - context.attributes: - clauses, adapter, add_to_collection = \ - self._get_user_defined_adapter( - context, entity, reduced_path, adapter - ) - else: - # check for join_depth or basic recursion, - # if the current path was not explicitly stated as - # a desired "loaderstrategy" (i.e. 
via query.options()) - if ("loaderstrategy", reduced_path) not in context.attributes: - if self.join_depth: - if len(path) / 2 > self.join_depth: - return - else: - if self.mapper.base_mapper in reduced_path: - return - - clauses, adapter, add_to_collection, \ - allow_innerjoin = self._generate_row_adapter( - context, entity, path, reduced_path, adapter, - column_collection, parentmapper, allow_innerjoin - ) - - path += (self.mapper,) - reduced_path += (self.mapper.base_mapper,) - - for value in self.mapper._polymorphic_properties: - value.setup( - context, - entity, - path, - reduced_path, - clauses, - parentmapper=self.mapper, - column_collection=add_to_collection, - allow_innerjoin=allow_innerjoin) - - def _get_user_defined_adapter(self, context, entity, - reduced_path, adapter): - clauses = context.attributes[ - ("user_defined_eager_row_processor", - reduced_path)] - - adapter = entity._get_entity_clauses(context.query, context) - if adapter and clauses: - context.attributes[ - ("user_defined_eager_row_processor", - reduced_path)] = clauses = clauses.wrap(adapter) - elif adapter: - context.attributes[ - ("user_defined_eager_row_processor", - reduced_path)] = clauses = adapter - - add_to_collection = context.primary_columns - return clauses, adapter, add_to_collection - - def _generate_row_adapter(self, - context, entity, path, reduced_path, adapter, - column_collection, parentmapper, allow_innerjoin - ): - clauses = mapperutil.ORMAdapter( - mapperutil.AliasedClass(self.mapper), - equivalents=self.mapper._equivalent_columns, - adapt_required=True) - - if self.parent_property.direction != interfaces.MANYTOONE: - context.multi_row_eager_loaders = True - - innerjoin = allow_innerjoin and context.attributes.get( - ("eager_join_type", path), - self.parent_property.innerjoin) - if not innerjoin: - # if this is an outer join, all eager joins from - # here must also be outer joins - allow_innerjoin = False - - context.create_eager_joins.append( - (self._create_eager_join, context, - entity, path, adapter, - parentmapper, clauses, innerjoin) - ) - - add_to_collection = context.secondary_columns - context.attributes[ - ("eager_row_processor", reduced_path) - ] = clauses - return clauses, adapter, add_to_collection, allow_innerjoin - - def _create_eager_join(self, context, entity, - path, adapter, parentmapper, - clauses, innerjoin): - - if parentmapper is None: - localparent = entity.mapper - else: - localparent = parentmapper - - # whether or not the Query will wrap the selectable in a subquery, - # and then attach eager load joins to that (i.e., in the case of - # LIMIT/OFFSET etc.) - should_nest_selectable = context.multi_row_eager_loaders and \ - context.query._should_nest_selectable - - entity_key = None - if entity not in context.eager_joins and \ - not should_nest_selectable and \ - context.from_clause: - index, clause = \ - sql_util.find_join_source( - context.from_clause, entity.selectable) - if clause is not None: - # join to an existing FROM clause on the query. - # key it to its list index in the eager_joins dict. - # Query._compile_context will adapt as needed and - # append to the FROM clause of the select(). 
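            # The innerjoin flag threaded through here comes from
            # relationship(..., innerjoin=True) or the option form; e.g.
            # (hypothetical User/Address mappings)::
            #
            #     from sqlalchemy.orm import joinedload
            #
            #     # LEFT OUTER JOIN by default; everything chained below an
            #     # outer join is forced outer too (allow_innerjoin=False)
            #     q = session.query(User).options(joinedload(User.addresses))
            #
            #     # an INNER JOIN is safe for a NOT NULL many-to-one
            #     q = session.query(Address).options(
            #             joinedload(Address.user, innerjoin=True))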
- entity_key, default_towrap = index, clause - - if entity_key is None: - entity_key, default_towrap = entity, entity.selectable - - towrap = context.eager_joins.setdefault(entity_key, default_towrap) - - join_to_left = False - if adapter: - if getattr(adapter, 'aliased_class', None): - onclause = getattr( - adapter.aliased_class, self.key, - self.parent_property) - else: - onclause = getattr( - mapperutil.AliasedClass( - self.parent, - adapter.selectable - ), - self.key, self.parent_property - ) - - if onclause is self.parent_property: - # TODO: this is a temporary hack to - # account for polymorphic eager loads where - # the eagerload is referencing via of_type(). - join_to_left = True - else: - onclause = self.parent_property - - context.eager_joins[entity_key] = eagerjoin = \ - mapperutil.join( - towrap, - clauses.aliased_class, - onclause, - join_to_left=join_to_left, - isouter=not innerjoin - ) - - # send a hint to the Query as to where it may "splice" this join - eagerjoin.stop_on = entity.selectable - - if self.parent_property.secondary is None and \ - not parentmapper: - # for parentclause that is the non-eager end of the join, - # ensure all the parent cols in the primaryjoin are actually - # in the - # columns clause (i.e. are not deferred), so that aliasing applied - # by the Query propagates those columns outward. - # This has the effect - # of "undefering" those columns. - for col in sql_util.find_columns( - self.parent_property.primaryjoin): - if localparent.mapped_table.c.contains_column(col): - if adapter: - col = adapter.columns[col] - context.primary_columns.append(col) - - if self.parent_property.order_by: - context.eager_order_by += \ - eagerjoin._target_adapter.\ - copy_and_process( - util.to_list( - self.parent_property.order_by - ) - ) - - - def _create_eager_adapter(self, context, row, adapter, path, reduced_path): - if ("user_defined_eager_row_processor", reduced_path) in \ - context.attributes: - decorator = context.attributes[ - ("user_defined_eager_row_processor", - reduced_path)] - # user defined eagerloads are part of the "primary" - # portion of the load. - # the adapters applied to the Query should be honored. - if context.adapter and decorator: - decorator = decorator.wrap(context.adapter) - elif context.adapter: - decorator = context.adapter - elif ("eager_row_processor", reduced_path) in context.attributes: - decorator = context.attributes[ - ("eager_row_processor", reduced_path)] - else: - return False - - try: - self.mapper.identity_key_from_row(row, decorator) - return decorator - except KeyError: - # no identity key - dont return a row - # processor, will cause a degrade to lazy - return False - - def create_row_processor(self, context, path, reduced_path, mapper, row, adapter): - if not self.parent.class_manager[self.key].impl.supports_population: - raise sa_exc.InvalidRequestError( - "'%s' does not support object " - "population - eager loading cannot be applied." 
% - self) - - our_path = path + (self.key,) - our_reduced_path = reduced_path + (self.key,) - - eager_adapter = self._create_eager_adapter( - context, - row, - adapter, our_path, - our_reduced_path) - - if eager_adapter is not False: - key = self.key - _instance = self.mapper._instance_processor( - context, - our_path + (self.mapper,), - our_reduced_path + (self.mapper.base_mapper,), - eager_adapter) - - if not self.uselist: - return self._create_scalar_loader(context, key, _instance) - else: - return self._create_collection_loader(context, key, _instance) - else: - return self.parent_property.\ - _get_strategy(LazyLoader).\ - create_row_processor( - context, path, - reduced_path, - mapper, row, adapter) - - def _create_collection_loader(self, context, key, _instance): - def load_collection_from_joined_new_row(state, dict_, row): - collection = attributes.init_state_collection( - state, dict_, key) - result_list = util.UniqueAppender(collection, - 'append_without_event') - context.attributes[(state, key)] = result_list - _instance(row, result_list) - - def load_collection_from_joined_existing_row(state, dict_, row): - if (state, key) in context.attributes: - result_list = context.attributes[(state, key)] - else: - # appender_key can be absent from context.attributes - # with isnew=False when self-referential eager loading - # is used; the same instance may be present in two - # distinct sets of result columns - collection = attributes.init_state_collection(state, - dict_, key) - result_list = util.UniqueAppender( - collection, - 'append_without_event') - context.attributes[(state, key)] = result_list - _instance(row, result_list) - - def load_collection_from_joined_exec(state, dict_, row): - _instance(row, None) - - return load_collection_from_joined_new_row, \ - load_collection_from_joined_existing_row, \ - None, load_collection_from_joined_exec - - def _create_scalar_loader(self, context, key, _instance): - def load_scalar_from_joined_new_row(state, dict_, row): - # set a scalar object instance directly on the parent - # object, bypassing InstrumentedAttribute event handlers. 
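            # (illustrative note, not part of the original module) bypassing
            # the event system is safe at this point because this is the
            # initial population of a freshly loaded row, not a user-level
            # attribute set().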
- dict_[key] = _instance(row, None) - - def load_scalar_from_joined_existing_row(state, dict_, row): - # call _instance on the row, even though the object has - # been created, so that we further descend into properties - existing = _instance(row, None) - if existing is not None \ - and key in dict_ \ - and existing is not dict_[key]: - util.warn( - "Multiple rows returned with " - "uselist=False for eagerly-loaded attribute '%s' " - % self) - - def load_scalar_from_joined_exec(state, dict_, row): - _instance(row, None) - - return load_scalar_from_joined_new_row, \ - load_scalar_from_joined_existing_row, \ - None, load_scalar_from_joined_exec - -EagerLoader = JoinedLoader -"""Deprecated, use JoinedLoader""" - -log.class_logger(JoinedLoader) - -class EagerLazyOption(StrategizedOption): - def __init__(self, key, lazy=True, chained=False, - propagate_to_loaders=True - ): - if isinstance(key[0], basestring) and key[0] == '*': - if len(key) != 1: - raise sa_exc.ArgumentError( - "Wildcard identifier '*' must " - "be specified alone.") - key = ("relationship:*",) - propagate_to_loaders = False - super(EagerLazyOption, self).__init__(key) - self.lazy = lazy - self.chained = self.lazy in (False, 'joined', 'subquery') and chained - self.propagate_to_loaders = propagate_to_loaders - self.strategy_cls = factory(lazy) - - def get_strategy_class(self): - return self.strategy_cls - -def factory(identifier): - if identifier is False or identifier == 'joined': - return JoinedLoader - elif identifier is None or identifier == 'noload': - return NoLoader - elif identifier is False or identifier == 'select': - return LazyLoader - elif identifier == 'subquery': - return SubqueryLoader - elif identifier == 'immediate': - return ImmediateLoader - else: - return LazyLoader - -class EagerJoinOption(PropertyOption): - - def __init__(self, key, innerjoin, chained=False): - super(EagerJoinOption, self).__init__(key) - self.innerjoin = innerjoin - self.chained = chained - - def process_query_property(self, query, paths, mappers): - if self.chained: - for path in paths: - query._attributes[("eager_join_type", path)] = self.innerjoin - else: - query._attributes[("eager_join_type", paths[-1])] = self.innerjoin - -class LoadEagerFromAliasOption(PropertyOption): - - def __init__(self, key, alias=None, chained=False): - super(LoadEagerFromAliasOption, self).__init__(key) - if alias is not None: - if not isinstance(alias, basestring): - m, alias, is_aliased_class = mapperutil._entity_info(alias) - self.alias = alias - self.chained = chained - - def process_query_property(self, query, paths, mappers): - if self.chained: - for path in paths[0:-1]: - (root_mapper, propname) = path[-2:] - prop = root_mapper._props[propname] - adapter = query._polymorphic_adapters.get(prop.mapper, None) - query._attributes.setdefault( - ("user_defined_eager_row_processor", - interfaces._reduce_path(path)), adapter) - - if self.alias is not None: - if isinstance(self.alias, basestring): - (root_mapper, propname) = paths[-1][-2:] - prop = root_mapper._props[propname] - self.alias = prop.target.alias(self.alias) - query._attributes[ - ("user_defined_eager_row_processor", - interfaces._reduce_path(paths[-1])) - ] = sql_util.ColumnAdapter(self.alias) - else: - (root_mapper, propname) = paths[-1][-2:] - prop = root_mapper._props[propname] - adapter = query._polymorphic_adapters.get(prop.mapper, None) - query._attributes[ - ("user_defined_eager_row_processor", - interfaces._reduce_path(paths[-1]))] = adapter - -def single_parent_validator(desc, prop): - 
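    # (illustrative sketch, not part of the original module) this validator
    # backs relationship(..., single_parent=True). Assuming mapped Parent and
    # Child classes where Child permits only one Parent:
    #
    #     child = Child()
    #     p1.children.append(child)
    #     p2.children.append(child)   # InvalidRequestError via _do_check below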
def _do_check(state, value, oldvalue, initiator): - if value is not None and initiator.key == prop.key: - hasparent = initiator.hasparent(attributes.instance_state(value)) - if hasparent and oldvalue is not value: - raise sa_exc.InvalidRequestError( - "Instance %s is already associated with an instance " - "of %s via its %s attribute, and is only allowed a " - "single parent." % - (mapperutil.instance_str(value), state.class_, prop) - ) - return value - - def append(state, value, initiator): - return _do_check(state, value, None, initiator) - - def set_(state, value, oldvalue, initiator): - return _do_check(state, value, oldvalue, initiator) - - event.listen(desc, 'append', append, raw=True, retval=True, active_history=True) - event.listen(desc, 'set', set_, raw=True, retval=True, active_history=True) - diff --git a/libs/sqlalchemy/orm/sync.py b/libs/sqlalchemy/orm/sync.py deleted file mode 100644 index 3094386b..00000000 --- a/libs/sqlalchemy/orm/sync.py +++ /dev/null @@ -1,108 +0,0 @@ -# orm/sync.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""private module containing functions used for copying data -between instances based on join conditions. - -""" - -from sqlalchemy.orm import exc, util as mapperutil, attributes - -def populate(source, source_mapper, dest, dest_mapper, - synchronize_pairs, uowcommit, flag_cascaded_pks): - source_dict = source.dict - dest_dict = dest.dict - - for l, r in synchronize_pairs: - try: - # inline of source_mapper._get_state_attr_by_column - prop = source_mapper._columntoproperty[l] - value = source.manager[prop.key].impl.get(source, source_dict, - attributes.PASSIVE_OFF) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, dest_mapper, r) - - try: - # inline of dest_mapper._set_state_attr_by_column - prop = dest_mapper._columntoproperty[r] - dest.manager[prop.key].impl.set(dest, dest_dict, value, None) - except exc.UnmappedColumnError: - _raise_col_to_prop(True, source_mapper, l, dest_mapper, r) - - # technically the "r.primary_key" check isn't - # needed here, but we check for this condition to limit - # how often this logic is invoked for memory/performance - # reasons, since we only need this info for a primary key - # destination. 
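        # (illustrative note, not part of the original module) for a typical
        # one-to-many join condition the synchronize pair is roughly
        #   (parent.c.id, child.c.parent_id)
        # and the check below records that the child's foreign key value was
        # cascaded from a possibly newly generated parent primary key.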
- if flag_cascaded_pks and l.primary_key and \ - r.primary_key and \ - r.references(l): - uowcommit.attributes[("pk_cascaded", dest, r)] = True - -def clear(dest, dest_mapper, synchronize_pairs): - for l, r in synchronize_pairs: - if r.primary_key: - raise AssertionError( - "Dependency rule tried to blank-out primary key " - "column '%s' on instance '%s'" % - (r, mapperutil.state_str(dest)) - ) - try: - dest_mapper._set_state_attr_by_column(dest, dest.dict, r, None) - except exc.UnmappedColumnError: - _raise_col_to_prop(True, None, l, dest_mapper, r) - -def update(source, source_mapper, dest, old_prefix, synchronize_pairs): - for l, r in synchronize_pairs: - try: - oldvalue = source_mapper._get_committed_attr_by_column(source.obj(), l) - value = source_mapper._get_state_attr_by_column(source, source.dict, l) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - dest[r.key] = value - dest[old_prefix + r.key] = oldvalue - -def populate_dict(source, source_mapper, dict_, synchronize_pairs): - for l, r in synchronize_pairs: - try: - value = source_mapper._get_state_attr_by_column(source, source.dict, l) - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - - dict_[r.key] = value - -def source_modified(uowcommit, source, source_mapper, synchronize_pairs): - """return true if the source object has changes from an old to a - new value on the given synchronize pairs - - """ - for l, r in synchronize_pairs: - try: - prop = source_mapper._columntoproperty[l] - except exc.UnmappedColumnError: - _raise_col_to_prop(False, source_mapper, l, None, r) - history = uowcommit.get_attribute_history(source, prop.key, - attributes.PASSIVE_NO_INITIALIZE) - return bool(history.deleted) - else: - return False - -def _raise_col_to_prop(isdest, source_mapper, source_column, dest_mapper, dest_column): - if isdest: - raise exc.UnmappedColumnError( - "Can't execute sync rule for destination column '%s'; " - "mapper '%s' does not map this column. Try using an explicit" - " `foreign_keys` collection which does not include this column " - "(or use a viewonly=True relation)." % (dest_column, dest_mapper) - ) - else: - raise exc.UnmappedColumnError( - "Can't execute sync rule for source column '%s'; mapper '%s' " - "does not map this column. Try using an explicit `foreign_keys`" - " collection which does not include destination column '%s' (or " - "use a viewonly=True relation)." % - (source_column, source_mapper, dest_column) - ) diff --git a/libs/sqlalchemy/orm/unitofwork.py b/libs/sqlalchemy/orm/unitofwork.py deleted file mode 100644 index 003d7ae7..00000000 --- a/libs/sqlalchemy/orm/unitofwork.py +++ /dev/null @@ -1,587 +0,0 @@ -# orm/unitofwork.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The internals for the unit of work system. - -The session's flush() process passes objects to a contextual object -here, which assembles flush tasks based on mappers and their properties, -organizes them in order of dependency, and executes. 
- -""" - -from sqlalchemy import util, event -from sqlalchemy.util import topological -from sqlalchemy.orm import attributes, interfaces, persistence -from sqlalchemy.orm import util as mapperutil -session = util.importlater("sqlalchemy.orm", "session") - -def track_cascade_events(descriptor, prop): - """Establish event listeners on object attributes which handle - cascade-on-set/append. - - """ - key = prop.key - - def append(state, item, initiator): - # process "save_update" cascade rules for when - # an instance is appended to the list of another instance - - sess = session._state_session(state) - if sess: - prop = state.manager.mapper._props[key] - item_state = attributes.instance_state(item) - if prop.cascade.save_update and \ - (prop.cascade_backrefs or key == initiator.key) and \ - not sess._contains_state(item_state): - sess._save_or_update_state(item_state) - return item - - def remove(state, item, initiator): - sess = session._state_session(state) - if sess: - prop = state.manager.mapper._props[key] - # expunge pending orphans - item_state = attributes.instance_state(item) - if prop.cascade.delete_orphan and \ - item_state in sess._new and \ - prop.mapper._is_orphan(item_state): - sess.expunge(item) - - def set_(state, newvalue, oldvalue, initiator): - # process "save_update" cascade rules for when an instance - # is attached to another instance - if oldvalue is newvalue: - return newvalue - - sess = session._state_session(state) - if sess: - prop = state.manager.mapper._props[key] - if newvalue is not None: - newvalue_state = attributes.instance_state(newvalue) - if prop.cascade.save_update and \ - (prop.cascade_backrefs or key == initiator.key) and \ - not sess._contains_state(newvalue_state): - sess._save_or_update_state(newvalue_state) - - if oldvalue is not None and \ - oldvalue is not attributes.PASSIVE_NO_RESULT and \ - prop.cascade.delete_orphan: - # possible to reach here with attributes.NEVER_SET ? - oldvalue_state = attributes.instance_state(oldvalue) - - if oldvalue_state in sess._new and \ - prop.mapper._is_orphan(oldvalue_state): - sess.expunge(oldvalue) - return newvalue - - event.listen(descriptor, 'append', append, raw=True, retval=True) - event.listen(descriptor, 'remove', remove, raw=True, retval=True) - event.listen(descriptor, 'set', set_, raw=True, retval=True) - - -class UOWTransaction(object): - def __init__(self, session): - self.session = session - - # dictionary used by external actors to - # store arbitrary state information. - self.attributes = {} - - # dictionary of mappers to sets of - # DependencyProcessors, which are also - # set to be part of the sorted flush actions, - # which have that mapper as a parent. - self.deps = util.defaultdict(set) - - # dictionary of mappers to sets of InstanceState - # items pending for flush which have that mapper - # as a parent. - self.mappers = util.defaultdict(set) - - # a dictionary of Preprocess objects, which gather - # additional states impacted by the flush - # and determine if a flush action is needed - self.presort_actions = {} - - # dictionary of PostSortRec objects, each - # one issues work during the flush within - # a certain ordering. 
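        # (illustrative note, not part of the original module) keys here are
        # (record class, *args) tuples; PostSortRec.__new__ below routes
        # construction through this dict so that equivalent records are
        # created only once per flush.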
- self.postsort_actions = {} - - # a set of 2-tuples, each containing two - # PostSortRec objects where the second - # is dependent on the first being executed - # first - self.dependencies = set() - - # dictionary of InstanceState-> (isdelete, listonly) - # tuples, indicating if this state is to be deleted - # or insert/updated, or just refreshed - self.states = {} - - # tracks InstanceStates which will be receiving - # a "post update" call. Keys are mappers, - # values are a set of states and a set of the - # columns which should be included in the update. - self.post_update_states = util.defaultdict(lambda: (set(), set())) - - @property - def has_work(self): - return bool(self.states) - - def is_deleted(self, state): - """return true if the given state is marked as deleted - within this uowtransaction.""" - - return state in self.states and self.states[state][0] - - def memo(self, key, callable_): - if key in self.attributes: - return self.attributes[key] - else: - self.attributes[key] = ret = callable_() - return ret - - def remove_state_actions(self, state): - """remove pending actions for a state from the uowtransaction.""" - - isdelete = self.states[state][0] - - self.states[state] = (isdelete, True) - - def get_attribute_history(self, state, key, - passive=attributes.PASSIVE_NO_INITIALIZE): - """facade to attributes.get_state_history(), including caching of results.""" - - hashkey = ("history", state, key) - - # cache the objects, not the states; the strong reference here - # prevents newly loaded objects from being dereferenced during the - # flush process - - if hashkey in self.attributes: - history, state_history, cached_passive = self.attributes[hashkey] - # if the cached lookup was "passive" and now - # we want non-passive, do a non-passive lookup and re-cache - if cached_passive is not attributes.PASSIVE_OFF \ - and passive is attributes.PASSIVE_OFF: - impl = state.manager[key].impl - history = impl.get_history(state, state.dict, - attributes.PASSIVE_OFF) - if history and impl.uses_objects: - state_history = history.as_state() - else: - state_history = history - self.attributes[hashkey] = (history, state_history, passive) - else: - impl = state.manager[key].impl - # TODO: store the history as (state, object) tuples - # so we don't have to keep converting here - history = impl.get_history(state, state.dict, passive) - if history and impl.uses_objects: - state_history = history.as_state() - else: - state_history = history - self.attributes[hashkey] = (history, state_history, passive) - - return state_history - - def has_dep(self, processor): - return (processor, True) in self.presort_actions - - def register_preprocessor(self, processor, fromparent): - key = (processor, fromparent) - if key not in self.presort_actions: - self.presort_actions[key] = Preprocess(processor, fromparent) - - def register_object(self, state, isdelete=False, - listonly=False, cancel_delete=False, - operation=None, prop=None): - if not self.session._contains_state(state): - if not state.deleted and operation is not None: - util.warn("Object of type %s not in session, %s operation " - "along '%s' will not proceed" % - (mapperutil.state_class_str(state), operation, prop)) - return False - - if state not in self.states: - mapper = state.manager.mapper - - if mapper not in self.mappers: - mapper._per_mapper_flush_actions(self) - - self.mappers[mapper].add(state) - self.states[state] = (isdelete, listonly) - else: - if not listonly and (isdelete or cancel_delete): - self.states[state] = (isdelete, False) - 
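        # (illustrative note, not part of the original module) True tells the
        # calling dependency processor that the state participates in this
        # flush and the pending operation may proceed; compare the early
        # False return above for states not present in the session.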
return True - - def issue_post_update(self, state, post_update_cols): - mapper = state.manager.mapper.base_mapper - states, cols = self.post_update_states[mapper] - states.add(state) - cols.update(post_update_cols) - - @util.memoized_property - def _mapper_for_dep(self): - """return a dynamic mapping of (Mapper, DependencyProcessor) to - True or False, indicating if the DependencyProcessor operates - on objects of that Mapper. - - The result is stored in the dictionary persistently once - calculated. - - """ - return util.PopulateDict( - lambda tup:tup[0]._props.get(tup[1].key) is tup[1].prop - ) - - def filter_states_for_dep(self, dep, states): - """Filter the given list of InstanceStates to those relevant to the - given DependencyProcessor. - - """ - mapper_for_dep = self._mapper_for_dep - return [s for s in states if mapper_for_dep[(s.manager.mapper, dep)]] - - def states_for_mapper_hierarchy(self, mapper, isdelete, listonly): - checktup = (isdelete, listonly) - for mapper in mapper.base_mapper.self_and_descendants: - for state in self.mappers[mapper]: - if self.states[state] == checktup: - yield state - - def _generate_actions(self): - """Generate the full, unsorted collection of PostSortRecs as - well as dependency pairs for this UOWTransaction. - - """ - # execute presort_actions, until all states - # have been processed. a presort_action might - # add new states to the uow. - while True: - ret = False - for action in list(self.presort_actions.values()): - if action.execute(self): - ret = True - if not ret: - break - - # see if the graph of mapper dependencies has cycles. - self.cycles = cycles = topological.find_cycles( - self.dependencies, - self.postsort_actions.values()) - - if cycles: - # if yes, break the per-mapper actions into - # per-state actions - convert = dict( - (rec, set(rec.per_state_flush_actions(self))) - for rec in cycles - ) - - # rewrite the existing dependencies to point to - # the per-state actions for those per-mapper actions - # that were broken up. - for edge in list(self.dependencies): - if None in edge or \ - edge[0].disabled or edge[1].disabled or \ - cycles.issuperset(edge): - self.dependencies.remove(edge) - elif edge[0] in cycles: - self.dependencies.remove(edge) - for dep in convert[edge[0]]: - self.dependencies.add((dep, edge[1])) - elif edge[1] in cycles: - self.dependencies.remove(edge) - for dep in convert[edge[1]]: - self.dependencies.add((edge[0], dep)) - - return set([a for a in self.postsort_actions.values() - if not a.disabled - ] - ).difference(cycles) - - def execute(self): - postsort_actions = self._generate_actions() - - #sort = topological.sort(self.dependencies, postsort_actions) - #print "--------------" - #print "\ndependencies:", self.dependencies - #print "\ncycles:", self.cycles - #print "\nsort:", list(sort) - #print "\nCOUNT OF POSTSORT ACTIONS", len(postsort_actions) - - # execute - if self.cycles: - for set_ in topological.sort_as_subsets( - self.dependencies, - postsort_actions): - while set_: - n = set_.pop() - n.execute_aggregate(self, set_) - else: - for rec in topological.sort( - self.dependencies, - postsort_actions): - rec.execute(self) - - - def finalize_flush_changes(self): - """mark processed objects as clean / deleted after a successful flush(). - - this method is called within the flush() method after the - execute() method has succeeded and the transaction has been committed. 
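        Deleted states are passed to the session as newly deleted; all
        remaining states are registered as newly persistent, as the loop
        below shows.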
- - """ - for state, (isdelete, listonly) in self.states.iteritems(): - if isdelete: - self.session._remove_newly_deleted(state) - else: - # if listonly: - # debug... would like to see how many do this - self.session._register_newly_persistent(state) - -class IterateMappersMixin(object): - def _mappers(self, uow): - if self.fromparent: - return iter( - m for m in self.dependency_processor.parent.self_and_descendants - if uow._mapper_for_dep[(m, self.dependency_processor)] - ) - else: - return self.dependency_processor.mapper.self_and_descendants - -class Preprocess(IterateMappersMixin): - def __init__(self, dependency_processor, fromparent): - self.dependency_processor = dependency_processor - self.fromparent = fromparent - self.processed = set() - self.setup_flush_actions = False - - def execute(self, uow): - delete_states = set() - save_states = set() - - for mapper in self._mappers(uow): - for state in uow.mappers[mapper].difference(self.processed): - (isdelete, listonly) = uow.states[state] - if not listonly: - if isdelete: - delete_states.add(state) - else: - save_states.add(state) - - if delete_states: - self.dependency_processor.presort_deletes(uow, delete_states) - self.processed.update(delete_states) - if save_states: - self.dependency_processor.presort_saves(uow, save_states) - self.processed.update(save_states) - - if (delete_states or save_states): - if not self.setup_flush_actions and ( - self.dependency_processor.\ - prop_has_changes(uow, delete_states, True) or - self.dependency_processor.\ - prop_has_changes(uow, save_states, False) - ): - self.dependency_processor.per_property_flush_actions(uow) - self.setup_flush_actions = True - return True - else: - return False - -class PostSortRec(object): - disabled = False - - def __new__(cls, uow, *args): - key = (cls, ) + args - if key in uow.postsort_actions: - return uow.postsort_actions[key] - else: - uow.postsort_actions[key] = \ - ret = \ - object.__new__(cls) - return ret - - def execute_aggregate(self, uow, recs): - self.execute(uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - ",".join(str(x) for x in self.__dict__.values()) - ) - -class ProcessAll(IterateMappersMixin, PostSortRec): - def __init__(self, uow, dependency_processor, delete, fromparent): - self.dependency_processor = dependency_processor - self.delete = delete - self.fromparent = fromparent - uow.deps[dependency_processor.parent.base_mapper].add(dependency_processor) - - def execute(self, uow): - states = self._elements(uow) - if self.delete: - self.dependency_processor.process_deletes(uow, states) - else: - self.dependency_processor.process_saves(uow, states) - - def per_state_flush_actions(self, uow): - # this is handled by SaveUpdateAll and DeleteAll, - # since a ProcessAll should unconditionally be pulled - # into per-state if either the parent/child mappers - # are part of a cycle - return iter([]) - - def __repr__(self): - return "%s(%s, delete=%s)" % ( - self.__class__.__name__, - self.dependency_processor, - self.delete - ) - - def _elements(self, uow): - for mapper in self._mappers(uow): - for state in uow.mappers[mapper]: - (isdelete, listonly) = uow.states[state] - if isdelete == self.delete and not listonly: - yield state - -class IssuePostUpdate(PostSortRec): - def __init__(self, uow, mapper, isdelete): - self.mapper = mapper - self.isdelete = isdelete - - def execute(self, uow): - states, cols = uow.post_update_states[self.mapper] - states = [s for s in states if uow.states[s][0] == self.isdelete] - - 
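        # (illustrative note, not part of the original module) a "post
        # update" emits an extra UPDATE for these columns after the primary
        # INSERT/DELETE statements, which is how cycles between mutually
        # dependent rows are broken.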
persistence.post_update(self.mapper, states, uow, cols) - -class SaveUpdateAll(PostSortRec): - def __init__(self, uow, mapper): - self.mapper = mapper - assert mapper is mapper.base_mapper - - def execute(self, uow): - persistence.save_obj(self.mapper, - uow.states_for_mapper_hierarchy(self.mapper, False, False), - uow - ) - - def per_state_flush_actions(self, uow): - states = list(uow.states_for_mapper_hierarchy(self.mapper, False, False)) - for rec in self.mapper._per_state_flush_actions( - uow, - states, - False): - yield rec - - for dep in uow.deps[self.mapper]: - states_for_prop = uow.filter_states_for_dep(dep, states) - dep.per_state_flush_actions(uow, states_for_prop, False) - -class DeleteAll(PostSortRec): - def __init__(self, uow, mapper): - self.mapper = mapper - assert mapper is mapper.base_mapper - - def execute(self, uow): - persistence.delete_obj(self.mapper, - uow.states_for_mapper_hierarchy(self.mapper, True, False), - uow - ) - - def per_state_flush_actions(self, uow): - states = list(uow.states_for_mapper_hierarchy(self.mapper, True, False)) - for rec in self.mapper._per_state_flush_actions( - uow, - states, - True): - yield rec - - for dep in uow.deps[self.mapper]: - states_for_prop = uow.filter_states_for_dep(dep, states) - dep.per_state_flush_actions(uow, states_for_prop, True) - -class ProcessState(PostSortRec): - def __init__(self, uow, dependency_processor, delete, state): - self.dependency_processor = dependency_processor - self.delete = delete - self.state = state - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - dependency_processor = self.dependency_processor - delete = self.delete - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.dependency_processor is dependency_processor and - r.delete is delete] - recs.difference_update(our_recs) - states = [self.state] + [r.state for r in our_recs] - if delete: - dependency_processor.process_deletes(uow, states) - else: - dependency_processor.process_saves(uow, states) - - def __repr__(self): - return "%s(%s, %s, delete=%s)" % ( - self.__class__.__name__, - self.dependency_processor, - mapperutil.state_str(self.state), - self.delete - ) - -class SaveUpdateState(PostSortRec): - def __init__(self, uow, state, mapper): - self.state = state - self.mapper = mapper - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - mapper = self.mapper - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.mapper is mapper] - recs.difference_update(our_recs) - persistence.save_obj(mapper, - [self.state] + - [r.state for r in our_recs], - uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - mapperutil.state_str(self.state) - ) - -class DeleteState(PostSortRec): - def __init__(self, uow, state, mapper): - self.state = state - self.mapper = mapper - - def execute_aggregate(self, uow, recs): - cls_ = self.__class__ - mapper = self.mapper - our_recs = [r for r in recs - if r.__class__ is cls_ and - r.mapper is mapper] - recs.difference_update(our_recs) - states = [self.state] + [r.state for r in our_recs] - persistence.delete_obj(mapper, - [s for s in states if uow.states[s][0]], - uow) - - def __repr__(self): - return "%s(%s)" % ( - self.__class__.__name__, - mapperutil.state_str(self.state) - ) - diff --git a/libs/sqlalchemy/orm/util.py b/libs/sqlalchemy/orm/util.py deleted file mode 100644 index a8cc80ce..00000000 --- a/libs/sqlalchemy/orm/util.py +++ /dev/null @@ -1,750 +0,0 @@ -# orm/util.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# 
-# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -from sqlalchemy import sql, util, event, exc as sa_exc -from sqlalchemy.sql import expression, util as sql_util, operators -from sqlalchemy.orm.interfaces import MapperExtension, EXT_CONTINUE,\ - PropComparator, MapperProperty -from sqlalchemy.orm import attributes, exc -import operator -import re - -mapperlib = util.importlater("sqlalchemy.orm", "mapperlib") - -all_cascades = frozenset(("delete", "delete-orphan", "all", "merge", - "expunge", "save-update", "refresh-expire", - "none")) - -_INSTRUMENTOR = ('mapper', 'instrumentor') - -class CascadeOptions(frozenset): - """Keeps track of the options sent to relationship().cascade""" - - _add_w_all_cascades = all_cascades.difference([ - 'all', 'none', 'delete-orphan']) - _allowed_cascades = all_cascades - - def __new__(cls, arg): - values = set([ - c for c - in re.split('\s*,\s*', arg or "") - if c - ]) - - if values.difference(cls._allowed_cascades): - raise sa_exc.ArgumentError( - "Invalid cascade option(s): %s" % - ", ".join([repr(x) for x in - sorted( - values.difference(cls._allowed_cascades) - )]) - ) - - if "all" in values: - values.update(cls._add_w_all_cascades) - if "none" in values: - values.clear() - values.discard('all') - - self = frozenset.__new__(CascadeOptions, values) - self.save_update = 'save-update' in values - self.delete = 'delete' in values - self.refresh_expire = 'refresh-expire' in values - self.merge = 'merge' in values - self.expunge = 'expunge' in values - self.delete_orphan = "delete-orphan" in values - - if self.delete_orphan and not self.delete: - util.warn("The 'delete-orphan' cascade " - "option requires 'delete'.") - return self - - def __repr__(self): - return "CascadeOptions(%r)" % ( - ",".join([x for x in sorted(self)]) - ) - -def _validator_events(desc, key, validator, include_removes): - """Runs a validation method on an attribute value to be set or appended.""" - - if include_removes: - def append(state, value, initiator): - return validator(state.obj(), key, value, False) - - def set_(state, value, oldvalue, initiator): - return validator(state.obj(), key, value, False) - - def remove(state, value, initiator): - validator(state.obj(), key, value, True) - else: - def append(state, value, initiator): - return validator(state.obj(), key, value) - - def set_(state, value, oldvalue, initiator): - return validator(state.obj(), key, value) - - event.listen(desc, 'append', append, raw=True, retval=True) - event.listen(desc, 'set', set_, raw=True, retval=True) - if include_removes: - event.listen(desc, "remove", remove, raw=True, retval=True) - -def polymorphic_union(table_map, typecolname, aliasname='p_union', cast_nulls=True): - """Create a ``UNION`` statement used by a polymorphic mapper. - - See :ref:`concrete_inheritance` for an example of how - this is used. - - :param table_map: mapping of polymorphic identities to - :class:`.Table` objects. - :param typecolname: string name of a "discriminator" column, which will be - derived from the query, producing the polymorphic identity for each row. If - ``None``, no polymorphic discriminator is generated. - :param aliasname: name of the :func:`~sqlalchemy.sql.expression.alias()` - construct generated. - :param cast_nulls: if True, non-existent columns, which are represented as labeled - NULLs, will be passed into CAST. 
This is a legacy behavior that is problematic - on some backends such as Oracle - in which case it can be set to False. - - """ - - colnames = util.OrderedSet() - colnamemaps = {} - types = {} - for key in table_map.keys(): - table = table_map[key] - - # mysql doesnt like selecting from a select; - # make it an alias of the select - if isinstance(table, sql.Select): - table = table.alias() - table_map[key] = table - - m = {} - for c in table.c: - colnames.add(c.key) - m[c.key] = c - types[c.key] = c.type - colnamemaps[table] = m - - def col(name, table): - try: - return colnamemaps[table][name] - except KeyError: - if cast_nulls: - return sql.cast(sql.null(), types[name]).label(name) - else: - return sql.type_coerce(sql.null(), types[name]).label(name) - - result = [] - for type, table in table_map.iteritems(): - if typecolname is not None: - result.append( - sql.select([col(name, table) for name in colnames] + - [sql.literal_column(sql_util._quote_ddl_expr(type)). - label(typecolname)], - from_obj=[table])) - else: - result.append(sql.select([col(name, table) for name in colnames], - from_obj=[table])) - return sql.union_all(*result).alias(aliasname) - -def identity_key(*args, **kwargs): - """Get an identity key. - - Valid call signatures: - - * ``identity_key(class, ident)`` - - class - mapped class (must be a positional argument) - - ident - primary key, if the key is composite this is a tuple - - - * ``identity_key(instance=instance)`` - - instance - object instance (must be given as a keyword arg) - - * ``identity_key(class, row=row)`` - - class - mapped class (must be a positional argument) - - row - result proxy row (must be given as a keyword arg) - - """ - if args: - if len(args) == 1: - class_ = args[0] - try: - row = kwargs.pop("row") - except KeyError: - ident = kwargs.pop("ident") - elif len(args) == 2: - class_, ident = args - elif len(args) == 3: - class_, ident = args - else: - raise sa_exc.ArgumentError("expected up to three " - "positional arguments, got %s" % len(args)) - if kwargs: - raise sa_exc.ArgumentError("unknown keyword arguments: %s" - % ", ".join(kwargs.keys())) - mapper = class_mapper(class_) - if "ident" in locals(): - return mapper.identity_key_from_primary_key(util.to_list(ident)) - return mapper.identity_key_from_row(row) - instance = kwargs.pop("instance") - if kwargs: - raise sa_exc.ArgumentError("unknown keyword arguments: %s" - % ", ".join(kwargs.keys())) - mapper = object_mapper(instance) - return mapper.identity_key_from_instance(instance) - -class ORMAdapter(sql_util.ColumnAdapter): - """Extends ColumnAdapter to accept ORM entities. - - The selectable is extracted from the given entity, - and the AliasedClass if any is referenced. - - """ - def __init__(self, entity, equivalents=None, - chain_to=None, adapt_required=False): - self.mapper, selectable, is_aliased_class = _entity_info(entity) - if is_aliased_class: - self.aliased_class = entity - else: - self.aliased_class = None - sql_util.ColumnAdapter.__init__(self, selectable, - equivalents, chain_to, - adapt_required=adapt_required) - - def replace(self, elem): - entity = elem._annotations.get('parentmapper', None) - if not entity or entity.isa(self.mapper): - return sql_util.ColumnAdapter.replace(self, elem) - else: - return None - -class AliasedClass(object): - """Represents an "aliased" form of a mapped class for usage with Query. 
- - The ORM equivalent of a :func:`sqlalchemy.sql.expression.alias` - construct, this object mimics the mapped class using a - __getattr__ scheme and maintains a reference to a - real :class:`~sqlalchemy.sql.expression.Alias` object. - - Usage is via the :class:`~sqlalchemy.orm.aliased()` synonym:: - - # find all pairs of users with the same name - user_alias = aliased(User) - session.query(User, user_alias).\\ - join((user_alias, User.id > user_alias.id)).\\ - filter(User.name==user_alias.name) - - The resulting object is an instance of :class:`.AliasedClass`, however - it implements a ``__getattribute__()`` scheme which will proxy attribute - access to that of the ORM class being aliased. All classmethods - on the mapped entity should also be available here, including - hybrids created with the :ref:`hybrids_toplevel` extension, - which will receive the :class:`.AliasedClass` as the "class" argument - when classmethods are called. - - :param cls: ORM mapped entity which will be "wrapped" around an alias. - :param alias: a selectable, such as an :func:`.alias` or :func:`.select` - construct, which will be rendered in place of the mapped table of the - ORM entity. If left as ``None``, an ordinary :class:`.Alias` of the - ORM entity's mapped table will be generated. - :param name: A name which will be applied both to the :class:`.Alias` - if one is generated, as well as the name present in the "named tuple" - returned by the :class:`.Query` object when results are returned. - :param adapt_on_names: if True, more liberal "matching" will be used when - mapping the mapped columns of the ORM entity to those of the given selectable - - a name-based match will be performed if the given selectable doesn't - otherwise have a column that corresponds to one on the entity. The - use case for this is when associating an entity with some derived - selectable such as one that uses aggregate functions:: - - class UnitPrice(Base): - __tablename__ = 'unit_price' - ... - unit_id = Column(Integer) - price = Column(Numeric) - - aggregated_unit_price = Session.query( - func.sum(UnitPrice.price).label('price') - ).group_by(UnitPrice.unit_id).subquery() - - aggregated_unit_price = aliased(UnitPrice, alias=aggregated_unit_price, adapt_on_names=True) - - Above, functions on ``aggregated_unit_price`` which - refer to ``.price`` will return the - ``fund.sum(UnitPrice.price).label('price')`` column, - as it is matched on the name "price". Ordinarily, the "price" function wouldn't - have any "column correspondence" to the actual ``UnitPrice.price`` column - as it is not a proxy of the original. - - .. versionadded:: 0.7.3 - - """ - def __init__(self, cls, alias=None, name=None, adapt_on_names=False): - self.__mapper = _class_to_mapper(cls) - self.__target = self.__mapper.class_ - self.__adapt_on_names = adapt_on_names - if alias is None: - alias = self.__mapper._with_polymorphic_selectable.alias(name=name) - self.__adapter = sql_util.ClauseAdapter(alias, - equivalents=self.__mapper._equivalent_columns, - adapt_on_names=self.__adapt_on_names) - self.__alias = alias - # used to assign a name to the RowTuple object - # returned by Query. 
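        # (illustrative note, not part of the original module) e.g.
        # aliased(User, name='u2') surfaces as the field name "u2" on the
        # named result tuples returned by Query.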
- self._sa_label_name = name - self.__name__ = 'AliasedClass_' + str(self.__target) - - def __getstate__(self): - return { - 'mapper':self.__mapper, - 'alias':self.__alias, - 'name':self._sa_label_name, - 'adapt_on_names':self.__adapt_on_names, - } - - def __setstate__(self, state): - self.__mapper = state['mapper'] - self.__target = self.__mapper.class_ - self.__adapt_on_names = state['adapt_on_names'] - alias = state['alias'] - self.__adapter = sql_util.ClauseAdapter(alias, - equivalents=self.__mapper._equivalent_columns, - adapt_on_names=self.__adapt_on_names) - self.__alias = alias - name = state['name'] - self._sa_label_name = name - self.__name__ = 'AliasedClass_' + str(self.__target) - - def __adapt_element(self, elem): - return self.__adapter.traverse(elem).\ - _annotate({ - 'parententity': self, - 'parentmapper':self.__mapper} - ) - - def __adapt_prop(self, existing, key): - comparator = existing.comparator.adapted(self.__adapt_element) - - queryattr = attributes.QueryableAttribute(self, key, - impl=existing.impl, parententity=self, comparator=comparator) - setattr(self, key, queryattr) - return queryattr - - def __getattr__(self, key): - for base in self.__target.__mro__: - try: - attr = object.__getattribute__(base, key) - except AttributeError: - continue - else: - break - else: - raise AttributeError(key) - - if isinstance(attr, attributes.QueryableAttribute): - return self.__adapt_prop(attr, key) - elif hasattr(attr, 'func_code'): - is_method = getattr(self.__target, key, None) - if is_method and is_method.im_self is not None: - return util.types.MethodType(attr.im_func, self, self) - else: - return None - elif hasattr(attr, '__get__'): - ret = attr.__get__(None, self) - if isinstance(ret, PropComparator): - return ret.adapted(self.__adapt_element) - return ret - else: - return attr - - def __repr__(self): - return '' % ( - id(self), self.__target.__name__) - -def aliased(element, alias=None, name=None, adapt_on_names=False): - if isinstance(element, expression.FromClause): - if adapt_on_names: - raise sa_exc.ArgumentError("adapt_on_names only applies to ORM elements") - return element.alias(name) - else: - return AliasedClass(element, alias=alias, name=name, adapt_on_names=adapt_on_names) - -def _orm_annotate(element, exclude=None): - """Deep copy the given ClauseElement, annotating each element with the - "_orm_adapt" flag. - - Elements within the exclude collection will be cloned but not annotated. 
- - """ - return sql_util._deep_annotate(element, {'_orm_adapt':True}, exclude) - -_orm_deannotate = sql_util._deep_deannotate - -class _ORMJoin(expression.Join): - """Extend Join to support ORM constructs as input.""" - - __visit_name__ = expression.Join.__visit_name__ - - def __init__(self, left, right, onclause=None, - isouter=False, join_to_left=True): - adapt_from = None - - if hasattr(left, '_orm_mappers'): - left_mapper = left._orm_mappers[1] - if join_to_left: - adapt_from = left.right - else: - left_mapper, left, left_is_aliased = _entity_info(left) - if join_to_left and (left_is_aliased or not left_mapper): - adapt_from = left - - right_mapper, right, right_is_aliased = _entity_info(right) - if right_is_aliased: - adapt_to = right - else: - adapt_to = None - - if left_mapper or right_mapper: - self._orm_mappers = (left_mapper, right_mapper) - - if isinstance(onclause, basestring): - prop = left_mapper.get_property(onclause) - elif isinstance(onclause, attributes.QueryableAttribute): - if adapt_from is None: - adapt_from = onclause.__clause_element__() - prop = onclause.property - elif isinstance(onclause, MapperProperty): - prop = onclause - else: - prop = None - - if prop: - pj, sj, source, dest, \ - secondary, target_adapter = prop._create_joins( - source_selectable=adapt_from, - dest_selectable=adapt_to, - source_polymorphic=True, - dest_polymorphic=True, - of_type=right_mapper) - - if sj is not None: - left = sql.join(left, secondary, pj, isouter) - onclause = sj - else: - onclause = pj - self._target_adapter = target_adapter - - expression.Join.__init__(self, left, right, onclause, isouter) - - def join(self, right, onclause=None, isouter=False, join_to_left=True): - return _ORMJoin(self, right, onclause, isouter, join_to_left) - - def outerjoin(self, right, onclause=None, join_to_left=True): - return _ORMJoin(self, right, onclause, True, join_to_left) - -def join(left, right, onclause=None, isouter=False, join_to_left=True): - """Produce an inner join between left and right clauses. - - :func:`.orm.join` is an extension to the core join interface - provided by :func:`.sql.expression.join()`, where the - left and right selectables may be not only core selectable - objects such as :class:`.Table`, but also mapped classes or - :class:`.AliasedClass` instances. The "on" clause can - be a SQL expression, or an attribute or string name - referencing a configured :func:`.relationship`. - - ``join_to_left`` indicates to attempt aliasing the ON clause, - in whatever form it is passed, to the selectable - passed as the left side. If False, the onclause - is used as is. - - :func:`.orm.join` is not commonly needed in modern usage, - as its functionality is encapsulated within that of the - :meth:`.Query.join` method, which features a - significant amount of automation beyond :func:`.orm.join` - by itself. Explicit usage of :func:`.orm.join` - with :class:`.Query` involves usage of the - :meth:`.Query.select_from` method, as in:: - - from sqlalchemy.orm import join - session.query(User).\\ - select_from(join(User, Address, User.addresses)).\\ - filter(Address.email_address=='foo@bar.com') - - In modern SQLAlchemy the above join can be written more - succinctly as:: - - session.query(User).\\ - join(User.addresses).\\ - filter(Address.email_address=='foo@bar.com') - - See :meth:`.Query.join` for information on modern usage - of ORM level joins. 
- - """ - return _ORMJoin(left, right, onclause, isouter, join_to_left) - -def outerjoin(left, right, onclause=None, join_to_left=True): - """Produce a left outer join between left and right clauses. - - This is the "outer join" version of the :func:`.orm.join` function, - featuring the same behavior except that an OUTER JOIN is generated. - See that function's documentation for other usage details. - - """ - return _ORMJoin(left, right, onclause, True, join_to_left) - -def with_parent(instance, prop): - """Create filtering criterion that relates this query's primary entity - to the given related instance, using established :func:`.relationship()` - configuration. - - The SQL rendered is the same as that rendered when a lazy loader - would fire off from the given parent on that attribute, meaning - that the appropriate state is taken from the parent object in - Python without the need to render joins to the parent table - in the rendered statement. - - .. versionchanged:: 0.6.4 - This method accepts parent instances in all - persistence states, including transient, persistent, and detached. - Only the requisite primary key/foreign key attributes need to - be populated. Previous versions didn't work with transient - instances. - - :param instance: - An instance which has some :func:`.relationship`. - - :param property: - String property name, or class-bound attribute, which indicates - what relationship from the instance should be used to reconcile the - parent/child relationship. - - """ - if isinstance(prop, basestring): - mapper = object_mapper(instance) - prop = getattr(mapper.class_, prop).property - elif isinstance(prop, attributes.QueryableAttribute): - prop = prop.property - - return prop.compare(operators.eq, - instance, - value_is_parent=True) - - -def _entity_info(entity, compile=True): - """Return mapping information given a class, mapper, or AliasedClass. - - Returns 3-tuple of: mapper, mapped selectable, boolean indicating if this - is an aliased() construct. - - If the given entity is not a mapper, mapped class, or aliased construct, - returns None, the entity, False. This is typically used to allow - unmapped selectables through. - - """ - if isinstance(entity, AliasedClass): - return entity._AliasedClass__mapper, entity._AliasedClass__alias, True - - if isinstance(entity, mapperlib.Mapper): - mapper = entity - - elif isinstance(entity, type): - class_manager = attributes.manager_of_class(entity) - - if class_manager is None: - return None, entity, False - - mapper = class_manager.mapper - else: - return None, entity, False - - if compile and mapperlib.module._new_mappers: - mapperlib.configure_mappers() - return mapper, mapper._with_polymorphic_selectable, False - -def _entity_descriptor(entity, key): - """Return a class attribute given an entity and string name. - - May return :class:`.InstrumentedAttribute` or user-defined - attribute. 
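    For example, given an assumed mapped ``User`` class,
    ``_entity_descriptor(User, 'addresses')`` returns ``User.addresses``;
    an unknown name raises ``InvalidRequestError``, as below.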
- - """ - if isinstance(entity, expression.FromClause): - description = entity - entity = entity.c - elif not isinstance(entity, (AliasedClass, type)): - description = entity = entity.class_ - else: - description = entity - - try: - return getattr(entity, key) - except AttributeError: - raise sa_exc.InvalidRequestError( - "Entity '%s' has no property '%s'" % - (description, key) - ) - -def _orm_columns(entity): - mapper, selectable, is_aliased_class = _entity_info(entity) - if isinstance(selectable, expression.Selectable): - return [c for c in selectable.c] - else: - return [selectable] - -def _orm_selectable(entity): - mapper, selectable, is_aliased_class = _entity_info(entity) - return selectable - -def _attr_as_key(attr): - if hasattr(attr, 'key'): - return attr.key - else: - return expression._column_as_key(attr) - -def _is_aliased_class(entity): - return isinstance(entity, AliasedClass) - -_state_mapper = util.dottedgetter('manager.mapper') - -def object_mapper(instance): - """Given an object, return the primary Mapper associated with the object - instance. - - Raises UnmappedInstanceError if no mapping is configured. - - """ - try: - state = attributes.instance_state(instance) - return state.manager.mapper - except exc.UnmappedClassError: - raise exc.UnmappedInstanceError(instance) - except exc.NO_STATE: - raise exc.UnmappedInstanceError(instance) - -def class_mapper(class_, compile=True): - """Given a class, return the primary :class:`.Mapper` associated - with the key. - - Raises :class:`.UnmappedClassError` if no mapping is configured - on the given class, or :class:`.ArgumentError` if a non-class - object is passed. - - """ - - try: - class_manager = attributes.manager_of_class(class_) - mapper = class_manager.mapper - - except exc.NO_STATE: - if not isinstance(class_, type): - raise sa_exc.ArgumentError("Class object expected, got '%r'." 
% class_) - raise exc.UnmappedClassError(class_) - - if compile and mapperlib.module._new_mappers: - mapperlib.configure_mappers() - return mapper - -def _class_to_mapper(class_or_mapper, compile=True): - if _is_aliased_class(class_or_mapper): - return class_or_mapper._AliasedClass__mapper - - elif isinstance(class_or_mapper, type): - try: - class_manager = attributes.manager_of_class(class_or_mapper) - mapper = class_manager.mapper - except exc.NO_STATE: - raise exc.UnmappedClassError(class_or_mapper) - elif isinstance(class_or_mapper, mapperlib.Mapper): - mapper = class_or_mapper - else: - raise exc.UnmappedClassError(class_or_mapper) - - if compile and mapperlib.module._new_mappers: - mapperlib.configure_mappers() - return mapper - -def has_identity(object): - state = attributes.instance_state(object) - return state.has_identity - -def _is_mapped_class(cls): - """Return True if the given object is a mapped class, - :class:`.Mapper`, or :class:`.AliasedClass`.""" - - if isinstance(cls, (AliasedClass, mapperlib.Mapper)): - return True - if isinstance(cls, expression.ClauseElement): - return False - if isinstance(cls, type): - manager = attributes.manager_of_class(cls) - return manager and _INSTRUMENTOR in manager.info - return False - -def _mapper_or_none(cls): - """Return the :class:`.Mapper` for the given class or None if the - class is not mapped.""" - - manager = attributes.manager_of_class(cls) - if manager is not None and _INSTRUMENTOR in manager.info: - return manager.info[_INSTRUMENTOR] - else: - return None - -def instance_str(instance): - """Return a string describing an instance.""" - - return state_str(attributes.instance_state(instance)) - -def state_str(state): - """Return a string describing an instance via its InstanceState.""" - - if state is None: - return "None" - else: - return '<%s at 0x%x>' % (state.class_.__name__, id(state.obj())) - -def state_class_str(state): - """Return a string describing an instance's class via its InstanceState.""" - - if state is None: - return "None" - else: - return '<%s>' % (state.class_.__name__, ) - -def attribute_str(instance, attribute): - return instance_str(instance) + "." + attribute - -def state_attribute_str(state, attribute): - return state_str(state) + "." + attribute - -def identity_equal(a, b): - if a is b: - return True - if a is None or b is None: - return False - try: - state_a = attributes.instance_state(a) - state_b = attributes.instance_state(b) - except exc.NO_STATE: - return False - if state_a.key is None or state_b.key is None: - return False - return state_a.key == state_b.key - diff --git a/libs/sqlalchemy/pool.py b/libs/sqlalchemy/pool.py deleted file mode 100644 index 0d04998c..00000000 --- a/libs/sqlalchemy/pool.py +++ /dev/null @@ -1,1028 +0,0 @@ -# sqlalchemy/pool.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - - -"""Connection pooling for DB-API connections. - -Provides a number of connection pool implementations for a variety of -usage scenarios and thread behavior requirements imposed by the -application, DB-API or database itself. - -Also provides a DB-API 2.0 connection proxying mechanism allowing -regular DB-API connect() methods to be transparently managed by a -SQLAlchemy connection pool. 
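Usage sketch (sqlite3 stands in for any DB-API module here)::

    import sqlite3
    from sqlalchemy import pool

    p = pool.QueuePool(lambda: sqlite3.connect('file.db'), pool_size=5)
    conn = p.connect()   # checked-out, proxied DB-API connection
    conn.close()         # returns the connection to the pool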
-""" - -import weakref -import time -import traceback - -from sqlalchemy import exc, log, event, events, interfaces, util -from sqlalchemy.util import queue as sqla_queue -from sqlalchemy.util import threading, memoized_property, \ - chop_traceback -proxies = {} - -def manage(module, **params): - """Return a proxy for a DB-API module that automatically - pools connections. - - Given a DB-API 2.0 module and pool management parameters, returns - a proxy for the module that will automatically pool connections, - creating new connection pools for each distinct set of connection - arguments sent to the decorated module's connect() function. - - :param module: a DB-API 2.0 database module - - :param poolclass: the class used by the pool module to provide - pooling. Defaults to :class:`.QueuePool`. - - :param \*\*params: will be passed through to *poolclass* - - """ - try: - return proxies[module] - except KeyError: - return proxies.setdefault(module, _DBProxy(module, **params)) - -def clear_managers(): - """Remove all current DB-API 2.0 managers. - - All pools and connections are disposed. - """ - - for manager in proxies.itervalues(): - manager.close() - proxies.clear() - -reset_rollback = util.symbol('reset_rollback') -reset_commit = util.symbol('reset_commit') -reset_none = util.symbol('reset_none') - - -class Pool(log.Identified): - """Abstract base class for connection pools.""" - - def __init__(self, - creator, recycle=-1, echo=None, - use_threadlocal=False, - logging_name=None, - reset_on_return=True, - listeners=None, - events=None, - _dispatch=None): - """ - Construct a Pool. - - :param creator: a callable function that returns a DB-API - connection object. The function will be called with - parameters. - - :param recycle: If set to non -1, number of seconds between - connection recycling, which means upon checkout, if this - timeout is surpassed the connection will be closed and - replaced with a newly opened connection. Defaults to -1. - - :param logging_name: String identifier which will be used within - the "name" field of logging records generated within the - "sqlalchemy.pool" logger. Defaults to a hexstring of the object's - id. - - :param echo: If True, connections being pulled and retrieved - from the pool will be logged to the standard output, as well - as pool sizing information. Echoing can also be achieved by - enabling logging for the "sqlalchemy.pool" - namespace. Defaults to False. - - :param use_threadlocal: If set to True, repeated calls to - :meth:`connect` within the same application thread will be - guaranteed to return the same connection object, if one has - already been retrieved from the pool and has not been - returned yet. Offers a slight performance advantage at the - cost of individual transactions by default. The - :meth:`unique_connection` method is provided to bypass the - threadlocal behavior installed into :meth:`connect`. - - :param reset_on_return: If true, reset the database state of - connections returned to the pool. This is typically a - ROLLBACK to release locks and transaction resources. - Disable at your own peril. Defaults to True. - - :param events: a list of 2-tuples, each of the form - ``(callable, target)`` which will be passed to event.listen() - upon construction. Provided here so that event listeners - can be assigned via ``create_engine`` before dialect-level - listeners are applied. - - :param listeners: Deprecated. 
A list of - :class:`~sqlalchemy.interfaces.PoolListener`-like objects or - dictionaries of callables that receive events when DB-API - connections are created, checked out and checked in to the - pool. This has been superseded by - :func:`~sqlalchemy.event.listen`. - - """ - if logging_name: - self.logging_name = self._orig_logging_name = logging_name - else: - self._orig_logging_name = None - - log.instance_logger(self, echoflag=echo) - self._threadconns = threading.local() - self._creator = creator - self._recycle = recycle - self._use_threadlocal = use_threadlocal - if reset_on_return in ('rollback', True, reset_rollback): - self._reset_on_return = reset_rollback - elif reset_on_return in (None, False, reset_none): - self._reset_on_return = reset_none - elif reset_on_return in ('commit', reset_commit): - self._reset_on_return = reset_commit - else: - raise exc.ArgumentError( - "Invalid value for 'reset_on_return': %r" - % reset_on_return) - - self.echo = echo - if _dispatch: - self.dispatch._update(_dispatch, only_propagate=False) - if events: - for fn, target in events: - event.listen(self, target, fn) - if listeners: - util.warn_deprecated( - "The 'listeners' argument to Pool (and " - "create_engine()) is deprecated. Use event.listen().") - for l in listeners: - self.add_listener(l) - - dispatch = event.dispatcher(events.PoolEvents) - - @util.deprecated(2.7, "Pool.add_listener is deprecated. Use event.listen()") - def add_listener(self, listener): - """Add a :class:`.PoolListener`-like object to this pool. - - ``listener`` may be an object that implements some or all of - PoolListener, or a dictionary of callables containing implementations - of some or all of the named methods in PoolListener. - - """ - interfaces.PoolListener._adapt_listener(self, listener) - - def unique_connection(self): - """Produce a DBAPI connection that is not referenced by any - thread-local context. - - This method is different from :meth:`.Pool.connect` only if the - ``use_threadlocal`` flag has been set to ``True``. - - """ - - return _ConnectionFairy(self).checkout() - - def _create_connection(self): - """Called by subclasses to create a new ConnectionRecord.""" - - return _ConnectionRecord(self) - - def recreate(self): - """Return a new :class:`.Pool`, of the same class as this one - and configured with identical creation arguments. - - This method is used in conjunection with :meth:`dispose` - to close out an entire :class:`.Pool` and create a new one in - its place. - - """ - - raise NotImplementedError() - - def dispose(self): - """Dispose of this pool. - - This method leaves the possibility of checked-out connections - remaining open, as it only affects connections that are - idle in the pool. - - See also the :meth:`Pool.recreate` method. - - """ - - raise NotImplementedError() - - def _replace(self): - """Dispose + recreate this pool. - - Subclasses may employ special logic to - move threads waiting on this pool to the - new one. - - """ - self.dispose() - return self.recreate() - - def connect(self): - """Return a DBAPI connection from the pool. - - The connection is instrumented such that when its - ``close()`` method is called, the connection will be returned to - the pool. 
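        Usage sketch, given a :class:`.Pool` instance ``p``::

            conn = p.connect()
            cursor = conn.cursor()   # proxied DB-API cursor
            conn.close()             # checkin; the DB-API connection stays open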
- - """ - if not self._use_threadlocal: - return _ConnectionFairy(self).checkout() - - try: - rec = self._threadconns.current() - if rec: - return rec.checkout() - except AttributeError: - pass - - agent = _ConnectionFairy(self) - self._threadconns.current = weakref.ref(agent) - return agent.checkout() - - def _return_conn(self, record): - """Given a _ConnectionRecord, return it to the :class:`.Pool`. - - This method is called when an instrumented DBAPI connection - has its ``close()`` method called. - - """ - if self._use_threadlocal: - try: - del self._threadconns.current - except AttributeError: - pass - self._do_return_conn(record) - - def _do_get(self): - """Implementation for :meth:`get`, supplied by subclasses.""" - - raise NotImplementedError() - - def _do_return_conn(self, conn): - """Implementation for :meth:`return_conn`, supplied by subclasses.""" - - raise NotImplementedError() - - def status(self): - raise NotImplementedError() - - -class _ConnectionRecord(object): - finalize_callback = None - - def __init__(self, pool): - self.__pool = pool - self.connection = self.__connect() - self.info = {} - - pool.dispatch.first_connect.\ - for_modify(pool.dispatch).\ - exec_once(self.connection, self) - pool.dispatch.connect(self.connection, self) - - def close(self): - if self.connection is not None: - self.__pool.logger.debug("Closing connection %r", self.connection) - try: - self.connection.close() - except (SystemExit, KeyboardInterrupt): - raise - except: - self.__pool.logger.debug("Exception closing connection %r", - self.connection) - - def invalidate(self, e=None): - if e is not None: - self.__pool.logger.info( - "Invalidate connection %r (reason: %s:%s)", - self.connection, e.__class__.__name__, e) - else: - self.__pool.logger.info( - "Invalidate connection %r", self.connection) - self.__close() - self.connection = None - - def get_connection(self): - if self.connection is None: - self.connection = self.__connect() - self.info.clear() - if self.__pool.dispatch.connect: - self.__pool.dispatch.connect(self.connection, self) - elif self.__pool._recycle > -1 and \ - time.time() - self.starttime > self.__pool._recycle: - self.__pool.logger.info( - "Connection %r exceeded timeout; recycling", - self.connection) - self.__close() - self.connection = self.__connect() - self.info.clear() - if self.__pool.dispatch.connect: - self.__pool.dispatch.connect(self.connection, self) - return self.connection - - def __close(self): - try: - self.__pool.logger.debug("Closing connection %r", self.connection) - self.connection.close() - except (SystemExit, KeyboardInterrupt): - raise - except Exception, e: - self.__pool.logger.debug( - "Connection %r threw an error on close: %s", - self.connection, e) - - def __connect(self): - try: - self.starttime = time.time() - connection = self.__pool._creator() - self.__pool.logger.debug("Created new connection %r", connection) - return connection - except Exception, e: - self.__pool.logger.debug("Error on connect(): %s", e) - raise - - -def _finalize_fairy(connection, connection_record, pool, ref, echo): - _refs.discard(connection_record) - - if ref is not None and \ - connection_record.fairy is not ref: - return - - if connection is not None: - try: - if pool._reset_on_return is reset_rollback: - connection.rollback() - elif pool._reset_on_return is reset_commit: - connection.commit() - # Immediately close detached instances - if connection_record is None: - connection.close() - except Exception, e: - if connection_record is not None: - 
connection_record.invalidate(e=e) - if isinstance(e, (SystemExit, KeyboardInterrupt)): - raise - - if connection_record is not None: - connection_record.fairy = None - if echo: - pool.logger.debug("Connection %r being returned to pool", - connection) - if connection_record.finalize_callback: - connection_record.finalize_callback(connection) - del connection_record.finalize_callback - if pool.dispatch.checkin: - pool.dispatch.checkin(connection, connection_record) - pool._return_conn(connection_record) - -_refs = set() - -class _ConnectionFairy(object): - """Proxies a DB-API connection and provides return-on-dereference - support.""" - - __slots__ = '_pool', '__counter', 'connection', \ - '_connection_record', '__weakref__', \ - '_detached_info', '_echo' - - def __init__(self, pool): - self._pool = pool - self.__counter = 0 - self._echo = _echo = pool._should_log_debug() - try: - rec = self._connection_record = pool._do_get() - conn = self.connection = self._connection_record.get_connection() - rec.fairy = weakref.ref( - self, - lambda ref:_finalize_fairy and _finalize_fairy(conn, rec, pool, ref, _echo) - ) - _refs.add(rec) - except: - # helps with endless __getattr__ loops later on - self.connection = None - self._connection_record = None - raise - if self._echo: - self._pool.logger.debug("Connection %r checked out from pool" % - self.connection) - - @property - def _logger(self): - return self._pool.logger - - @property - def is_valid(self): - return self.connection is not None - - @property - def info(self): - """An info collection unique to this DB-API connection.""" - - try: - return self._connection_record.info - except AttributeError: - if self.connection is None: - raise exc.InvalidRequestError("This connection is closed") - try: - return self._detached_info - except AttributeError: - self._detached_info = value = {} - return value - - def invalidate(self, e=None): - """Mark this connection as invalidated. - - The connection will be immediately closed. The containing - ConnectionRecord will create a new connection when next used. - """ - - if self.connection is None: - raise exc.InvalidRequestError("This connection is closed") - if self._connection_record is not None: - self._connection_record.invalidate(e=e) - self.connection = None - self._close() - - def cursor(self, *args, **kwargs): - return self.connection.cursor(*args, **kwargs) - - def __getattr__(self, key): - return getattr(self.connection, key) - - def checkout(self): - if self.connection is None: - raise exc.InvalidRequestError("This connection is closed") - self.__counter += 1 - - if not self._pool.dispatch.checkout or self.__counter != 1: - return self - - # Pool listeners can trigger a reconnection on checkout - attempts = 2 - while attempts > 0: - try: - self._pool.dispatch.checkout(self.connection, - self._connection_record, - self) - return self - except exc.DisconnectionError, e: - self._pool.logger.info( - "Disconnection detected on checkout: %s", e) - self._connection_record.invalidate(e) - self.connection = self._connection_record.get_connection() - attempts -= 1 - - self._pool.logger.info("Reconnection attempts exhausted on checkout") - self.invalidate() - raise exc.InvalidRequestError("This connection is closed") - - def detach(self): - """Separate this connection from its Pool. - - This means that the connection will no longer be returned to the - pool when closed, and will instead be literally closed. 
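# [Editor's sketch] The checkout retry loop above, seen from a
# listener's side: raising DisconnectionError inside a 'checkout'
# listener makes the pool invalidate the record and retry with a fresh
# connection (at most twice, per the attempts counter). This is the
# documented "pessimistic disconnect" pattern.
import sqlite3
from sqlalchemy import event, exc, pool

p = pool.QueuePool(lambda: sqlite3.connect(':memory:'))

@event.listens_for(p, 'checkout')
def ping_connection(dbapi_conn, connection_record, connection_proxy):
    cursor = dbapi_conn.cursor()
    try:
        cursor.execute('SELECT 1')
    except Exception:
        # connection is dead; checkout() will invalidate and retry
        raise exc.DisconnectionError()
    cursor.close()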
The - containing ConnectionRecord is separated from the DB-API connection, - and will create a new connection when next used. - - Note that any overall connection limiting constraints imposed by a - Pool implementation may be violated after a detach, as the detached - connection is removed from the pool's knowledge and control. - """ - - if self._connection_record is not None: - _refs.remove(self._connection_record) - self._connection_record.fairy = None - self._connection_record.connection = None - self._pool._do_return_conn(self._connection_record) - self._detached_info = \ - self._connection_record.info.copy() - self._connection_record = None - - def close(self): - self.__counter -= 1 - if self.__counter == 0: - self._close() - - def _close(self): - _finalize_fairy(self.connection, self._connection_record, - self._pool, None, self._echo) - self.connection = None - self._connection_record = None - -class SingletonThreadPool(Pool): - """A Pool that maintains one connection per thread. - - Maintains one connection per each thread, never moving a connection to a - thread other than the one which it was created in. - - Options are the same as those of :class:`.Pool`, as well as: - - :param pool_size: The number of threads in which to maintain connections - at once. Defaults to five. - - :class:`.SingletonThreadPool` is used by the SQLite dialect - automatically when a memory-based database is used. - See :ref:`sqlite_toplevel`. - - """ - - def __init__(self, creator, pool_size=5, **kw): - kw['use_threadlocal'] = True - Pool.__init__(self, creator, **kw) - self._conn = threading.local() - self._all_conns = set() - self.size = pool_size - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, - pool_size=self.size, - recycle=self._recycle, - echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - _dispatch=self.dispatch) - - def dispose(self): - """Dispose of this pool.""" - - for conn in self._all_conns: - try: - conn.close() - except (SystemExit, KeyboardInterrupt): - raise - except: - # pysqlite won't even let you close a conn from a thread - # that didn't create it - pass - - self._all_conns.clear() - - def _cleanup(self): - while len(self._all_conns) > self.size: - c = self._all_conns.pop() - c.close() - - def status(self): - return "SingletonThreadPool id:%d size: %d" % \ - (id(self), len(self._all_conns)) - - def _do_return_conn(self, conn): - pass - - def _do_get(self): - try: - c = self._conn.current() - if c: - return c - except AttributeError: - pass - c = self._create_connection() - self._conn.current = weakref.ref(c) - self._all_conns.add(c) - if len(self._all_conns) > self.size: - self._cleanup() - return c - -class DummyLock(object): - def acquire(self, wait=True): - return True - def release(self): - pass - -class QueuePool(Pool): - """A :class:`.Pool` that imposes a limit on the number of open connections. - - :class:`.QueuePool` is the default pooling implementation used for - all :class:`.Engine` objects, unless the SQLite dialect is in use. - - """ - - def __init__(self, creator, pool_size=5, max_overflow=10, timeout=30, - **kw): - """ - Construct a QueuePool. - - :param creator: a callable function that returns a DB-API - connection object. The function will be called with - parameters. - - :param pool_size: The size of the pool to be maintained, - defaults to 5. This is the largest number of connections that - will be kept persistently in the pool. 
Note that the pool - begins with no connections; once this number of connections - is requested, that number of connections will remain. - ``pool_size`` can be set to 0 to indicate no size limit; to - disable pooling, use a :class:`~sqlalchemy.pool.NullPool` - instead. - - :param max_overflow: The maximum overflow size of the - pool. When the number of checked-out connections reaches the - size set in pool_size, additional connections will be - returned up to this limit. When those additional connections - are returned to the pool, they are disconnected and - discarded. It follows then that the total number of - simultaneous connections the pool will allow is pool_size + - `max_overflow`, and the total number of "sleeping" - connections the pool will allow is pool_size. `max_overflow` - can be set to -1 to indicate no overflow limit; no limit - will be placed on the total number of concurrent - connections. Defaults to 10. - - :param timeout: The number of seconds to wait before giving up - on returning a connection. Defaults to 30. - - :param recycle: If set to non -1, number of seconds between - connection recycling, which means upon checkout, if this - timeout is surpassed the connection will be closed and - replaced with a newly opened connection. Defaults to -1. - - :param echo: If True, connections being pulled and retrieved - from the pool will be logged to the standard output, as well - as pool sizing information. Echoing can also be achieved by - enabling logging for the "sqlalchemy.pool" - namespace. Defaults to False. - - :param use_threadlocal: If set to True, repeated calls to - :meth:`connect` within the same application thread will be - guaranteed to return the same connection object, if one has - already been retrieved from the pool and has not been - returned yet. Offers a slight performance advantage at the - cost of individual transactions by default. The - :meth:`unique_connection` method is provided to bypass the - threadlocal behavior installed into :meth:`connect`. - - :param reset_on_return: Determine steps to take on - connections as they are returned to the pool. - reset_on_return can have any of these values: - - * 'rollback' - call rollback() on the connection, - to release locks and transaction resources. - This is the default value. The vast majority - of use cases should leave this value set. - * True - same as 'rollback', this is here for - backwards compatibility. - * 'commit' - call commit() on the connection, - to release locks and transaction resources. - A commit here may be desirable for databases that - cache query plans if a commit is emitted, - such as Microsoft SQL Server. However, this - value is more dangerous than 'rollback' because - any data changes present on the transaction - are committed unconditionally. - * None - don't do anything on the connection. - This setting should only be made on a database - that has no transaction support at all, - namely MySQL MyISAM. By not doing anything, - performance can be improved. This - setting should **never be selected** for a - database that supports transactions, - as it will lead to deadlocks and stale - state. - * False - same as None, this is here for - backwards compatibility. - - .. versionchanged:: 0.7.6 - ``reset_on_return`` accepts values. - - :param listeners: A list of - :class:`~sqlalchemy.interfaces.PoolListener`-like objects or - dictionaries of callables that receive events when DB-API - connections are created, checked out and checked in to the - pool. 
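# [Editor's sketch] The sizing arithmetic documented above: pool_size
# plus max_overflow bounds concurrent checkouts, and a blocked checkout
# gives up after `timeout` seconds with a TimeoutError.
import sqlite3
from sqlalchemy import pool, exc

p = pool.QueuePool(lambda: sqlite3.connect(':memory:'),
                   pool_size=2, max_overflow=1, timeout=0.5)

held = [p.connect() for _ in range(3)]   # 2 pooled + 1 overflow
try:
    p.connect()                          # nothing left to hand out
except exc.TimeoutError:
    pass
for c in held:
    c.close()   # two refill the queue; the overflow one is discarded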
- - """ - Pool.__init__(self, creator, **kw) - self._pool = sqla_queue.Queue(pool_size) - self._overflow = 0 - pool_size - self._max_overflow = max_overflow - self._timeout = timeout - self._overflow_lock = self._max_overflow > -1 and \ - threading.Lock() or DummyLock() - - def _do_return_conn(self, conn): - try: - self._pool.put(conn, False) - except sqla_queue.Full: - conn.close() - self._overflow_lock.acquire() - try: - self._overflow -= 1 - finally: - self._overflow_lock.release() - - def _do_get(self): - try: - wait = self._max_overflow > -1 and \ - self._overflow >= self._max_overflow - return self._pool.get(wait, self._timeout) - except sqla_queue.SAAbort, aborted: - return aborted.context._do_get() - except sqla_queue.Empty: - if self._max_overflow > -1 and \ - self._overflow >= self._max_overflow: - if not wait: - return self._do_get() - else: - raise exc.TimeoutError( - "QueuePool limit of size %d overflow %d reached, " - "connection timed out, timeout %d" % - (self.size(), self.overflow(), self._timeout)) - - self._overflow_lock.acquire() - try: - if self._max_overflow > -1 and \ - self._overflow >= self._max_overflow: - return self._do_get() - else: - con = self._create_connection() - self._overflow += 1 - return con - finally: - self._overflow_lock.release() - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, pool_size=self._pool.maxsize, - max_overflow=self._max_overflow, - timeout=self._timeout, - recycle=self._recycle, echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - _dispatch=self.dispatch) - - def dispose(self): - while True: - try: - conn = self._pool.get(False) - conn.close() - except sqla_queue.Empty: - break - - self._overflow = 0 - self.size() - self.logger.info("Pool disposed. %s", self.status()) - - def _replace(self): - self.dispose() - np = self.recreate() - self._pool.abort(np) - return np - - def status(self): - return "Pool size: %d Connections in pool: %d "\ - "Current Overflow: %d Current Checked out "\ - "connections: %d" % (self.size(), - self.checkedin(), - self.overflow(), - self.checkedout()) - - def size(self): - return self._pool.maxsize - - def checkedin(self): - return self._pool.qsize() - - def overflow(self): - return self._overflow - - def checkedout(self): - return self._pool.maxsize - self._pool.qsize() + self._overflow - -class NullPool(Pool): - """A Pool which does not pool connections. - - Instead it literally opens and closes the underlying DB-API connection - per each connection open/close. - - Reconnect-related functions such as ``recycle`` and connection - invalidation are not supported by this Pool implementation, since - no connections are held persistently. - - .. versionchanged:: 0.7 - :class:`.NullPool` is used by the SQlite dialect automatically - when a file-based database is used. See :ref:`sqlite_toplevel`. - - """ - - def status(self): - return "NullPool" - - def _do_return_conn(self, conn): - conn.close() - - def _do_get(self): - return self._create_connection() - - def recreate(self): - self.logger.info("Pool recreating") - - return self.__class__(self._creator, - recycle=self._recycle, - echo=self.echo, - logging_name=self._orig_logging_name, - use_threadlocal=self._use_threadlocal, - _dispatch=self.dispatch) - - def dispose(self): - pass - - -class StaticPool(Pool): - """A Pool of exactly one connection, used for all requests. 
- - Reconnect-related functions such as ``recycle`` and connection - invalidation (which is also used to support auto-reconnect) are not - currently supported by this Pool implementation but may be implemented - in a future release. - - """ - - @memoized_property - def _conn(self): - return self._creator() - - @memoized_property - def connection(self): - return _ConnectionRecord(self) - - def status(self): - return "StaticPool" - - def dispose(self): - if '_conn' in self.__dict__: - self._conn.close() - self._conn = None - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(creator=self._creator, - recycle=self._recycle, - use_threadlocal=self._use_threadlocal, - reset_on_return=self._reset_on_return, - echo=self.echo, - logging_name=self._orig_logging_name, - _dispatch=self.dispatch) - - def _create_connection(self): - return self._conn - - def _do_return_conn(self, conn): - pass - - def _do_get(self): - return self.connection - -class AssertionPool(Pool): - """A :class:`.Pool` that allows at most one checked out connection at any given - time. - - This will raise an exception if more than one connection is checked out - at a time. Useful for debugging code that is using more connections - than desired. - - .. versionchanged:: 0.7 - :class:`.AssertionPool` also logs a traceback of where - the original connection was checked out, and reports - this in the assertion error raised. - - """ - def __init__(self, *args, **kw): - self._conn = None - self._checked_out = False - self._store_traceback = kw.pop('store_traceback', True) - self._checkout_traceback = None - Pool.__init__(self, *args, **kw) - - def status(self): - return "AssertionPool" - - def _do_return_conn(self, conn): - if not self._checked_out: - raise AssertionError("connection is not checked out") - self._checked_out = False - assert conn is self._conn - - def dispose(self): - self._checked_out = False - if self._conn: - self._conn.close() - - def recreate(self): - self.logger.info("Pool recreating") - return self.__class__(self._creator, echo=self.echo, - logging_name=self._orig_logging_name, - _dispatch=self.dispatch) - - def _do_get(self): - if self._checked_out: - if self._checkout_traceback: - suffix = ' at:\n%s' % ''.join( - chop_traceback(self._checkout_traceback)) - else: - suffix = '' - raise AssertionError("connection is already checked out" + suffix) - - if not self._conn: - self._conn = self._create_connection() - - self._checked_out = True - if self._store_traceback: - self._checkout_traceback = traceback.format_stack() - return self._conn - -class _DBProxy(object): - """Layers connection pooling behavior on top of a standard DB-API module. - - Proxies a DB-API 2.0 connect() call to a connection pool keyed to the - specific connect parameters. Other functions and attributes are delegated - to the underlying DB-API module. - """ - - def __init__(self, module, poolclass=QueuePool, **kw): - """Initializes a new proxy. - - module - a DB-API 2.0 module - - poolclass - a Pool class, defaulting to QueuePool - - Other parameters are sent to the Pool object's constructor. 
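# [Editor's sketch] AssertionPool as a leak detector, per the
# traceback-recording logic above: a second simultaneous checkout fails
# loudly, pointing at where the first one happened.
import sqlite3
from sqlalchemy import pool

p = pool.AssertionPool(lambda: sqlite3.connect(':memory:'))
c1 = p.connect()
try:
    p.connect()
except AssertionError:
    pass    # "connection is already checked out at: <traceback>"
c1.close()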
- - """ - - self.module = module - self.kw = kw - self.poolclass = poolclass - self.pools = {} - self._create_pool_mutex = threading.Lock() - - def close(self): - for key in self.pools.keys(): - del self.pools[key] - - def __del__(self): - self.close() - - def __getattr__(self, key): - return getattr(self.module, key) - - def get_pool(self, *args, **kw): - key = self._serialize(*args, **kw) - try: - return self.pools[key] - except KeyError: - self._create_pool_mutex.acquire() - try: - if key not in self.pools: - kw.pop('sa_pool_key', None) - pool = self.poolclass(lambda: - self.module.connect(*args, **kw), **self.kw) - self.pools[key] = pool - return pool - else: - return self.pools[key] - finally: - self._create_pool_mutex.release() - - def connect(self, *args, **kw): - """Activate a connection to the database. - - Connect to the database using this DBProxy's module and the given - connect arguments. If the arguments match an existing pool, the - connection will be returned from the pool's current thread-local - connection instance, or if there is no thread-local connection - instance it will be checked out from the set of pooled connections. - - If the pool has no available connections and allows new connections - to be created, a new database connection will be made. - - """ - - return self.get_pool(*args, **kw).connect() - - def dispose(self, *args, **kw): - """Dispose the pool referenced by the given connect arguments.""" - - key = self._serialize(*args, **kw) - try: - del self.pools[key] - except KeyError: - pass - - def _serialize(self, *args, **kw): - if "sa_pool_key" in kw: - return kw['sa_pool_key'] - - return tuple( - list(args) + - [(k, kw[k]) for k in sorted(kw)] - ) diff --git a/libs/sqlalchemy/processors.py b/libs/sqlalchemy/processors.py deleted file mode 100644 index bc5c3909..00000000 --- a/libs/sqlalchemy/processors.py +++ /dev/null @@ -1,121 +0,0 @@ -# sqlalchemy/processors.py -# Copyright (C) 2010-2011 the SQLAlchemy authors and contributors -# Copyright (C) 2010 Gaetan de Menten gdementen@gmail.com -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""defines generic type conversion functions, as used in bind and result -processors. - -They all share one common characteristic: None is passed through unchanged. - -""" - -import codecs -import re -import datetime - -def str_to_datetime_processor_factory(regexp, type_): - rmatch = regexp.match - # Even on python2.6 datetime.strptime is both slower than this code - # and it does not support microseconds. - def process(value): - if value is None: - return None - else: - try: - m = rmatch(value) - except TypeError: - raise ValueError("Couldn't parse %s string '%r' " - "- value is not a string." % (type_.__name__ , value)) - if m is None: - raise ValueError("Couldn't parse %s string: " - "'%s'" % (type_.__name__ , value)) - return type_(*map(int, m.groups(0))) - return process - -def boolean_to_int(value): - if value is None: - return None - else: - return int(value) - -def py_fallback(): - def to_unicode_processor_factory(encoding, errors=None): - decoder = codecs.getdecoder(encoding) - - def process(value): - if value is None: - return None - else: - # decoder returns a tuple: (value, len). Simply dropping the - # len part is safe: it is done that way in the normal - # 'xx'.decode(encoding) code path. 
- return decoder(value, errors)[0] - return process - - def to_decimal_processor_factory(target_class, scale=10): - fstring = "%%.%df" % scale - - def process(value): - if value is None: - return None - else: - return target_class(fstring % value) - return process - - def to_float(value): - if value is None: - return None - else: - return float(value) - - def to_str(value): - if value is None: - return None - else: - return str(value) - - def int_to_boolean(value): - if value is None: - return None - else: - return value and True or False - - DATETIME_RE = re.compile( - "(\d+)-(\d+)-(\d+) (\d+):(\d+):(\d+)(?:\.(\d+))?") - TIME_RE = re.compile("(\d+):(\d+):(\d+)(?:\.(\d+))?") - DATE_RE = re.compile("(\d+)-(\d+)-(\d+)") - - str_to_datetime = str_to_datetime_processor_factory(DATETIME_RE, - datetime.datetime) - str_to_time = str_to_datetime_processor_factory(TIME_RE, datetime.time) - str_to_date = str_to_datetime_processor_factory(DATE_RE, datetime.date) - return locals() - -try: - from sqlalchemy.cprocessors import UnicodeResultProcessor, \ - DecimalResultProcessor, \ - to_float, to_str, int_to_boolean, \ - str_to_datetime, str_to_time, \ - str_to_date - - def to_unicode_processor_factory(encoding, errors=None): - # this is cumbersome but it would be even more so on the C side - if errors is not None: - return UnicodeResultProcessor(encoding, errors).process - else: - return UnicodeResultProcessor(encoding).process - - def to_decimal_processor_factory(target_class, scale=10): - # Note that the scale argument is not taken into account for integer - # values in the C implementation while it is in the Python one. - # For example, the Python implementation might return - # Decimal('5.00000') whereas the C implementation will - # return Decimal('5'). These are equivalent of course. - return DecimalResultProcessor(target_class, "%%.%df" % scale).process - -except ImportError: - globals().update(py_fallback()) - diff --git a/libs/sqlalchemy/schema.py b/libs/sqlalchemy/schema.py deleted file mode 100644 index 154fb5f7..00000000 --- a/libs/sqlalchemy/schema.py +++ /dev/null @@ -1,3204 +0,0 @@ -# sqlalchemy/schema.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""The schema module provides the building blocks for database metadata. - -Each element within this module describes a database entity which can be -created and dropped, or is otherwise part of such an entity. Examples include -tables, columns, sequences, and indexes. - -All entities are subclasses of :class:`~sqlalchemy.schema.SchemaItem`, and as -defined in this module they are intended to be agnostic of any vendor-specific -constructs. - -A collection of entities are grouped into a unit called -:class:`~sqlalchemy.schema.MetaData`. MetaData serves as a logical grouping of -schema elements, and can also be associated with an actual database connection -such that operations involving the contained elements can contact the database -as needed. - -Two of the elements here also build upon their "syntactic" counterparts, which -are defined in :class:`~sqlalchemy.sql.expression.`, specifically -:class:`~sqlalchemy.schema.Table` and :class:`~sqlalchemy.schema.Column`. -Since these objects are part of the SQL expression language, they are usable -as components in SQL expressions. 
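# [Editor's sketch] The point just made, that Table and Column are both
# schema elements and SQL-expression elements, in a few lines (0.7-era
# select() list syntax):
from sqlalchemy import MetaData, Table, Column, Integer, String, select

meta = MetaData()
users = Table('users', meta,
              Column('id', Integer, primary_key=True),
              Column('name', String(50)))

stmt = select([users.c.name]).where(users.c.id == 1)   # expression role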
- -""" -import re, inspect -from sqlalchemy import exc, util, dialects -from sqlalchemy.sql import expression, visitors -from sqlalchemy import event, events - - -ddl = util.importlater("sqlalchemy.engine", "ddl") -sqlutil = util.importlater("sqlalchemy.sql", "util") -url = util.importlater("sqlalchemy.engine", "url") -sqltypes = util.importlater("sqlalchemy", "types") - -__all__ = ['SchemaItem', 'Table', 'Column', 'ForeignKey', 'Sequence', 'Index', - 'ForeignKeyConstraint', 'PrimaryKeyConstraint', 'CheckConstraint', - 'UniqueConstraint', 'DefaultGenerator', 'Constraint', 'MetaData', - 'ThreadLocalMetaData', 'SchemaVisitor', 'PassiveDefault', - 'DefaultClause', 'FetchedValue', 'ColumnDefault', 'DDL', - 'CreateTable', 'DropTable', 'CreateSequence', 'DropSequence', - 'AddConstraint', 'DropConstraint', - ] -__all__.sort() - -RETAIN_SCHEMA = util.symbol('retain_schema') - -class SchemaItem(events.SchemaEventTarget, visitors.Visitable): - """Base class for items that define a database schema.""" - - __visit_name__ = 'schema_item' - quote = None - - def _init_items(self, *args): - """Initialize the list of child items for this SchemaItem.""" - - for item in args: - if item is not None: - item._set_parent_with_dispatch(self) - - def get_children(self, **kwargs): - """used to allow SchemaVisitor access""" - return [] - - def __repr__(self): - return util.generic_repr(self) - - @util.memoized_property - def info(self): - return {} - -def _get_table_key(name, schema): - if schema is None: - return name - else: - return schema + "." + name - -def _validate_dialect_kwargs(kwargs, name): - # validate remaining kwargs that they all specify DB prefixes - if len([k for k in kwargs - if not re.match( - r'^(?:%s)_' % - '|'.join(dialects.__all__), k - ) - ]): - raise TypeError( - "Invalid argument(s) for %s: %r" % (name, kwargs.keys())) - - -class Table(SchemaItem, expression.TableClause): - """Represent a table in a database. - - e.g.:: - - mytable = Table("mytable", metadata, - Column('mytable_id', Integer, primary_key=True), - Column('value', String(50)) - ) - - The :class:`.Table` object constructs a unique instance of itself based on its - name and optional schema name within the given :class:`.MetaData` object. - Calling the :class:`.Table` - constructor with the same name and same :class:`.MetaData` argument - a second time will return the *same* :class:`.Table` object - in this way - the :class:`.Table` constructor acts as a registry function. - - See also: - - :ref:`metadata_describing` - Introduction to database metadata - - Constructor arguments are as follows: - - :param name: The name of this table as represented in the database. - - This property, along with the *schema*, indicates the *singleton - identity* of this table in relation to its parent :class:`.MetaData`. - Additional calls to :class:`.Table` with the same name, metadata, - and schema name will return the same :class:`.Table` object. - - Names which contain no upper case characters - will be treated as case insensitive names, and will not be quoted - unless they are a reserved word. Names with any number of upper - case characters will be quoted and sent exactly. Note that this - behavior applies even for databases which standardize upper - case names as case insensitive such as Oracle. - - :param metadata: a :class:`.MetaData` object which will contain this - table. The metadata is used as a point of association of this table - with other tables which are referenced via foreign key. 
It also - may be used to associate this table with a particular - :class:`~sqlalchemy.engine.base.Connectable`. - - :param \*args: Additional positional arguments are used primarily - to add the list of :class:`.Column` objects contained within this - table. Similar to the style of a CREATE TABLE statement, other - :class:`.SchemaItem` constructs may be added here, including - :class:`.PrimaryKeyConstraint`, and :class:`.ForeignKeyConstraint`. - - :param autoload: Defaults to False: the Columns for this table should - be reflected from the database. Usually there will be no Column - objects in the constructor if this property is set. - - :param autoload_replace: If ``True``, when using ``autoload=True`` - and ``extend_existing=True``, - replace ``Column`` objects already present in the ``Table`` that's - in the ``MetaData`` registry with - what's reflected. Otherwise, all existing columns will be - excluded from the reflection process. Note that this does - not impact ``Column`` objects specified in the same call to ``Table`` - which includes ``autoload``, those always take precedence. - Defaults to ``True``. - - .. versionadded:: 0.7.5 - - :param autoload_with: If autoload==True, this is an optional Engine - or Connection instance to be used for the table reflection. If - ``None``, the underlying MetaData's bound connectable will be used. - - :param extend_existing: When ``True``, indicates that if this :class:`.Table` is already - present in the given :class:`.MetaData`, apply further arguments within - the constructor to the existing :class:`.Table`. - - If ``extend_existing`` or ``keep_existing`` are not set, an error is - raised if additional table modifiers are specified when - the given :class:`.Table` is already present in the :class:`.MetaData`. - - .. versionchanged:: 0.7.4 - ``extend_existing`` will work in conjunction - with ``autoload=True`` to run a new reflection operation against - the database; new :class:`.Column` objects will be produced - from database metadata to replace those existing with the same - name, and additional :class:`.Column` objects not present - in the :class:`.Table` will be added. - - As is always the case with ``autoload=True``, :class:`.Column` - objects can be specified in the same :class:`.Table` constructor, - which will take precedence. I.e.:: - - Table("mytable", metadata, - Column('y', Integer), - extend_existing=True, - autoload=True, - autoload_with=engine - ) - - The above will overwrite all columns within ``mytable`` which - are present in the database, except for ``y`` which will be used as is - from the above definition. If the ``autoload_replace`` flag - is set to False, no existing columns will be replaced. - - :param implicit_returning: True by default - indicates that - RETURNING can be used by default to fetch newly inserted primary key - values, for backends which support this. Note that - create_engine() also provides an implicit_returning flag. - - :param include_columns: A list of strings indicating a subset of - columns to be loaded via the ``autoload`` operation; table columns who - aren't present in this list will not be represented on the resulting - ``Table`` object. Defaults to ``None`` which indicates all columns - should be reflected. - - :param info: A dictionary which defaults to ``{}``. A space to store - application specific data. This must be a dictionary. 
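# [Editor's sketch] The reflection flags documented above, assuming a
# SQLite engine with an existing 'users' table: autoload pulls the
# column list from the live database, and a second Table() call with
# extend_existing=True layers overrides onto the same registered object.
from sqlalchemy import create_engine, MetaData, Table, Column, Integer

engine = create_engine('sqlite:///:memory:')
engine.execute('create table users (id integer primary key, name text)')

meta = MetaData()
users = Table('users', meta, autoload=True, autoload_with=engine)

users2 = Table('users', meta,
               Column('id', Integer, primary_key=True),  # takes precedence
               extend_existing=True, autoload=True, autoload_with=engine)
assert users is users2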
-
-    :param keep_existing: When ``True``, indicates that if this Table
-        is already present in the given :class:`.MetaData`, ignore
-        further arguments within the constructor to the existing
-        :class:`.Table`, and return the :class:`.Table` object as
-        originally created. This is to allow a function that wishes
-        to define a new :class:`.Table` on first call, but on
-        subsequent calls will return the same :class:`.Table`,
-        without any of the declarations (particularly constraints)
-        being applied a second time. Also see extend_existing.
-
-        If extend_existing or keep_existing are not set, an error is
-        raised if additional table modifiers are specified when
-        the given :class:`.Table` is already present in the :class:`.MetaData`.
-
-    :param listeners: A list of tuples of the form ``(<eventname>, <fn>)``
-        which will be passed to :func:`.event.listen` upon construction.
-        This alternate hook to :func:`.event.listen` allows the establishment
-        of a listener function specific to this :class:`.Table` before
-        the "autoload" process begins. Particularly useful for
-        the :meth:`.events.column_reflect` event::
-
-            def listen_for_reflect(table, column_info):
-                "handle the column reflection event"
-                # ...
-
-            t = Table(
-                'sometable',
-                autoload=True,
-                listeners=[
-                    ('column_reflect', listen_for_reflect)
-                ])
-
-    :param mustexist: When ``True``, indicates that this Table must already
-        be present in the given :class:`.MetaData` collection, else
-        an exception is raised.
-
-    :param prefixes:
-        A list of strings to insert after CREATE in the CREATE TABLE
-        statement. They will be separated by spaces.
-
-    :param quote: Force quoting of this table's name on or off, corresponding
-        to ``True`` or ``False``. When left at its default of ``None``,
-        the column identifier will be quoted according to whether the name is
-        case sensitive (identifiers with at least one upper case character are
-        treated as case sensitive), or if it's a reserved word. This flag
-        is only needed to force quoting of a reserved word which is not known
-        by the SQLAlchemy dialect.
-
-    :param quote_schema: same as 'quote' but applies to the schema identifier.
-
-    :param schema: The *schema name* for this table, which is required if
-        the table resides in a schema other than the default selected schema
-        for the engine's database connection. Defaults to ``None``.
-
-    :param useexisting: Deprecated. Use extend_existing.
-
-    """
-
-    __visit_name__ = 'table'
-
-    def __new__(cls, *args, **kw):
-        if not args:
-            # python3k pickle seems to call this
-            return object.__new__(cls)
-
-        try:
-            name, metadata, args = args[0], args[1], args[2:]
-        except IndexError:
-            raise TypeError("Table() takes at least two arguments")
-
-        schema = kw.get('schema', None)
-        if schema is None:
-            schema = metadata.schema
-        keep_existing = kw.pop('keep_existing', False)
-        extend_existing = kw.pop('extend_existing', False)
-        if 'useexisting' in kw:
-            util.warn_deprecated("useexisting is deprecated. Use extend_existing.")
-            if extend_existing:
-                raise exc.ArgumentError("useexisting is synonymous "
-                                        "with extend_existing.")
-            extend_existing = kw.pop('useexisting', False)
-
-        if keep_existing and extend_existing:
-            raise exc.ArgumentError("keep_existing and extend_existing "
-                                    "are mutually exclusive.")
-
-        mustexist = kw.pop('mustexist', False)
-        key = _get_table_key(name, schema)
-        if key in metadata.tables:
-            if not keep_existing and not extend_existing and bool(args):
-                raise exc.InvalidRequestError(
-                    "Table '%s' is already defined for this MetaData "
-                    "instance. 
Specify 'extend_existing=True' " - "to redefine " - "options and columns on an " - "existing Table object." % key) - table = metadata.tables[key] - if extend_existing: - table._init_existing(*args, **kw) - return table - else: - if mustexist: - raise exc.InvalidRequestError( - "Table '%s' not defined" % (key)) - table = object.__new__(cls) - table.dispatch.before_parent_attach(table, metadata) - metadata._add_table(name, schema, table) - try: - table._init(name, metadata, *args, **kw) - table.dispatch.after_parent_attach(table, metadata) - return table - except: - metadata._remove_table(name, schema) - raise - - def __init__(self, *args, **kw): - """Constructor for :class:`~.schema.Table`. - - This method is a no-op. See the top-level - documentation for :class:`~.schema.Table` - for constructor arguments. - - """ - # __init__ is overridden to prevent __new__ from - # calling the superclass constructor. - - def _init(self, name, metadata, *args, **kwargs): - super(Table, self).__init__(name) - self.metadata = metadata - self.schema = kwargs.pop('schema', None) - if self.schema is None: - self.schema = metadata.schema - self.quote_schema = kwargs.pop('quote_schema', metadata.quote_schema) - else: - self.quote_schema = kwargs.pop('quote_schema', None) - - self.indexes = set() - self.constraints = set() - self._columns = expression.ColumnCollection() - PrimaryKeyConstraint()._set_parent_with_dispatch(self) - self.foreign_keys = set() - self._extra_dependencies = set() - self.kwargs = {} - if self.schema is not None: - self.fullname = "%s.%s" % (self.schema, self.name) - else: - self.fullname = self.name - - autoload = kwargs.pop('autoload', False) - autoload_with = kwargs.pop('autoload_with', None) - # this argument is only used with _init_existing() - kwargs.pop('autoload_replace', True) - include_columns = kwargs.pop('include_columns', None) - - self.implicit_returning = kwargs.pop('implicit_returning', True) - self.quote = kwargs.pop('quote', None) - if 'info' in kwargs: - self.info = kwargs.pop('info') - if 'listeners' in kwargs: - listeners = kwargs.pop('listeners') - for evt, fn in listeners: - event.listen(self, evt, fn) - - self._prefixes = kwargs.pop('prefixes', []) - - self._extra_kwargs(**kwargs) - - # load column definitions from the database if 'autoload' is defined - # we do it after the table is in the singleton dictionary to support - # circular foreign keys - if autoload: - self._autoload(metadata, autoload_with, include_columns) - - # initialize all the column, etc. objects. done after reflection to - # allow user-overrides - self._init_items(*args) - - def _autoload(self, metadata, autoload_with, include_columns, exclude_columns=()): - if self.primary_key.columns: - PrimaryKeyConstraint(*[ - c for c in self.primary_key.columns - if c.key in exclude_columns - ])._set_parent_with_dispatch(self) - - if autoload_with: - autoload_with.run_callable( - autoload_with.dialect.reflecttable, - self, include_columns, exclude_columns - ) - else: - bind = _bind_or_error(metadata, - msg="No engine is bound to this Table's MetaData. 
" - "Pass an engine to the Table via " - "autoload_with=, " - "or associate the MetaData with an engine via " - "metadata.bind=") - bind.run_callable( - bind.dialect.reflecttable, - self, include_columns, exclude_columns - ) - - @property - def _sorted_constraints(self): - """Return the set of constraints as a list, sorted by creation order.""" - - return sorted(self.constraints, key=lambda c:c._creation_order) - - def _init_existing(self, *args, **kwargs): - autoload = kwargs.pop('autoload', False) - autoload_with = kwargs.pop('autoload_with', None) - autoload_replace = kwargs.pop('autoload_replace', True) - schema = kwargs.pop('schema', None) - if schema and schema != self.schema: - raise exc.ArgumentError( - "Can't change schema of existing table from '%s' to '%s'", - (self.schema, schema)) - - include_columns = kwargs.pop('include_columns', None) - - if include_columns is not None: - for c in self.c: - if c.name not in include_columns: - self._columns.remove(c) - - for key in ('quote', 'quote_schema'): - if key in kwargs: - setattr(self, key, kwargs.pop(key)) - - if 'info' in kwargs: - self.info = kwargs.pop('info') - - if autoload: - if not autoload_replace: - exclude_columns = [c.name for c in self.c] - else: - exclude_columns = () - self._autoload(self.metadata, autoload_with, include_columns, exclude_columns) - - self._extra_kwargs(**kwargs) - self._init_items(*args) - - def _extra_kwargs(self, **kwargs): - # validate remaining kwargs that they all specify DB prefixes - _validate_dialect_kwargs(kwargs, "Table") - self.kwargs.update(kwargs) - - def _init_collections(self): - pass - - @util.memoized_property - def _autoincrement_column(self): - for col in self.primary_key: - if col.autoincrement and \ - col.type._type_affinity is not None and \ - issubclass(col.type._type_affinity, sqltypes.Integer) and \ - (not col.foreign_keys or col.autoincrement == 'ignore_fk') and \ - isinstance(col.default, (type(None), Sequence)) and \ - (col.server_default is None or col.server_default.reflected): - return col - - @property - def key(self): - return _get_table_key(self.name, self.schema) - - def __repr__(self): - return "Table(%s)" % ', '.join( - [repr(self.name)] + [repr(self.metadata)] + - [repr(x) for x in self.columns] + - ["%s=%s" % (k, repr(getattr(self, k))) for k in ['schema']]) - - def __str__(self): - return _get_table_key(self.description, self.schema) - - @property - def bind(self): - """Return the connectable associated with this Table.""" - - return self.metadata and self.metadata.bind or None - - def add_is_dependent_on(self, table): - """Add a 'dependency' for this Table. - - This is another Table object which must be created - first before this one can, or dropped after this one. - - Usually, dependencies between tables are determined via - ForeignKey objects. However, for other situations that - create dependencies outside of foreign keys (rules, inheriting), - this method can manually establish such a link. - - """ - self._extra_dependencies.add(table) - - def append_column(self, column): - """Append a :class:`~.schema.Column` to this :class:`~.schema.Table`. - - The "key" of the newly added :class:`~.schema.Column`, i.e. the - value of its ``.key`` attribute, will then be available - in the ``.c`` collection of this :class:`~.schema.Table`, and the - column definition will be included in any CREATE TABLE, SELECT, - UPDATE, etc. statements generated from this :class:`~.schema.Table` - construct. 
- - Note that this does **not** change the definition of the table - as it exists within any underlying database, assuming that - table has already been created in the database. Relational - databases support the addition of columns to existing tables - using the SQL ALTER command, which would need to be - emitted for an already-existing table that doesn't contain - the newly added column. - - """ - - column._set_parent_with_dispatch(self) - - def append_constraint(self, constraint): - """Append a :class:`~.schema.Constraint` to this :class:`~.schema.Table`. - - This has the effect of the constraint being included in any - future CREATE TABLE statement, assuming specific DDL creation - events have not been associated with the given :class:`~.schema.Constraint` - object. - - Note that this does **not** produce the constraint within the - relational database automatically, for a table that already exists - in the database. To add a constraint to an - existing relational database table, the SQL ALTER command must - be used. SQLAlchemy also provides the :class:`.AddConstraint` construct - which can produce this SQL when invoked as an executable clause. - - """ - - constraint._set_parent_with_dispatch(self) - - def append_ddl_listener(self, event_name, listener): - """Append a DDL event listener to this ``Table``. - - Deprecated. See :class:`.DDLEvents`. - - """ - - def adapt_listener(target, connection, **kw): - listener(event_name, target, connection) - - event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) - - def _set_parent(self, metadata): - metadata._add_table(self.name, self.schema, self) - self.metadata = metadata - - def get_children(self, column_collections=True, - schema_visitor=False, **kw): - if not schema_visitor: - return expression.TableClause.get_children( - self, column_collections=column_collections, **kw) - else: - if column_collections: - return list(self.columns) - else: - return [] - - def exists(self, bind=None): - """Return True if this table exists.""" - - if bind is None: - bind = _bind_or_error(self) - - return bind.run_callable(bind.dialect.has_table, - self.name, schema=self.schema) - - def create(self, bind=None, checkfirst=False): - """Issue a ``CREATE`` statement for this - :class:`.Table`, using the given :class:`.Connectable` - for connectivity. - - See also :meth:`.MetaData.create_all`. - - """ - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst) - - - def drop(self, bind=None, checkfirst=False): - """Issue a ``DROP`` statement for this - :class:`.Table`, using the given :class:`.Connectable` - for connectivity. - - See also :meth:`.MetaData.drop_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst) - - - def tometadata(self, metadata, schema=RETAIN_SCHEMA): - """Return a copy of this :class:`.Table` associated with a different - :class:`.MetaData`. 
- - E.g.:: - - # create two metadata - meta1 = MetaData('sqlite:///querytest.db') - meta2 = MetaData() - - # load 'users' from the sqlite engine - users_table = Table('users', meta1, autoload=True) - - # create the same Table object for the plain metadata - users_table_2 = users_table.tometadata(meta2) - - """ - - if schema is RETAIN_SCHEMA: - schema = self.schema - elif schema is None: - schema = metadata.schema - key = _get_table_key(self.name, schema) - if key in metadata.tables: - util.warn("Table '%s' already exists within the given " - "MetaData - not copying." % self.description) - return metadata.tables[key] - - args = [] - for c in self.columns: - args.append(c.copy(schema=schema)) - for c in self.constraints: - args.append(c.copy(schema=schema)) - table = Table( - self.name, metadata, schema=schema, - *args, **self.kwargs - ) - for index in self.indexes: - # skip indexes that would be generated - # by the 'index' flag on Column - if len(index.columns) == 1 and \ - list(index.columns)[0].index: - continue - Index(index.name, - unique=index.unique, - *[table.c[col] for col in index.columns.keys()], - **index.kwargs) - table.dispatch._update(self.dispatch) - return table - -class Column(SchemaItem, expression.ColumnClause): - """Represents a column in a database table.""" - - __visit_name__ = 'column' - - def __init__(self, *args, **kwargs): - """ - Construct a new ``Column`` object. - - :param name: The name of this column as represented in the database. - This argument may be the first positional argument, or specified - via keyword. - - Names which contain no upper case characters - will be treated as case insensitive names, and will not be quoted - unless they are a reserved word. Names with any number of upper - case characters will be quoted and sent exactly. Note that this - behavior applies even for databases which standardize upper - case names as case insensitive such as Oracle. - - The name field may be omitted at construction time and applied - later, at any time before the Column is associated with a - :class:`.Table`. This is to support convenient - usage within the :mod:`~sqlalchemy.ext.declarative` extension. - - :param type\_: The column's type, indicated using an instance which - subclasses :class:`~sqlalchemy.types.TypeEngine`. If no arguments - are required for the type, the class of the type can be sent - as well, e.g.:: - - # use a type with arguments - Column('data', String(50)) - - # use no arguments - Column('level', Integer) - - The ``type`` argument may be the second positional argument - or specified by keyword. - - There is partial support for automatic detection of the - type based on that of a :class:`.ForeignKey` associated - with this column, if the type is specified as ``None``. - However, this feature is not fully implemented and - may not function in all cases. - - :param \*args: Additional positional arguments include various - :class:`.SchemaItem` derived constructs which will be applied - as options to the column. These include instances of - :class:`.Constraint`, :class:`.ForeignKey`, :class:`.ColumnDefault`, - and :class:`.Sequence`. In some cases an equivalent keyword - argument is available such as ``server_default``, ``default`` - and ``unique``. 
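# [Editor's sketch] The name/type flexibility just described: both may
# be positional or keyword, and the name may be omitted entirely and
# attached later (the declarative extension relies on this).
from sqlalchemy import Column, Integer, String

c1 = Column('id', Integer, primary_key=True)    # both positional
c2 = Column(name='email', type_=String(100))    # both keyword
c3 = Column(String(50))                         # unnamed for now; must be
                                                # named before joining a Table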
- - :param autoincrement: This flag may be set to ``False`` to - indicate an integer primary key column that should not be - considered to be the "autoincrement" column, that is - the integer primary key column which generates values - implicitly upon INSERT and whose value is usually returned - via the DBAPI cursor.lastrowid attribute. It defaults - to ``True`` to satisfy the common use case of a table - with a single integer primary key column. If the table - has a composite primary key consisting of more than one - integer column, set this flag to True only on the - column that should be considered "autoincrement". - - The setting *only* has an effect for columns which are: - - * Integer derived (i.e. INT, SMALLINT, BIGINT). - - * Part of the primary key - - * Are not referenced by any foreign keys, unless - the value is specified as ``'ignore_fk'`` - - .. versionadded:: 0.7.4 - - * have no server side or client side defaults (with the exception - of Postgresql SERIAL). - - The setting has these two effects on columns that meet the - above criteria: - - * DDL issued for the column will include database-specific - keywords intended to signify this column as an - "autoincrement" column, such as AUTO INCREMENT on MySQL, - SERIAL on Postgresql, and IDENTITY on MS-SQL. It does - *not* issue AUTOINCREMENT for SQLite since this is a - special SQLite flag that is not required for autoincrementing - behavior. See the SQLite dialect documentation for - information on SQLite's AUTOINCREMENT. - - * The column will be considered to be available as - cursor.lastrowid or equivalent, for those dialects which - "post fetch" newly inserted identifiers after a row has - been inserted (SQLite, MySQL, MS-SQL). It does not have - any effect in this regard for databases that use sequences - to generate primary key identifiers (i.e. Firebird, Postgresql, - Oracle). - - .. versionchanged:: 0.7.4 - ``autoincrement`` accepts a special value ``'ignore_fk'`` - to indicate that autoincrementing status regardless of foreign key - references. This applies to certain composite foreign key - setups, such as the one demonstrated in the ORM documentation - at :ref:`post_update`. - - :param default: A scalar, Python callable, or - :class:`~sqlalchemy.sql.expression.ClauseElement` representing the - *default value* for this column, which will be invoked upon insert - if this column is otherwise not specified in the VALUES clause of - the insert. This is a shortcut to using :class:`.ColumnDefault` as - a positional argument. - - Contrast this argument to ``server_default`` which creates a - default generator on the database side. - - :param doc: optional String that can be used by the ORM or similar - to document attributes. This attribute does not render SQL - comments (a future attribute 'comment' will achieve that). - - :param key: An optional string identifier which will identify this - ``Column`` object on the :class:`.Table`. When a key is provided, - this is the only identifier referencing the ``Column`` within the - application, including ORM attribute mapping; the ``name`` field - is used only when rendering SQL. - - :param index: When ``True``, indicates that the column is indexed. - This is a shortcut for using a :class:`.Index` construct on the - table. To specify indexes with explicit names or indexes that - contain multiple columns, use the :class:`.Index` construct - instead. - - :param info: A dictionary which defaults to ``{}``. A space to store - application specific data. This must be a dictionary. 
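# [Editor's sketch] `default`, as documented above, is client-side:
# SQLAlchemy evaluates it at INSERT time, unlike server_default, which
# becomes DDL. A scalar and a callable (note: the callable itself is
# passed, not its result):
import datetime
from sqlalchemy import MetaData, Table, Column, Integer, DateTime

meta = MetaData()
events = Table('events', meta,
    Column('id', Integer, primary_key=True),
    Column('retries', Integer, default=0),
    Column('created_at', DateTime, default=datetime.datetime.utcnow))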
-
-    :param nullable: If set to the default of ``True``, indicates the
-        column will be rendered as allowing NULL, else it's rendered as
-        NOT NULL. This parameter is only used when issuing CREATE TABLE
-        statements.
-
-    :param onupdate: A scalar, Python callable, or
-        :class:`~sqlalchemy.sql.expression.ClauseElement` representing a
-        default value to be applied to the column within UPDATE
-        statements, which will be invoked upon update if this column is not
-        present in the SET clause of the update. This is a shortcut to
-        using :class:`.ColumnDefault` as a positional argument with
-        ``for_update=True``.
-
-    :param primary_key: If ``True``, marks this column as a primary key
-        column. Multiple columns can have this flag set to specify
-        composite primary keys. As an alternative, the primary key of a
-        :class:`.Table` can be specified via an explicit
-        :class:`.PrimaryKeyConstraint` object.
-
-    :param server_default: A :class:`.FetchedValue` instance, str, Unicode
-        or :func:`~sqlalchemy.sql.expression.text` construct representing
-        the DDL DEFAULT value for the column.
-
-        String types will be emitted as-is, surrounded by single quotes::
-
-            Column('x', Text, server_default="val")
-
-            x TEXT DEFAULT 'val'
-
-        A :func:`~sqlalchemy.sql.expression.text` expression will be
-        rendered as-is, without quotes::
-
-            Column('y', DateTime, server_default=text('NOW()'))
-
-            y DATETIME DEFAULT NOW()
-
-        Strings and text() will be converted into a :class:`.DefaultClause`
-        object upon initialization.
-
-        Use :class:`.FetchedValue` to indicate that an already-existing
-        column will generate a default value on the database side which
-        will be available to SQLAlchemy for post-fetch after inserts. This
-        construct does not specify any DDL and the implementation is left
-        to the database, such as via a trigger.
-
-    :param server_onupdate: A :class:`.FetchedValue` instance
-        representing a database-side default generation function. This
-        indicates to SQLAlchemy that a newly generated value will be
-        available after updates. This construct does not specify any DDL
-        and the implementation is left to the database, such as via a
-        trigger.
-
-    :param quote: Force quoting of this column's name on or off,
-        corresponding to ``True`` or ``False``. When left at its default
-        of ``None``, the column identifier will be quoted according to
-        whether the name is case sensitive (identifiers with at least one
-        upper case character are treated as case sensitive), or if it's a
-        reserved word. This flag is only needed to force quoting of a
-        reserved word which is not known by the SQLAlchemy dialect.
-
-    :param unique: When ``True``, indicates that this column contains a
-        unique constraint, or if ``index`` is ``True`` as well, indicates
-        that the :class:`.Index` should be created with the unique flag.
-        To specify multiple columns in the constraint/index or to specify
-        an explicit name, use the :class:`.UniqueConstraint` or
-        :class:`.Index` constructs explicitly. 
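# [Editor's sketch] The server-side forms just documented, rendered
# into CREATE TABLE: 'x' gets DEFAULT 'val' (quoted), while 'y' gets
# DEFAULT NOW() verbatim via text().
from sqlalchemy import MetaData, Table, Column, Text, DateTime, text

meta = MetaData()
t = Table('t', meta,
          Column('x', Text, server_default='val'),
          Column('y', DateTime, server_default=text('NOW()')))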
- - """ - - name = kwargs.pop('name', None) - type_ = kwargs.pop('type_', None) - args = list(args) - if args: - if isinstance(args[0], basestring): - if name is not None: - raise exc.ArgumentError( - "May not pass name positionally and as a keyword.") - name = args.pop(0) - if args: - coltype = args[0] - - if (isinstance(coltype, sqltypes.TypeEngine) or - (isinstance(coltype, type) and - issubclass(coltype, sqltypes.TypeEngine))): - if type_ is not None: - raise exc.ArgumentError( - "May not pass type_ positionally and as a keyword.") - type_ = args.pop(0) - - no_type = type_ is None - - super(Column, self).__init__(name, None, type_) - self.key = kwargs.pop('key', name) - self.primary_key = kwargs.pop('primary_key', False) - self.nullable = kwargs.pop('nullable', not self.primary_key) - self.default = kwargs.pop('default', None) - self.server_default = kwargs.pop('server_default', None) - self.server_onupdate = kwargs.pop('server_onupdate', None) - self.index = kwargs.pop('index', None) - self.unique = kwargs.pop('unique', None) - self.quote = kwargs.pop('quote', None) - self.doc = kwargs.pop('doc', None) - self.onupdate = kwargs.pop('onupdate', None) - self.autoincrement = kwargs.pop('autoincrement', True) - self.constraints = set() - self.foreign_keys = set() - - # check if this Column is proxying another column - if '_proxies' in kwargs: - self.proxies = kwargs.pop('_proxies') - # otherwise, add DDL-related events - elif isinstance(self.type, sqltypes.SchemaType): - self.type._set_parent_with_dispatch(self) - - if self.default is not None: - if isinstance(self.default, (ColumnDefault, Sequence)): - args.append(self.default) - else: - if getattr(self.type, '_warn_on_bytestring', False): - # Py3K - #if isinstance(self.default, bytes): - # Py2K - if isinstance(self.default, str): - # end Py2K - util.warn("Unicode column received non-unicode " - "default value.") - args.append(ColumnDefault(self.default)) - - if self.server_default is not None: - if isinstance(self.server_default, FetchedValue): - args.append(self.server_default._as_for_update(False)) - else: - args.append(DefaultClause(self.server_default)) - - if self.onupdate is not None: - if isinstance(self.onupdate, (ColumnDefault, Sequence)): - args.append(self.onupdate) - else: - args.append(ColumnDefault(self.onupdate, for_update=True)) - - if self.server_onupdate is not None: - if isinstance(self.server_onupdate, FetchedValue): - args.append(self.server_onupdate._as_for_update(True)) - else: - args.append(DefaultClause(self.server_onupdate, - for_update=True)) - self._init_items(*args) - - if not self.foreign_keys and no_type: - raise exc.ArgumentError("'type' is required on Column objects " - "which have no foreign keys.") - util.set_creation_order(self) - - if 'info' in kwargs: - self.info = kwargs.pop('info') - - if kwargs: - raise exc.ArgumentError( - "Unknown arguments passed to Column: " + repr(kwargs.keys())) - - def __str__(self): - if self.name is None: - return "(no name)" - elif self.table is not None: - if self.table.named_with_column: - return (self.table.description + "." 
+ self.description)
-            else:
-                return self.description
-        else:
-            return self.description
-
-    def references(self, column):
-        """Return True if this Column references the given column via foreign
-        key."""
-
-        for fk in self.foreign_keys:
-            if fk.column.proxy_set.intersection(column.proxy_set):
-                return True
-        else:
-            return False
-
-    def append_foreign_key(self, fk):
-        fk._set_parent_with_dispatch(self)
-
-    def __repr__(self):
-        kwarg = []
-        if self.key != self.name:
-            kwarg.append('key')
-        if self.primary_key:
-            kwarg.append('primary_key')
-        if not self.nullable:
-            kwarg.append('nullable')
-        if self.onupdate:
-            kwarg.append('onupdate')
-        if self.default:
-            kwarg.append('default')
-        if self.server_default:
-            kwarg.append('server_default')
-        return "Column(%s)" % ', '.join(
-            [repr(self.name)] + [repr(self.type)] +
-            [repr(x) for x in self.foreign_keys if x is not None] +
-            [repr(x) for x in self.constraints] +
-            [(self.table is not None and "table=<%s>" %
-                    self.table.description or "table=None")] +
-            ["%s=%s" % (k, repr(getattr(self, k))) for k in kwarg])
-
-    def _set_parent(self, table):
-        if not self.name:
-            raise exc.ArgumentError(
-                "Column must be constructed with a non-blank name, or "
-                "be assigned a non-blank .name before adding to a Table.")
-        if self.key is None:
-            self.key = self.name
-
-        existing = getattr(self, 'table', None)
-        if existing is not None and existing is not table:
-            raise exc.ArgumentError(
-                "Column object already assigned to Table '%s'" %
-                existing.description)
-
-        if self.key in table._columns:
-            col = table._columns.get(self.key)
-            if col is not self:
-                for fk in list(col.foreign_keys):
-                    table.foreign_keys.remove(fk)
-                    if fk.constraint in table.constraints:
-                        # this might have been removed
-                        # already, if it's a composite constraint
-                        # and more than one col being replaced
-                        table.constraints.remove(fk.constraint)
-
-        table._columns.replace(self)
-
-        if self.primary_key:
-            table.primary_key._replace(self)
-            Table._autoincrement_column._reset(table)
-        elif self.key in table.primary_key:
-            raise exc.ArgumentError(
-                "Trying to redefine primary-key column '%s' as a "
-                "non-primary-key column on table '%s'" % (
-                    self.key, table.fullname))
-        self.table = table
-
-        if self.index:
-            if isinstance(self.index, basestring):
-                raise exc.ArgumentError(
-                    "The 'index' keyword argument on Column is boolean only. "
-                    "To create indexes with a specific name, create an "
-                    "explicit Index object external to the Table.")
-            Index(expression._truncated_label('ix_%s' % self._label),
-                  self, unique=self.unique)
-        elif self.unique:
-            if isinstance(self.unique, basestring):
-                raise exc.ArgumentError(
-                    "The 'unique' keyword argument on Column is boolean "
-                    "only. To create unique constraints or indexes with a "
-                    "specific name, append an explicit UniqueConstraint to "
-                    "the Table's list of elements, or create an explicit "
-                    "Index object external to the Table.")
-            table.append_constraint(UniqueConstraint(self.key))
-
-    def _on_table_attach(self, fn):
-        if self.table is not None:
-            fn(self, self.table)
-        event.listen(self, 'after_parent_attach', fn)
-
-    def copy(self, **kw):
-        """Create a copy of this ``Column``, uninitialized.
-
-        This is used in ``Table.tometadata``.
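-
-        For example, ``Table.tometadata`` uses this when carrying a table
-        and its columns over to another :class:`.MetaData` (a minimal
-        sketch; names are illustrative)::
-
-            meta2 = MetaData()
-            users_copy = users.tometadata(meta2)
-            # users_copy is a new Table with copies of each Column,
-            # constraint and non-constraint-bound ForeignKey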
-
-        """
-
-        # Constraint objects plus non-constraint-bound ForeignKey objects
-        args = \
-            [c.copy(**kw) for c in self.constraints] + \
-            [c.copy(**kw) for c in self.foreign_keys if not c.constraint]
-
-        c = self._constructor(
-            name=self.name,
-            type_=self.type,
-            key=self.key,
-            primary_key=self.primary_key,
-            nullable=self.nullable,
-            unique=self.unique,
-            quote=self.quote,
-            index=self.index,
-            autoincrement=self.autoincrement,
-            default=self.default,
-            server_default=self.server_default,
-            onupdate=self.onupdate,
-            server_onupdate=self.server_onupdate,
-            info=self.info,
-            doc=self.doc,
-            *args
-        )
-        c.dispatch._update(self.dispatch)
-        return c
-
-    def _make_proxy(self, selectable, name=None):
-        """Create a *proxy* for this column.
-
-        This is a copy of this ``Column`` referenced by a different parent
-        (such as an alias or select statement). The column should
-        be used only in select scenarios, as its full DDL/default
-        information is not transferred.
-
-        """
-        fk = [ForeignKey(f.column) for f in self.foreign_keys]
-        if name is None and self.name is None:
-            raise exc.InvalidRequestError("Cannot initialize a sub-selectable"
-                    " with this Column object until its 'name' has "
-                    "been assigned.")
-        try:
-            c = self._constructor(
-                expression._as_truncated(name or self.name),
-                self.type,
-                key=name or self.key,
-                primary_key=self.primary_key,
-                nullable=self.nullable,
-                quote=self.quote, _proxies=[self], *fk)
-        except TypeError, e:
-            # Py3K
-            #raise TypeError(
-            #    "Could not create a copy of this %r object. "
-            #    "Ensure the class includes a _constructor() "
-            #    "attribute or method which accepts the "
-            #    "standard Column constructor arguments, or "
-            #    "references the Column class itself." % self.__class__) from e
-            # Py2K
-            raise TypeError(
-                "Could not create a copy of this %r object. "
-                "Ensure the class includes a _constructor() "
-                "attribute or method which accepts the "
-                "standard Column constructor arguments, or "
-                "references the Column class itself. "
-                "Original error: %s" % (self.__class__, e))
-            # end Py2K
-
-        c.table = selectable
-        selectable._columns.add(c)
-        if selectable._is_clone_of is not None:
-            c._is_clone_of = selectable._is_clone_of.columns[c.name]
-        if self.primary_key:
-            selectable.primary_key.add(c)
-        c.dispatch.after_parent_attach(c, selectable)
-        return c
-
-    def get_children(self, schema_visitor=False, **kwargs):
-        if schema_visitor:
-            return [x for x in (self.default, self.onupdate)
-                    if x is not None] + \
-                list(self.foreign_keys) + list(self.constraints)
-        else:
-            return expression.ColumnClause.get_children(self, **kwargs)
-
-
-class ForeignKey(SchemaItem):
-    """Defines a dependency between two columns.
-
-    ``ForeignKey`` is specified as an argument to a :class:`.Column` object,
-    e.g.::
-
-        t = Table("remote_table", metadata,
-            Column("remote_id", ForeignKey("main_table.id"))
-        )
-
-    Note that ``ForeignKey`` is only a marker object that defines
-    a dependency between two columns. The actual constraint
-    is in all cases represented by the :class:`.ForeignKeyConstraint`
-    object. This object will be generated automatically when
-    a ``ForeignKey`` is associated with a :class:`.Column` which
-    in turn is associated with a :class:`.Table`. Conversely,
-    when :class:`.ForeignKeyConstraint` is applied to a :class:`.Table`,
-    ``ForeignKey`` markers are automatically generated to be
-    present on each associated :class:`.Column`, which are also
-    associated with the constraint object.
-
-    Note that you cannot define a "composite" foreign key constraint,
-    that is a constraint between a grouping of multiple parent/child
-    columns, using ``ForeignKey`` objects. To define this grouping,
-    the :class:`.ForeignKeyConstraint` object must be used, and applied
-    to the :class:`.Table`. The associated ``ForeignKey`` objects
-    are created automatically.
-
-    The ``ForeignKey`` objects associated with an individual
-    :class:`.Column` object are available in the `foreign_keys` collection
-    of that column.
-
-    Further examples of foreign key configuration are in
-    :ref:`metadata_foreignkeys`.
-
-    """
-
-    __visit_name__ = 'foreign_key'
-
-    def __init__(self, column, _constraint=None, use_alter=False, name=None,
-                 onupdate=None, ondelete=None, deferrable=None,
-                 schema=None,
-                 initially=None, link_to_name=False):
-        """
-        Construct a column-level FOREIGN KEY.
-
-        The :class:`.ForeignKey` object when constructed generates a
-        :class:`.ForeignKeyConstraint` which is associated with the parent
-        :class:`.Table` object's collection of constraints.
-
-        :param column: A single target column for the key relationship. A
-            :class:`.Column` object or a column name as a string:
-            ``tablename.columnkey`` or ``schema.tablename.columnkey``.
-            ``columnkey`` is the ``key`` which has been assigned to the column
-            (defaults to the column name itself), unless ``link_to_name`` is
-            ``True`` in which case the rendered name of the column is used.
-
-            .. versionadded:: 0.7.4
-                Note that if the schema name is not included, and the
-                underlying :class:`.MetaData` has a "schema", that value will
-                be used.
-
-        :param name: Optional string. An in-database name for the key if
-            `constraint` is not provided.
-
-        :param onupdate: Optional string. If set, emit ON UPDATE when
-            issuing DDL for this constraint. Typical values include CASCADE,
-            DELETE and RESTRICT.
-
-        :param ondelete: Optional string. If set, emit ON DELETE when
-            issuing DDL for this constraint. Typical values include CASCADE,
-            DELETE and RESTRICT.
-
-        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
-            DEFERRABLE when issuing DDL for this constraint.
-
-        :param initially: Optional string. If set, emit INITIALLY when
-            issuing DDL for this constraint.
-
-        :param link_to_name: if True, the string name given in ``column`` is
-            the rendered name of the referenced column, not its locally
-            assigned ``key``.
-
-        :param use_alter: passed to the underlying
-            :class:`.ForeignKeyConstraint` to indicate the constraint should
-            be generated/dropped externally from the CREATE TABLE/ DROP TABLE
-            statement. See that class's constructor for details.
-
-        """
-
-        self._colspec = column
-
-        # the linked ForeignKeyConstraint.
-        # ForeignKey will create this when parent Column
-        # is attached to a Table, *or* ForeignKeyConstraint
-        # object passes itself in when creating ForeignKey
-        # markers.
-        self.constraint = _constraint
-
-        self.use_alter = use_alter
-        self.name = name
-        self.onupdate = onupdate
-        self.ondelete = ondelete
-        self.deferrable = deferrable
-        self.initially = initially
-        self.link_to_name = link_to_name
-
-    def __repr__(self):
-        return "ForeignKey(%r)" % self._get_colspec()
-
-    def copy(self, schema=None):
-        """Produce a copy of this :class:`.ForeignKey` object.
-
-        The new :class:`.ForeignKey` will not be bound
-        to any :class:`.Column`.
-
-        This method is usually used by the internal
-        copy procedures of :class:`.Column`, :class:`.Table`,
-        and :class:`.MetaData`.
-
-        :param schema: The returned :class:`.ForeignKey` will
-            reference the original table and column name, qualified
-            by the given string schema name.
-
-        """
-
-        fk = ForeignKey(
-            self._get_colspec(schema=schema),
-            use_alter=self.use_alter,
-            name=self.name,
-            onupdate=self.onupdate,
-            ondelete=self.ondelete,
-            deferrable=self.deferrable,
-            initially=self.initially,
-            link_to_name=self.link_to_name
-        )
-        fk.dispatch._update(self.dispatch)
-        return fk
-
-    def _get_colspec(self, schema=None):
-        """Return a string based 'column specification' for this
-        :class:`.ForeignKey`.
-
-        This is usually the equivalent of the string-based "tablename.colname"
-        argument first passed to the object's constructor.
-
-        """
-        if schema:
-            return schema + "." + self.column.table.name + \
-                "." + self.column.key
-        elif isinstance(self._colspec, basestring):
-            return self._colspec
-        elif hasattr(self._colspec, '__clause_element__'):
-            _column = self._colspec.__clause_element__()
-        else:
-            _column = self._colspec
-
-        return "%s.%s" % (_column.table.fullname, _column.key)
-
-    target_fullname = property(_get_colspec)
-
-    def references(self, table):
-        """Return True if the given :class:`.Table` is referenced by this
-        :class:`.ForeignKey`."""
-
-        return table.corresponding_column(self.column) is not None
-
-    def get_referent(self, table):
-        """Return the :class:`.Column` in the given :class:`.Table`
-        referenced by this :class:`.ForeignKey`.
-
-        Returns None if this :class:`.ForeignKey` does not reference the given
-        :class:`.Table`.
-
-        """
-
-        return table.corresponding_column(self.column)
-
-    @util.memoized_property
-    def column(self):
-        """Return the target :class:`.Column` referenced by this
-        :class:`.ForeignKey`.
-
-        If this :class:`.ForeignKey` was created using a
-        string-based target column specification, this
-        attribute will on first access initiate a resolution
-        process to locate the referenced remote
-        :class:`.Column`. The resolution process traverses
-        to the parent :class:`.Column`, :class:`.Table`, and
-        :class:`.MetaData` to proceed - if any of these aren't
-        yet present, an error is raised.
-
-        """
-        # ForeignKey inits its remote column as late as possible, so tables
-        # can be defined without dependencies
-        if isinstance(self._colspec, basestring):
-            # locate the parent table this foreign key is attached to. we
-            # use the "original" column which our parent column represents
-            # (it's a list of columns/other ColumnElements if the parent
-            # table is a UNION)
-            for c in self.parent.base_columns:
-                if isinstance(c, Column):
-                    parenttable = c.table
-                    break
-            else:
-                raise exc.ArgumentError(
-                    "Parent column '%s' does not descend from a "
-                    "table-attached Column" % str(self.parent))
-
-            m = self._colspec.split('.')
-
-            if m is None:
-                raise exc.ArgumentError(
-                    "Invalid foreign key column specification: %s" %
-                    self._colspec)
-
-            # A FK between column 'bar' and table 'foo' can be
-            # specified as 'foo', 'foo.bar', 'dbo.foo.bar',
-            # 'otherdb.dbo.foo.bar'. Once we have the column name and
-            # the table name, treat everything else as the schema
-            # name. Some databases (e.g. Sybase) support
-            # inter-database foreign keys. See tickets #1341 and --
-            # indirectly related -- ticket #594. This assumes that '.'
-            # will never appear *within* any component of the FK.
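-            # For illustration (hypothetical names), each of these
-            # specifications would resolve here:
-            #   ForeignKey('users.id')              -> table 'users', column 'id'
-            #   ForeignKey('users')                 -> column matched by parent key
-            #   ForeignKey('dbo.users.id')          -> schema 'dbo'
-            #   ForeignKey('otherdb.dbo.users.id')  -> inter-database reference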
-
-            (schema, tname, colname) = (None, None, None)
-            if schema is None and parenttable.metadata.schema is not None:
-                schema = parenttable.metadata.schema
-
-            if (len(m) == 1):
-                tname = m.pop()
-            else:
-                colname = m.pop()
-                tname = m.pop()
-
-            if (len(m) > 0):
-                schema = '.'.join(m)
-
-            if _get_table_key(tname, schema) not in parenttable.metadata:
-                raise exc.NoReferencedTableError(
-                    "Foreign key associated with column '%s' could not find "
-                    "table '%s' with which to generate a "
-                    "foreign key to target column '%s'" %
-                    (self.parent, tname, colname),
-                    tname)
-            table = Table(tname, parenttable.metadata,
-                          mustexist=True, schema=schema)
-
-            _column = None
-            if colname is None:
-                # colname is None in the case that ForeignKey argument
-                # was specified as table name only, in which case we
-                # match the column name to the same column on the
-                # parent.
-                key = self.parent
-                _column = table.c.get(self.parent.key, None)
-            elif self.link_to_name:
-                key = colname
-                for c in table.c:
-                    if c.name == colname:
-                        _column = c
-            else:
-                key = colname
-                _column = table.c.get(colname, None)
-
-            if _column is None:
-                raise exc.NoReferencedColumnError(
-                    "Could not create ForeignKey '%s' on table '%s': "
-                    "table '%s' has no column named '%s'" % (
-                        self._colspec, parenttable.name, table.name, key),
-                    table.name, key)
-
-        elif hasattr(self._colspec, '__clause_element__'):
-            _column = self._colspec.__clause_element__()
-        else:
-            _column = self._colspec
-
-        # propagate TypeEngine to parent if it didn't have one
-        if isinstance(self.parent.type, sqltypes.NullType):
-            self.parent.type = _column.type
-        return _column
-
-    def _set_parent(self, column):
-        if hasattr(self, 'parent'):
-            if self.parent is column:
-                return
-            raise exc.InvalidRequestError(
-                "This ForeignKey already has a parent!")
-        self.parent = column
-        self.parent.foreign_keys.add(self)
-        self.parent._on_table_attach(self._set_table)
-
-    def _set_table(self, column, table):
-        # standalone ForeignKey - create ForeignKeyConstraint
-        # on the hosting Table when attached to the Table.
-        if self.constraint is None and isinstance(table, Table):
-            self.constraint = ForeignKeyConstraint(
-                [], [], use_alter=self.use_alter, name=self.name,
-                onupdate=self.onupdate, ondelete=self.ondelete,
-                deferrable=self.deferrable, initially=self.initially,
-            )
-            self.constraint._elements[self.parent] = self
-            self.constraint._set_parent_with_dispatch(table)
-        table.foreign_keys.add(self)
-
-class _NotAColumnExpr(object):
-    def _not_a_column_expr(self):
-        raise exc.InvalidRequestError(
-            "This %s cannot be used directly "
-            "as a column expression."
% self.__class__.__name__)
-
-    __clause_element__ = self_group = lambda self: self._not_a_column_expr()
-    _from_objects = property(lambda self: self._not_a_column_expr())
-
-class DefaultGenerator(_NotAColumnExpr, SchemaItem):
-    """Base class for column *default* values."""
-
-    __visit_name__ = 'default_generator'
-
-    is_sequence = False
-    is_server_default = False
-    column = None
-
-    def __init__(self, for_update=False):
-        self.for_update = for_update
-
-    def _set_parent(self, column):
-        self.column = column
-        if self.for_update:
-            self.column.onupdate = self
-        else:
-            self.column.default = self
-
-    def execute(self, bind=None, **kwargs):
-        if bind is None:
-            bind = _bind_or_error(self)
-        return bind._execute_default(self, **kwargs)
-
-    @property
-    def bind(self):
-        """Return the connectable associated with this default."""
-        if getattr(self, 'column', None) is not None:
-            return self.column.table.bind
-        else:
-            return None
-
-class ColumnDefault(DefaultGenerator):
-    """A plain default value on a column.
-
-    This could correspond to a constant, a callable function,
-    or a SQL clause.
-
-    :class:`.ColumnDefault` is generated automatically
-    whenever the ``default``, ``onupdate`` arguments of
-    :class:`.Column` are used. A :class:`.ColumnDefault`
-    can be passed positionally as well.
-
-    For example, the following::
-
-        Column('foo', Integer, default=50)
-
-    Is equivalent to::
-
-        Column('foo', Integer, ColumnDefault(50))
-
-    """
-
-    def __init__(self, arg, **kwargs):
-        super(ColumnDefault, self).__init__(**kwargs)
-        if isinstance(arg, FetchedValue):
-            raise exc.ArgumentError(
-                "ColumnDefault may not be a server-side default type.")
-        if util.callable(arg):
-            arg = self._maybe_wrap_callable(arg)
-        self.arg = arg
-
-    @util.memoized_property
-    def is_callable(self):
-        return util.callable(self.arg)
-
-    @util.memoized_property
-    def is_clause_element(self):
-        return isinstance(self.arg, expression.ClauseElement)
-
-    @util.memoized_property
-    def is_scalar(self):
-        return not self.is_callable and \
-            not self.is_clause_element and \
-            not self.is_sequence
-
-    def _maybe_wrap_callable(self, fn):
-        """Backward compat: Wrap callables that don't accept a context."""
-
-        if inspect.isfunction(fn):
-            inspectable = fn
-        elif inspect.isclass(fn):
-            inspectable = fn.__init__
-        elif hasattr(fn, '__call__'):
-            inspectable = fn.__call__
-        else:
-            # probably not inspectable, try anyway.
-            inspectable = fn
-        try:
-            argspec = inspect.getargspec(inspectable)
-        except TypeError:
-            return lambda ctx: fn()
-
-        positionals = len(argspec[0])
-
-        # Py3K compat - no unbound methods
-        if inspect.ismethod(inspectable) or inspect.isclass(fn):
-            positionals -= 1
-
-        if positionals == 0:
-            return lambda ctx: fn()
-
-        defaulted = argspec[3] is not None and len(argspec[3]) or 0
-        if positionals - defaulted > 1:
-            raise exc.ArgumentError(
-                "ColumnDefault Python function takes zero or one "
-                "positional argument")
-        return fn
-
-    def _visit_name(self):
-        if self.for_update:
-            return "column_onupdate"
-        else:
-            return "column_default"
-    __visit_name__ = property(_visit_name)
-
-    def __repr__(self):
-        return "ColumnDefault(%r)" % self.arg
-
-class Sequence(DefaultGenerator):
-    """Represents a named database sequence.
-
-    The :class:`.Sequence` object represents the name and configurational
-    parameters of a database sequence.
It also represents - a construct that can be "executed" by a SQLAlchemy :class:`.Engine` - or :class:`.Connection`, rendering the appropriate "next value" function - for the target database and returning a result. - - The :class:`.Sequence` is typically associated with a primary key column:: - - some_table = Table('some_table', metadata, - Column('id', Integer, Sequence('some_table_seq'), primary_key=True) - ) - - When CREATE TABLE is emitted for the above :class:`.Table`, if the - target platform supports sequences, a CREATE SEQUENCE statement will - be emitted as well. For platforms that don't support sequences, - the :class:`.Sequence` construct is ignored. - - See also: :class:`.CreateSequence` :class:`.DropSequence` - - """ - - __visit_name__ = 'sequence' - - is_sequence = True - - def __init__(self, name, start=None, increment=None, schema=None, - optional=False, quote=None, metadata=None, - quote_schema=None, - for_update=False): - """Construct a :class:`.Sequence` object. - - :param name: The name of the sequence. - :param start: the starting index of the sequence. This value is - used when the CREATE SEQUENCE command is emitted to the database - as the value of the "START WITH" clause. If ``None``, the - clause is omitted, which on most platforms indicates a starting - value of 1. - :param increment: the increment value of the sequence. This - value is used when the CREATE SEQUENCE command is emitted to - the database as the value of the "INCREMENT BY" clause. If ``None``, - the clause is omitted, which on most platforms indicates an - increment of 1. - :param schema: Optional schema name for the sequence, if located - in a schema other than the default. - :param optional: boolean value, when ``True``, indicates that this - :class:`.Sequence` object only needs to be explicitly generated - on backends that don't provide another way to generate primary - key identifiers. Currently, it essentially means, "don't create - this sequence on the Postgresql backend, where the SERIAL keyword - creates a sequence for us automatically". - :param quote: boolean value, when ``True`` or ``False``, explicitly - forces quoting of the schema name on or off. When left at its - default of ``None``, normal quoting rules based on casing and reserved - words take place. - :param metadata: optional :class:`.MetaData` object which will be - associated with this :class:`.Sequence`. A :class:`.Sequence` - that is associated with a :class:`.MetaData` gains access to the - ``bind`` of that :class:`.MetaData`, meaning the :meth:`.Sequence.create` - and :meth:`.Sequence.drop` methods will make usage of that engine - automatically. - - .. versionchanged:: 0.7 - Additionally, the appropriate CREATE SEQUENCE/ - DROP SEQUENCE DDL commands will be emitted corresponding to this - :class:`.Sequence` when :meth:`.MetaData.create_all` and - :meth:`.MetaData.drop_all` are invoked. - - Note that when a :class:`.Sequence` is applied to a :class:`.Column`, - the :class:`.Sequence` is automatically associated with the - :class:`.MetaData` object of that column's parent :class:`.Table`, - when that association is made. The :class:`.Sequence` will then - be subject to automatic CREATE SEQUENCE/DROP SEQUENCE corresponding - to when the :class:`.Table` object itself is created or dropped, - rather than that of the :class:`.MetaData` object overall. 
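-
-          For example (a minimal sketch; the table and sequence names are
-          illustrative)::
-
-              metadata = MetaData()
-              widgets = Table('widgets', metadata,
-                  Column('id', Integer,
-                         Sequence('widget_id_seq'), primary_key=True)
-              )
-              # 'widget_id_seq' is now tied to 'metadata' and will be
-              # created/dropped along with the 'widgets' table.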
- :param for_update: Indicates this :class:`.Sequence`, when associated - with a :class:`.Column`, should be invoked for UPDATE statements - on that column's table, rather than for INSERT statements, when - no value is otherwise present for that column in the statement. - - """ - super(Sequence, self).__init__(for_update=for_update) - self.name = name - self.start = start - self.increment = increment - self.optional = optional - self.quote = quote - if metadata is not None and schema is None and metadata.schema: - self.schema = schema = metadata.schema - self.quote_schema = metadata.quote_schema - else: - self.schema = schema - self.quote_schema = quote_schema - self.metadata = metadata - self._key = _get_table_key(name, schema) - if metadata: - self._set_metadata(metadata) - - @util.memoized_property - def is_callable(self): - return False - - @util.memoized_property - def is_clause_element(self): - return False - - def next_value(self): - """Return a :class:`.next_value` function element - which will render the appropriate increment function - for this :class:`.Sequence` within any SQL expression. - - """ - return expression.func.next_value(self, bind=self.bind) - - def _set_parent(self, column): - super(Sequence, self)._set_parent(column) - column._on_table_attach(self._set_table) - - def _set_table(self, column, table): - self._set_metadata(table.metadata) - - def _set_metadata(self, metadata): - self.metadata = metadata - self.metadata._sequences[self._key] = self - - @property - def bind(self): - if self.metadata: - return self.metadata.bind - else: - return None - - def create(self, bind=None, checkfirst=True): - """Creates this sequence in the database.""" - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst) - - def drop(self, bind=None, checkfirst=True): - """Drops this sequence from the database.""" - - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst) - - def _not_a_column_expr(self): - raise exc.InvalidRequestError( - "This %s cannot be used directly " - "as a column expression. Use func.next_value(sequence) " - "to produce a 'next value' function that's usable " - "as a column element." - % self.__class__.__name__) - - -class FetchedValue(_NotAColumnExpr, events.SchemaEventTarget): - """A marker for a transparent database-side default. - - Use :class:`.FetchedValue` when the database is configured - to provide some automatic default for a column. - - E.g.:: - - Column('foo', Integer, FetchedValue()) - - Would indicate that some trigger or default generator - will create a new value for the ``foo`` column during an - INSERT. - - """ - is_server_default = True - reflected = False - has_argument = False - - def __init__(self, for_update=False): - self.for_update = for_update - - def _as_for_update(self, for_update): - if for_update == self.for_update: - return self - else: - return self._clone(for_update) - - def _clone(self, for_update): - n = self.__class__.__new__(self.__class__) - n.__dict__.update(self.__dict__) - n.__dict__.pop('column', None) - n.for_update = for_update - return n - - def _set_parent(self, column): - self.column = column - if self.for_update: - self.column.server_onupdate = self - else: - self.column.server_default = self - - def __repr__(self): - return util.generic_repr(self) - -class DefaultClause(FetchedValue): - """A DDL-specified DEFAULT column value. 
- - :class:`.DefaultClause` is a :class:`.FetchedValue` - that also generates a "DEFAULT" clause when - "CREATE TABLE" is emitted. - - :class:`.DefaultClause` is generated automatically - whenever the ``server_default``, ``server_onupdate`` arguments of - :class:`.Column` are used. A :class:`.DefaultClause` - can be passed positionally as well. - - For example, the following:: - - Column('foo', Integer, server_default="50") - - Is equivalent to:: - - Column('foo', Integer, DefaultClause("50")) - - """ - - has_argument = True - - def __init__(self, arg, for_update=False, _reflected=False): - util.assert_arg_type(arg, (basestring, - expression.ClauseElement, - expression._TextClause), 'arg') - super(DefaultClause, self).__init__(for_update) - self.arg = arg - self.reflected = _reflected - - def __repr__(self): - return "DefaultClause(%r, for_update=%r)" % \ - (self.arg, self.for_update) - -class PassiveDefault(DefaultClause): - """A DDL-specified DEFAULT column value. - - .. deprecated:: 0.6 - :class:`.PassiveDefault` is deprecated. - Use :class:`.DefaultClause`. - """ - @util.deprecated("0.6", - ":class:`.PassiveDefault` is deprecated. " - "Use :class:`.DefaultClause`.", - False) - def __init__(self, *arg, **kw): - DefaultClause.__init__(self, *arg, **kw) - -class Constraint(SchemaItem): - """A table-level SQL constraint.""" - - __visit_name__ = 'constraint' - - def __init__(self, name=None, deferrable=None, initially=None, - _create_rule=None, - **kw): - """Create a SQL constraint. - - :param name: - Optional, the in-database name of this ``Constraint``. - - :param deferrable: - Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when - issuing DDL for this constraint. - - :param initially: - Optional string. If set, emit INITIALLY when issuing DDL - for this constraint. - - :param _create_rule: - a callable which is passed the DDLCompiler object during - compilation. Returns True or False to signal inline generation of - this Constraint. - - The AddConstraint and DropConstraint DDL constructs provide - DDLElement's more comprehensive "conditional DDL" approach that is - passed a database connection when DDL is being issued. _create_rule - is instead called during any CREATE TABLE compilation, where there - may not be any transaction/connection in progress. However, it - allows conditional compilation of the constraint even for backends - which do not support addition of constraints through ALTER TABLE, - which currently includes SQLite. - - _create_rule is used by some types to create constraints. - Currently, its call signature is subject to change at any time. - - :param \**kwargs: - Dialect-specific keyword parameters, see the documentation - for various dialects and constraints regarding options here. - - """ - - self.name = name - self.deferrable = deferrable - self.initially = initially - self._create_rule = _create_rule - util.set_creation_order(self) - _validate_dialect_kwargs(kw, self.__class__.__name__) - self.kwargs = kw - - @property - def table(self): - try: - if isinstance(self.parent, Table): - return self.parent - except AttributeError: - pass - raise exc.InvalidRequestError( - "This constraint is not bound to a table. 
Did you "
-            "mean to call table.append_constraint(constraint) ?")
-
-    def _set_parent(self, parent):
-        self.parent = parent
-        parent.constraints.add(self)
-
-    def copy(self, **kw):
-        raise NotImplementedError()
-
-class ColumnCollectionMixin(object):
-    def __init__(self, *columns):
-        self.columns = expression.ColumnCollection()
-        self._pending_colargs = [_to_schema_column_or_string(c)
-                                 for c in columns]
-        if self._pending_colargs and \
-                isinstance(self._pending_colargs[0], Column) and \
-                self._pending_colargs[0].table is not None:
-            self._set_parent_with_dispatch(self._pending_colargs[0].table)
-
-    def _set_parent(self, table):
-        for col in self._pending_colargs:
-            if isinstance(col, basestring):
-                col = table.c[col]
-            self.columns.add(col)
-
-class ColumnCollectionConstraint(ColumnCollectionMixin, Constraint):
-    """A constraint that proxies a ColumnCollection."""
-
-    def __init__(self, *columns, **kw):
-        """
-        :param \*columns:
-          A sequence of column names or Column objects.
-
-        :param name:
-          Optional, the in-database name of this constraint.
-
-        :param deferrable:
-          Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
-          issuing DDL for this constraint.
-
-        :param initially:
-          Optional string. If set, emit INITIALLY when issuing DDL
-          for this constraint.
-
-        """
-        ColumnCollectionMixin.__init__(self, *columns)
-        Constraint.__init__(self, **kw)
-
-    def _set_parent(self, table):
-        ColumnCollectionMixin._set_parent(self, table)
-        Constraint._set_parent(self, table)
-
-    def __contains__(self, x):
-        return x in self.columns
-
-    def copy(self, **kw):
-        c = self.__class__(name=self.name, deferrable=self.deferrable,
-                           initially=self.initially, *self.columns.keys())
-        c.dispatch._update(self.dispatch)
-        return c
-
-    def contains_column(self, col):
-        return self.columns.contains_column(col)
-
-    def __iter__(self):
-        # inlining of
-        # return iter(self.columns)
-        # ColumnCollection->OrderedProperties->OrderedDict
-        ordered_dict = self.columns._data
-        return (ordered_dict[key] for key in ordered_dict._list)
-
-    def __len__(self):
-        return len(self.columns._data)
-
-
-class CheckConstraint(Constraint):
-    """A table- or column-level CHECK constraint.
-
-    Can be included in the definition of a Table or Column.
-    """
-
-    def __init__(self, sqltext, name=None, deferrable=None,
-                 initially=None, table=None, _create_rule=None):
-        """Construct a CHECK constraint.
-
-        :param sqltext:
-          A string containing the constraint definition, which will be used
-          verbatim, or a SQL expression construct.
-
-        :param name:
-          Optional, the in-database name of the constraint.
-
-        :param deferrable:
-          Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
-          issuing DDL for this constraint.
-
-        :param initially:
-          Optional string. If set, emit INITIALLY when issuing DDL
-          for this constraint.
-
-        """
-
-        super(CheckConstraint, self).\
-            __init__(name, deferrable, initially, _create_rule)
-        self.sqltext = expression._literal_as_text(sqltext)
-        if table is not None:
-            self._set_parent_with_dispatch(table)
-
-    def __visit_name__(self):
-        if isinstance(self.parent, Table):
-            return "check_constraint"
-        else:
-            return "column_check_constraint"
-    __visit_name__ = property(__visit_name__)
-
-    def copy(self, **kw):
-        c = CheckConstraint(self.sqltext,
-                            name=self.name,
-                            initially=self.initially,
-                            deferrable=self.deferrable,
-                            _create_rule=self._create_rule)
-        c.dispatch._update(self.dispatch)
-        return c
-
-class ForeignKeyConstraint(Constraint):
-    """A table-level FOREIGN KEY constraint.
-
-    Defines a single column or composite FOREIGN KEY ... REFERENCES
-    constraint. For a no-frills, single column foreign key, adding a
-    :class:`.ForeignKey` to the definition of a :class:`.Column` is a
-    shorthand equivalent for an unnamed, single column
-    :class:`.ForeignKeyConstraint`.
-
-    Examples of foreign key configuration are in :ref:`metadata_foreignkeys`.
-
-    """
-    __visit_name__ = 'foreign_key_constraint'
-
-    def __init__(self, columns, refcolumns, name=None, onupdate=None,
-                 ondelete=None, deferrable=None, initially=None,
-                 use_alter=False, link_to_name=False, table=None):
-        """Construct a composite-capable FOREIGN KEY.
-
-        :param columns: A sequence of local column names. The named columns
-          must be defined and present in the parent Table. The names should
-          match the ``key`` given to each column (defaults to the name)
-          unless ``link_to_name`` is True.
-
-        :param refcolumns: A sequence of foreign column names or Column
-          objects. The columns must all be located within the same Table.
-
-        :param name: Optional, the in-database name of the key.
-
-        :param onupdate: Optional string. If set, emit ON UPDATE when
-          issuing DDL for this constraint. Typical values include CASCADE,
-          DELETE and RESTRICT.
-
-        :param ondelete: Optional string. If set, emit ON DELETE when
-          issuing DDL for this constraint. Typical values include CASCADE,
-          DELETE and RESTRICT.
-
-        :param deferrable: Optional bool. If set, emit DEFERRABLE or NOT
-          DEFERRABLE when issuing DDL for this constraint.
-
-        :param initially: Optional string. If set, emit INITIALLY when
-          issuing DDL for this constraint.
-
-        :param link_to_name: if True, the string name given in ``column`` is
-          the rendered name of the referenced column, not its locally
-          assigned ``key``.
-
-        :param use_alter: If True, do not emit the DDL for this constraint as
-          part of the CREATE TABLE definition. Instead, generate it via an
-          ALTER TABLE statement issued after the full collection of tables
-          has been created, and drop it via an ALTER TABLE statement before
-          the full collection of tables is dropped. This is shorthand for the
-          usage of :class:`.AddConstraint` and :class:`.DropConstraint`
-          applied as "after-create" and "before-drop" events on the MetaData
-          object. This is normally used to generate/drop constraints on
-          objects that are mutually dependent on each other.
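-
-        For example, a composite key spanning two columns (a minimal
-        sketch; table and column names are illustrative)::
-
-            Table('invoice_item', metadata,
-                Column('item_id', Integer, primary_key=True),
-                Column('invoice_id', Integer),
-                Column('ref_num', Integer),
-                ForeignKeyConstraint(
-                    ['invoice_id', 'ref_num'],
-                    ['invoice.invoice_id', 'invoice.ref_num'])
-            )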
- - """ - super(ForeignKeyConstraint, self).\ - __init__(name, deferrable, initially) - - self.onupdate = onupdate - self.ondelete = ondelete - self.link_to_name = link_to_name - if self.name is None and use_alter: - raise exc.ArgumentError("Alterable Constraint requires a name") - self.use_alter = use_alter - - self._elements = util.OrderedDict() - - # standalone ForeignKeyConstraint - create - # associated ForeignKey objects which will be applied to hosted - # Column objects (in col.foreign_keys), either now or when attached - # to the Table for string-specified names - for col, refcol in zip(columns, refcolumns): - self._elements[col] = ForeignKey( - refcol, - _constraint=self, - name=self.name, - onupdate=self.onupdate, - ondelete=self.ondelete, - use_alter=self.use_alter, - link_to_name=self.link_to_name - ) - - if table is not None: - self._set_parent_with_dispatch(table) - - @property - def columns(self): - return self._elements.keys() - - @property - def elements(self): - return self._elements.values() - - def _set_parent(self, table): - super(ForeignKeyConstraint, self)._set_parent(table) - for col, fk in self._elements.iteritems(): - # string-specified column names now get - # resolved to Column objects - if isinstance(col, basestring): - try: - col = table.c[col] - except KeyError: - raise exc.ArgumentError( - "Can't create ForeignKeyConstraint " - "on table '%s': no column " - "named '%s' is present." % (table.description, col)) - - if not hasattr(fk, 'parent') or \ - fk.parent is not col: - fk._set_parent_with_dispatch(col) - - if self.use_alter: - def supports_alter(ddl, event, schema_item, bind, **kw): - return table in set(kw['tables']) and \ - bind.dialect.supports_alter - - event.listen(table.metadata, "after_create", AddConstraint(self, on=supports_alter)) - event.listen(table.metadata, "before_drop", DropConstraint(self, on=supports_alter)) - - - def copy(self, **kw): - fkc = ForeignKeyConstraint( - [x.parent.key for x in self._elements.values()], - [x._get_colspec(**kw) for x in self._elements.values()], - name=self.name, - onupdate=self.onupdate, - ondelete=self.ondelete, - use_alter=self.use_alter, - deferrable=self.deferrable, - initially=self.initially, - link_to_name=self.link_to_name - ) - fkc.dispatch._update(self.dispatch) - return fkc - -class PrimaryKeyConstraint(ColumnCollectionConstraint): - """A table-level PRIMARY KEY constraint. - - Defines a single column or composite PRIMARY KEY constraint. For a - no-frills primary key, adding ``primary_key=True`` to one or more - ``Column`` definitions is a shorthand equivalent for an unnamed single- or - multiple-column PrimaryKeyConstraint. - """ - - __visit_name__ = 'primary_key_constraint' - - def _set_parent(self, table): - super(PrimaryKeyConstraint, self)._set_parent(table) - - if table.primary_key in table.constraints: - table.constraints.remove(table.primary_key) - table.primary_key = self - table.constraints.add(self) - - for c in self.columns: - c.primary_key = True - - def _replace(self, col): - self.columns.replace(col) - -class UniqueConstraint(ColumnCollectionConstraint): - """A table-level UNIQUE constraint. - - Defines a single column or composite UNIQUE constraint. For a no-frills, - single column constraint, adding ``unique=True`` to the ``Column`` - definition is a shorthand equivalent for an unnamed, single column - UniqueConstraint. - """ - - __visit_name__ = 'unique_constraint' - -class Index(ColumnCollectionMixin, SchemaItem): - """A table-level INDEX. 
- - Defines a composite (one or more column) INDEX. For a no-frills, single - column index, adding ``index=True`` to the ``Column`` definition is - a shorthand equivalent for an unnamed, single column :class:`.Index`. - - See also: - - :ref:`schema_indexes` - General information on :class:`.Index`. - - :ref:`postgresql_indexes` - PostgreSQL-specific options available for the :class:`.Index` construct. - - :ref:`mysql_indexes` - MySQL-specific options available for the :class:`.Index` construct. - """ - - __visit_name__ = 'index' - - def __init__(self, name, *columns, **kw): - """Construct an index object. - - :param name: - The name of the index - - :param \*columns: - Columns to include in the index. All columns must belong to the same - table. - - :param unique: - Defaults to False: create a unique index. - - :param \**kw: - Other keyword arguments may be interpreted by specific dialects. - - """ - self.table = None - # will call _set_parent() if table-bound column - # objects are present - ColumnCollectionMixin.__init__(self, *columns) - self.name = name - self.unique = kw.pop('unique', False) - self.kwargs = kw - - def _set_parent(self, table): - ColumnCollectionMixin._set_parent(self, table) - - if self.table is not None and table is not self.table: - raise exc.ArgumentError( - "Index '%s' is against table '%s', and " - "cannot be associated with table '%s'." % ( - self.name, - self.table.description, - table.description - ) - ) - self.table = table - for c in self.columns: - if c.table != self.table: - raise exc.ArgumentError( - "Column '%s' is not part of table '%s'." % - (c, self.table.description) - ) - table.indexes.add(self) - - @property - def bind(self): - """Return the connectable associated with this Index.""" - - return self.table.bind - - def create(self, bind=None): - """Issue a ``CREATE`` statement for this - :class:`.Index`, using the given :class:`.Connectable` - for connectivity. - - See also :meth:`.MetaData.create_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, self) - return self - - def drop(self, bind=None): - """Issue a ``DROP`` statement for this - :class:`.Index`, using the given :class:`.Connectable` - for connectivity. - - See also :meth:`.MetaData.drop_all`. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, self) - - def __repr__(self): - return 'Index(%s)' % ( - ", ".join( - [repr(self.name)] + - [repr(c) for c in self.columns] + - (self.unique and ["unique=True"] or []) - )) - -class MetaData(SchemaItem): - """A collection of :class:`.Table` objects and their associated schema constructs. - - Holds a collection of :class:`.Table` objects as well as - an optional binding to an :class:`.Engine` or - :class:`.Connection`. If bound, the :class:`.Table` objects - in the collection and their columns may participate in implicit SQL - execution. - - The :class:`.Table` objects themselves are stored in the ``metadata.tables`` - dictionary. - - The ``bind`` property may be assigned to dynamically. A common pattern is - to start unbound and then bind later when an engine is available:: - - metadata = MetaData() - # define tables - Table('mytable', metadata, ...) - # connect to an engine later, perhaps after loading a URL from a - # configuration file - metadata.bind = an_engine - - MetaData is a thread-safe object after tables have been explicitly defined - or loaded via reflection. 
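-
-    A typical round trip, building on the pattern above (a minimal
-    sketch; the URL and table name are illustrative)::
-
-        engine = create_engine('sqlite://')
-        metadata = MetaData()
-        users = Table('users', metadata,
-                      Column('id', Integer, primary_key=True))
-        metadata.create_all(bind=engine)  # emits CREATE TABLE as needed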
- - See also: - - :ref:`metadata_describing` - Introduction to database metadata - - .. index:: - single: thread safety; MetaData - - """ - - __visit_name__ = 'metadata' - - def __init__(self, bind=None, reflect=False, schema=None, quote_schema=None): - """Create a new MetaData object. - - :param bind: - An Engine or Connection to bind to. May also be a string or URL - instance, these are passed to create_engine() and this MetaData will - be bound to the resulting engine. - - :param reflect: - Optional, automatically load all tables from the bound database. - Defaults to False. ``bind`` is required when this option is set. - For finer control over loaded tables, use the ``reflect`` method of - ``MetaData``. - - :param schema: - The default schema to use for the :class:`.Table`, :class:`.Sequence`, and other - objects associated with this :class:`.MetaData`. - Defaults to ``None``. - - :param quote_schema: - Sets the ``quote_schema`` flag for those :class:`.Table`, :class:`.Sequence`, - and other objects which make usage of the local ``schema`` name. - - .. versionadded:: 0.7.4 - ``schema`` and ``quote_schema`` parameters. - - """ - self.tables = util.immutabledict() - self.schema = schema - self.quote_schema = quote_schema - self._schemas = set() - self._sequences = {} - self.bind = bind - if reflect: - if not bind: - raise exc.ArgumentError( - "A bind must be supplied in conjunction " - "with reflect=True") - self.reflect() - - def __repr__(self): - return 'MetaData(bind=%r)' % self.bind - - def __contains__(self, table_or_key): - if not isinstance(table_or_key, basestring): - table_or_key = table_or_key.key - return table_or_key in self.tables - - def _add_table(self, name, schema, table): - key = _get_table_key(name, schema) - dict.__setitem__(self.tables, key, table) - if schema: - self._schemas.add(schema) - - def _remove_table(self, name, schema): - key = _get_table_key(name, schema) - dict.pop(self.tables, key, None) - if self._schemas: - self._schemas = set([t.schema - for t in self.tables.values() - if t.schema is not None]) - - def __getstate__(self): - return {'tables': self.tables, 'schema':self.schema, - 'quote_schema':self.quote_schema, - 'schemas':self._schemas, - 'sequences':self._sequences} - - def __setstate__(self, state): - self.tables = state['tables'] - self.schema = state['schema'] - self.quote_schema = state['quote_schema'] - self._bind = None - self._sequences = state['sequences'] - self._schemas = state['schemas'] - - def is_bound(self): - """True if this MetaData is bound to an Engine or Connection.""" - - return self._bind is not None - - def bind(self): - """An :class:`.Engine` or :class:`.Connection` to which this - :class:`.MetaData` is bound. - - Typically, a :class:`.Engine` is assigned to this attribute - so that "implicit execution" may be used, or alternatively - as a means of providing engine binding information to an - ORM :class:`.Session` object:: - - engine = create_engine("someurl://") - metadata.bind = engine - - .. 
seealso::
-
-            :ref:`dbengine_implicit` - background on "bound metadata"
-
-        """
-        return self._bind
-
-    def _bind_to(self, bind):
-        """Bind this MetaData to an Engine, Connection, string or URL."""
-
-        if isinstance(bind, (basestring, url.URL)):
-            from sqlalchemy import create_engine
-            self._bind = create_engine(bind)
-        else:
-            self._bind = bind
-    bind = property(bind, _bind_to)
-
-    def clear(self):
-        """Clear all Table objects from this MetaData."""
-
-        dict.clear(self.tables)
-        self._schemas.clear()
-
-    def remove(self, table):
-        """Remove the given Table object from this MetaData."""
-
-        self._remove_table(table.name, table.schema)
-
-    @property
-    def sorted_tables(self):
-        """Returns a list of ``Table`` objects sorted in order of
-        dependency.
-        """
-        return sqlutil.sort_tables(self.tables.itervalues())
-
-    def reflect(self, bind=None, schema=None, views=False, only=None):
-        """Load all available table definitions from the database.
-
-        Automatically creates ``Table`` entries in this ``MetaData`` for any
-        table available in the database but not yet present in the
-        ``MetaData``. May be called multiple times to pick up tables recently
-        added to the database, however no special action is taken if a table
-        in this ``MetaData`` no longer exists in the database.
-
-        :param bind:
-          A :class:`~sqlalchemy.engine.base.Connectable` used to access the
-          database; if None, uses the existing bind on this ``MetaData``, if
-          any.
-
-        :param schema:
-          Optional, query and reflect tables from an alternate schema.
-          If None, the schema associated with this :class:`.MetaData`
-          is used, if any.
-
-        :param views:
-          If True, also reflect views.
-
-        :param only:
-          Optional. Load only a sub-set of available named tables. May be
-          specified as a sequence of names or a callable.
-
-          If a sequence of names is provided, only those tables will be
-          reflected. An error is raised if a table is requested but not
-          available. Named tables already present in this ``MetaData`` are
-          ignored.
-
-          If a callable is provided, it will be used as a boolean predicate to
-          filter the list of potential table names. The callable is called
-          with a table name and this ``MetaData`` instance as positional
-          arguments and should return a true value for any table to reflect.
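-
-          For example (a minimal sketch; the engine and table names are
-          illustrative)::
-
-              meta = MetaData()
-              meta.reflect(bind=some_engine, only=['user', 'address'])
-
-              # or, with a predicate:
-              meta.reflect(bind=some_engine,
-                           only=lambda name, meta: name.startswith('tmp_'))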
- - """ - if bind is None: - bind = _bind_or_error(self) - - if bind.engine is not bind: - conn = bind - close = False - else: - conn = bind.contextual_connect() - close = True - - reflect_opts = { - 'autoload': True, - 'autoload_with': bind - } - - if schema is None: - schema = self.schema - - if schema is not None: - reflect_opts['schema'] = schema - - try: - available = util.OrderedSet(bind.engine.table_names(schema, - connection=conn)) - if views: - available.update( - bind.dialect.get_view_names(conn, schema) - ) - - current = set(self.tables.iterkeys()) - - if only is None: - load = [name for name in available if name not in current] - elif util.callable(only): - load = [name for name in available - if name not in current and only(name, self)] - else: - missing = [name for name in only if name not in available] - if missing: - s = schema and (" schema '%s'" % schema) or '' - raise exc.InvalidRequestError( - 'Could not reflect: requested table(s) not available ' - 'in %s%s: (%s)' % - (bind.engine.url, s, ', '.join(missing))) - load = [name for name in only if name not in current] - - for name in load: - Table(name, self, **reflect_opts) - finally: - if close: - conn.close() - - def append_ddl_listener(self, event_name, listener): - """Append a DDL event listener to this ``MetaData``. - - Deprecated. See :class:`.DDLEvents`. - - """ - def adapt_listener(target, connection, **kw): - tables = kw['tables'] - listener(event, target, connection, tables=tables) - - event.listen(self, "" + event_name.replace('-', '_'), adapt_listener) - - def create_all(self, bind=None, tables=None, checkfirst=True): - """Create all tables stored in this metadata. - - Conditional by default, will not attempt to recreate tables already - present in the target database. - - :param bind: - A :class:`~sqlalchemy.engine.base.Connectable` used to access the - database; if None, uses the existing bind on this ``MetaData``, if - any. - - :param tables: - Optional list of ``Table`` objects, which is a subset of the total - tables in the ``MetaData`` (others are ignored). - - :param checkfirst: - Defaults to True, don't issue CREATEs for tables already present - in the target database. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaGenerator, - self, - checkfirst=checkfirst, - tables=tables) - - def drop_all(self, bind=None, tables=None, checkfirst=True): - """Drop all tables stored in this metadata. - - Conditional by default, will not attempt to drop tables not present in - the target database. - - :param bind: - A :class:`~sqlalchemy.engine.base.Connectable` used to access the - database; if None, uses the existing bind on this ``MetaData``, if - any. - - :param tables: - Optional list of ``Table`` objects, which is a subset of the - total tables in the ``MetaData`` (others are ignored). - - :param checkfirst: - Defaults to True, only issue DROPs for tables confirmed to be - present in the target database. - - """ - if bind is None: - bind = _bind_or_error(self) - bind._run_visitor(ddl.SchemaDropper, - self, - checkfirst=checkfirst, - tables=tables) - -class ThreadLocalMetaData(MetaData): - """A MetaData variant that presents a different ``bind`` in every thread. - - Makes the ``bind`` property of the MetaData a thread-local value, allowing - this collection of tables to be bound to different ``Engine`` - implementations or connections in each thread. - - The ThreadLocalMetaData starts off bound to None in each thread. 
Binds
-    must be made explicitly by assigning to the ``bind`` property or using
-    ``connect()``. You can also re-bind dynamically multiple times per
-    thread, just like a regular ``MetaData``.
-
-    """
-
-    __visit_name__ = 'metadata'
-
-    def __init__(self):
-        """Construct a ThreadLocalMetaData."""
-
-        self.context = util.threading.local()
-        self.__engines = {}
-        super(ThreadLocalMetaData, self).__init__()
-
-    def bind(self):
-        """The bound Engine or Connection for this thread.
-
-        This property may be assigned an Engine or Connection, or assigned a
-        string or URL to automatically create a basic Engine for this bind
-        with ``create_engine()``."""
-
-        return getattr(self.context, '_engine', None)
-
-    def _bind_to(self, bind):
-        """Bind to a Connectable in the caller's thread."""
-
-        if isinstance(bind, (basestring, url.URL)):
-            try:
-                self.context._engine = self.__engines[bind]
-            except KeyError:
-                from sqlalchemy import create_engine
-                e = create_engine(bind)
-                self.__engines[bind] = e
-                self.context._engine = e
-        else:
-            # TODO: this is squirrelly. we shouldn't have to hold onto
-            # engines in a case like this
-            if bind not in self.__engines:
-                self.__engines[bind] = bind
-            self.context._engine = bind
-
-    bind = property(bind, _bind_to)
-
-    def is_bound(self):
-        """True if there is a bind for this thread."""
-        return (hasattr(self.context, '_engine') and
-                self.context._engine is not None)
-
-    def dispose(self):
-        """Dispose all bound engines, in all thread contexts."""
-
-        for e in self.__engines.itervalues():
-            if hasattr(e, 'dispose'):
-                e.dispose()
-
-class SchemaVisitor(visitors.ClauseVisitor):
-    """Define the visiting for ``SchemaItem`` objects."""
-
-    __traverse_options__ = {'schema_visitor': True}
-
-
-class DDLElement(expression.Executable, expression.ClauseElement):
-    """Base class for DDL expression constructs.
-
-    This class is the base for the general purpose :class:`.DDL` class,
-    as well as the various create/drop clause constructs such as
-    :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`,
-    etc.
-
-    :class:`.DDLElement` integrates closely with SQLAlchemy events,
-    introduced in :ref:`event_toplevel`. An instance of one is
-    itself an event receiving callable::
-
-        event.listen(
-            users,
-            'after_create',
-            AddConstraint(constraint).execute_if(dialect='postgresql')
-        )
-
-    See also:
-
-        :class:`.DDL`
-
-        :class:`.DDLEvents`
-
-        :ref:`event_toplevel`
-
-        :ref:`schema_ddl_sequences`
-
-    """
-
-    _execution_options = expression.Executable.\
-        _execution_options.union({'autocommit': True})
-
-    target = None
-    on = None
-    dialect = None
-    callable_ = None
-
-    def execute(self, bind=None, target=None):
-        """Execute this DDL immediately.
-
-        Executes the DDL statement in isolation using the supplied
-        :class:`~sqlalchemy.engine.base.Connectable`, or the
-        :class:`~sqlalchemy.engine.base.Connectable` assigned to the
-        ``.bind`` property if none is supplied. If the DDL has a
-        conditional ``on`` criteria, it will be invoked with None as
-        the event.
-
-        :param bind:
-          Optional, an ``Engine`` or ``Connection``. If not supplied, a valid
-          :class:`~sqlalchemy.engine.base.Connectable` must be present in the
-          ``.bind`` property.
-
-        :param target:
-          Optional, defaults to None. The target SchemaItem for the
-          execute call. Will be passed to the ``on`` callable if any,
-          and may also provide string expansion data for the
-          statement. See ``execute_at`` for more information.
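-
-          For example (a sketch; the statement and engine are
-          illustrative)::
-
-              DDL('ALTER TABLE users DISABLE TRIGGER ALL').execute(bind=engine)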
- - """ - - if bind is None: - bind = _bind_or_error(self) - - if self._should_execute(target, bind): - return bind.execute(self.against(target)) - else: - bind.engine.logger.info( - "DDL execution skipped, criteria not met.") - - @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as " - ":meth:`.DDLElement.execute_if`.") - def execute_at(self, event_name, target): - """Link execution of this DDL to the DDL lifecycle of a SchemaItem. - - Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, - executing it when that schema item is created or dropped. The DDL - statement will be executed using the same Connection and transactional - context as the Table create/drop itself. The ``.bind`` property of - this statement is ignored. - - :param event: - One of the events defined in the schema item's ``.ddl_events``; - e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop' - - :param target: - The Table or MetaData instance for which this DDLElement will - be associated with. - - A DDLElement instance can be linked to any number of schema items. - - ``execute_at`` builds on the ``append_ddl_listener`` interface of - :class:`.MetaData` and :class:`.Table` objects. - - Caveat: Creating or dropping a Table in isolation will also trigger - any DDL set to ``execute_at`` that Table's MetaData. This may change - in a future release. - - """ - - def call_event(target, connection, **kw): - if self._should_execute_deprecated(event_name, - target, connection, **kw): - return connection.execute(self.against(target)) - - event.listen(target, "" + event_name.replace('-', '_'), call_event) - - @expression._generative - def against(self, target): - """Return a copy of this DDL against a specific schema item.""" - - self.target = target - - @expression._generative - def execute_if(self, dialect=None, callable_=None, state=None): - """Return a callable that will execute this - DDLElement conditionally. - - Used to provide a wrapper for event listening:: - - event.listen( - metadata, - 'before_create', - DDL("my_ddl").execute_if(dialect='postgresql') - ) - - :param dialect: May be a string, tuple or a callable - predicate. If a string, it will be compared to the name of the - executing database dialect:: - - DDL('something').execute_if(dialect='postgresql') - - If a tuple, specifies multiple dialect names:: - - DDL('something').execute_if(dialect=('postgresql', 'mysql')) - - :param callable_: A callable, which will be invoked with - four positional arguments as well as optional keyword - arguments: - - :ddl: - This DDL element. - - :target: - The :class:`.Table` or :class:`.MetaData` object which is the target of - this event. May be None if the DDL is executed explicitly. - - :bind: - The :class:`.Connection` being used for DDL execution - - :tables: - Optional keyword argument - a list of Table objects which are to - be created/ dropped within a MetaData.create_all() or drop_all() - method call. - - :state: - Optional keyword argument - will be the ``state`` argument - passed to this function. - - :checkfirst: - Keyword argument, will be True if the 'checkfirst' flag was - set during the call to ``create()``, ``create_all()``, - ``drop()``, ``drop_all()``. - - If the callable returns a true value, the DDL statement will be - executed. - - :param state: any value which will be passed to the callable_ - as the ``state`` keyword argument. 
- - See also: - - :class:`.DDLEvents` - - :ref:`event_toplevel` - - """ - self.dialect = dialect - self.callable_ = callable_ - self.state = state - - def _should_execute(self, target, bind, **kw): - if self.on is not None and \ - not self._should_execute_deprecated(None, target, bind, **kw): - return False - - if isinstance(self.dialect, basestring): - if self.dialect != bind.engine.name: - return False - elif isinstance(self.dialect, (tuple, list, set)): - if bind.engine.name not in self.dialect: - return False - if self.callable_ is not None and \ - not self.callable_(self, target, bind, state=self.state, **kw): - return False - - return True - - def _should_execute_deprecated(self, event, target, bind, **kw): - if self.on is None: - return True - elif isinstance(self.on, basestring): - return self.on == bind.engine.name - elif isinstance(self.on, (tuple, list, set)): - return bind.engine.name in self.on - else: - return self.on(self, event, target, bind, **kw) - - def __call__(self, target, bind, **kw): - """Execute the DDL as a ddl_listener.""" - - if self._should_execute(target, bind, **kw): - return bind.execute(self.against(target)) - - def _check_ddl_on(self, on): - if (on is not None and - (not isinstance(on, (basestring, tuple, list, set)) and - not util.callable(on))): - raise exc.ArgumentError( - "Expected the name of a database dialect, a tuple " - "of names, or a callable for " - "'on' criteria, got type '%s'." % type(on).__name__) - - def bind(self): - if self._bind: - return self._bind - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - def _generate(self): - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - return s - - def _compiler(self, dialect, **kw): - """Return a compiler appropriate for this ClauseElement, given a - Dialect.""" - - return dialect.ddl_compiler(dialect, self, **kw) - -class DDL(DDLElement): - """A literal DDL statement. - - Specifies literal SQL DDL to be executed by the database. DDL objects - function as DDL event listeners, and can be subscribed to those events - listed in :class:`.DDLEvents`, using either :class:`.Table` or :class:`.MetaData` - objects as targets. Basic templating support allows a single DDL instance - to handle repetitive tasks for multiple tables. - - Examples:: - - from sqlalchemy import event, DDL - - tbl = Table('users', metadata, Column('uid', Integer)) - event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger')) - - spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE') - event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb')) - - drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE') - connection.execute(drop_spow) - - When operating on Table events, the following ``statement`` - string substitions are available:: - - %(table)s - the Table name, with any required quoting applied - %(schema)s - the schema name, with any required quoting applied - %(fullname)s - the Table name including schema, quoted if needed - - The DDL's "context", if any, will be combined with the standard - substutions noted above. Keys present in the context will override - the standard substitutions. - - """ - - __visit_name__ = "ddl" - - def __init__(self, statement, on=None, context=None, bind=None): - """Create a DDL statement. - - :param statement: - A string or unicode string to be executed. Statements will be - processed with Python's string formatting operator. See the - ``context`` argument and the ``execute_at`` method. 
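A short sketch of the statement templating described above; the index name and the ix_name context key are invented for illustration:

    from sqlalchemy import create_engine, event, MetaData, Table, Column, Integer
    from sqlalchemy.schema import DDL

    engine = create_engine('sqlite://')   # illustrative bind
    metadata = MetaData()
    tbl = Table('users', metadata, Column('uid', Integer))

    # %(table)s is filled in from the Table target at execution time; keys
    # from 'context' are merged in and take precedence over the standard ones.
    idx = DDL('CREATE INDEX %(ix_name)s ON %(table)s (uid)',
              context={'ix_name': 'ix_users_uid'})
    event.listen(tbl, 'after_create', idx)

    metadata.create_all(engine)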
- - A literal '%' in a statement must be escaped as '%%'. - - SQL bind parameters are not available in DDL statements. - - :param on: - Deprecated. See :meth:`.DDLElement.execute_if`. - - Optional filtering criteria. May be a string, tuple or a callable - predicate. If a string, it will be compared to the name of the - executing database dialect:: - - DDL('something', on='postgresql') - - If a tuple, specifies multiple dialect names:: - - DDL('something', on=('postgresql', 'mysql')) - - If a callable, it will be invoked with four positional arguments - as well as optional keyword arguments: - - :ddl: - This DDL element. - - :event: - The name of the event that has triggered this DDL, such as - 'after-create' Will be None if the DDL is executed explicitly. - - :target: - The ``Table`` or ``MetaData`` object which is the target of - this event. May be None if the DDL is executed explicitly. - - :connection: - The ``Connection`` being used for DDL execution - - :tables: - Optional keyword argument - a list of Table objects which are to - be created/ dropped within a MetaData.create_all() or drop_all() - method call. - - - If the callable returns a true value, the DDL statement will be - executed. - - :param context: - Optional dictionary, defaults to None. These values will be - available for use in string substitutions on the DDL statement. - - :param bind: - Optional. A :class:`~sqlalchemy.engine.base.Connectable`, used by - default when ``execute()`` is invoked without a bind argument. - - - See also: - - :class:`.DDLEvents` - :mod:`sqlalchemy.event` - - """ - - if not isinstance(statement, basestring): - raise exc.ArgumentError( - "Expected a string or unicode SQL statement, got '%r'" % - statement) - - self.statement = statement - self.context = context or {} - - self._check_ddl_on(on) - self.on = on - self._bind = bind - - - def __repr__(self): - return '<%s@%s; %s>' % ( - type(self).__name__, id(self), - ', '.join([repr(self.statement)] + - ['%s=%r' % (key, getattr(self, key)) - for key in ('on', 'context') - if getattr(self, key)])) - -def _to_schema_column(element): - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - if not isinstance(element, Column): - raise exc.ArgumentError("schema.Column object expected") - return element - -def _to_schema_column_or_string(element): - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - if not isinstance(element, (basestring, expression.ColumnElement)): - raise exc.ArgumentError("Element %r is not a string name or column element" % element) - return element - -class _CreateDropBase(DDLElement): - """Base class for DDL constucts that represent CREATE and DROP or - equivalents. - - The common theme of _CreateDropBase is a single - ``element`` attribute which refers to the element - to be created or dropped. - - """ - - def __init__(self, element, on=None, bind=None): - self.element = element - self._check_ddl_on(on) - self.on = on - self.bind = bind - - def _create_rule_disable(self, compiler): - """Allow disable of _create_rule using a callable. - - Pass to _create_rule using - util.portable_instancemethod(self._create_rule_disable) - to retain serializability. - - """ - return False - -class CreateSchema(_CreateDropBase): - """Represent a CREATE SCHEMA statement. - - .. versionadded:: 0.7.4 - - The argument here is the string name of the schema. 
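Because the create/drop constructs in this section are ClauseElements, str() compiles them against the default dialect, which is a quick way to inspect the DDL they render; a sketch with illustrative names (CreateTable and DropTable follow below):

    from sqlalchemy import MetaData, Table, Column, Integer
    from sqlalchemy.schema import CreateSchema, CreateTable, DropTable

    metadata = MetaData()
    accounts = Table('accounts', metadata, Column('id', Integer, primary_key=True))

    print(CreateSchema('analytics'))   # CREATE SCHEMA analytics
    print(CreateTable(accounts))       # CREATE TABLE accounts (...), PRIMARY KEY (id)
    print(DropTable(accounts))         # DROP TABLE accounts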
- - """ - - __visit_name__ = "create_schema" - - def __init__(self, name, quote=None, **kw): - """Create a new :class:`.CreateSchema` construct.""" - - self.quote = quote - super(CreateSchema, self).__init__(name, **kw) - -class DropSchema(_CreateDropBase): - """Represent a DROP SCHEMA statement. - - The argument here is the string name of the schema. - - .. versionadded:: 0.7.4 - - """ - - __visit_name__ = "drop_schema" - - def __init__(self, name, quote=None, cascade=False, **kw): - """Create a new :class:`.DropSchema` construct.""" - - self.quote = quote - self.cascade=cascade - super(DropSchema, self).__init__(name, **kw) - - -class CreateTable(_CreateDropBase): - """Represent a CREATE TABLE statement.""" - - __visit_name__ = "create_table" - -class DropTable(_CreateDropBase): - """Represent a DROP TABLE statement.""" - - __visit_name__ = "drop_table" - -class CreateSequence(_CreateDropBase): - """Represent a CREATE SEQUENCE statement.""" - - __visit_name__ = "create_sequence" - -class DropSequence(_CreateDropBase): - """Represent a DROP SEQUENCE statement.""" - - __visit_name__ = "drop_sequence" - -class CreateIndex(_CreateDropBase): - """Represent a CREATE INDEX statement.""" - - __visit_name__ = "create_index" - -class DropIndex(_CreateDropBase): - """Represent a DROP INDEX statement.""" - - __visit_name__ = "drop_index" - -class AddConstraint(_CreateDropBase): - """Represent an ALTER TABLE ADD CONSTRAINT statement.""" - - __visit_name__ = "add_constraint" - - def __init__(self, element, *args, **kw): - super(AddConstraint, self).__init__(element, *args, **kw) - element._create_rule = util.portable_instancemethod( - self._create_rule_disable) - -class DropConstraint(_CreateDropBase): - """Represent an ALTER TABLE DROP CONSTRAINT statement.""" - - __visit_name__ = "drop_constraint" - - def __init__(self, element, cascade=False, **kw): - self.cascade = cascade - super(DropConstraint, self).__init__(element, **kw) - element._create_rule = util.portable_instancemethod( - self._create_rule_disable) - -def _bind_or_error(schemaitem, msg=None): - bind = schemaitem.bind - if not bind: - name = schemaitem.__class__.__name__ - label = getattr(schemaitem, 'fullname', - getattr(schemaitem, 'name', None)) - if label: - item = '%s %r' % (name, label) - else: - item = name - if isinstance(schemaitem, (MetaData, DDL)): - bindable = "the %s's .bind" % name - else: - bindable = "this %s's .metadata.bind" % name - - if msg is None: - msg = "The %s is not bound to an Engine or Connection. "\ - "Execution can not proceed without a database to execute "\ - "against. Either execute with an explicit connection or "\ - "assign %s to enable implicit execution." 
% \ - (item, bindable) - raise exc.UnboundExecutionError(msg) - return bind - diff --git a/libs/sqlalchemy/sql/__init__.py b/libs/sqlalchemy/sql/__init__.py deleted file mode 100644 index 77fbfc84..00000000 --- a/libs/sqlalchemy/sql/__init__.py +++ /dev/null @@ -1,69 +0,0 @@ -# sql/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy.sql.expression import ( - Alias, - ClauseElement, - ColumnCollection, - ColumnElement, - CompoundSelect, - Delete, - FromClause, - Insert, - Join, - Select, - Selectable, - TableClause, - Update, - alias, - and_, - asc, - between, - bindparam, - case, - cast, - collate, - column, - delete, - desc, - distinct, - except_, - except_all, - exists, - extract, - false, - func, - insert, - intersect, - intersect_all, - join, - label, - literal, - literal_column, - modifier, - not_, - null, - or_, - outerjoin, - outparam, - over, - select, - subquery, - table, - text, - true, - tuple_, - type_coerce, - union, - union_all, - update, - ) - -from sqlalchemy.sql.visitors import ClauseVisitor - -__tmp = locals().keys() -__all__ = sorted([i for i in __tmp if not i.startswith('__')]) - diff --git a/libs/sqlalchemy/sql/compiler.py b/libs/sqlalchemy/sql/compiler.py deleted file mode 100644 index 9dc56d1f..00000000 --- a/libs/sqlalchemy/sql/compiler.py +++ /dev/null @@ -1,2146 +0,0 @@ -# sql/compiler.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Base SQL and DDL compiler implementations. - -Classes provided include: - -:class:`~sqlalchemy.sql.compiler.SQLCompiler` - renders SQL -strings - -:class:`~sqlalchemy.sql.compiler.DDLCompiler` - renders DDL -(data definition language) strings - -:class:`~sqlalchemy.sql.compiler.GenericTypeCompiler` - renders -type specification strings. - -To generate user-defined SQL strings, see -:module:`~sqlalchemy.ext.compiler`. - -""" - -import re -import sys -from sqlalchemy import schema, engine, util, exc -from sqlalchemy.sql import operators, functions, util as sql_util, \ - visitors -from sqlalchemy.sql import expression as sql -import decimal -import itertools - -RESERVED_WORDS = set([ - 'all', 'analyse', 'analyze', 'and', 'any', 'array', - 'as', 'asc', 'asymmetric', 'authorization', 'between', - 'binary', 'both', 'case', 'cast', 'check', 'collate', - 'column', 'constraint', 'create', 'cross', 'current_date', - 'current_role', 'current_time', 'current_timestamp', - 'current_user', 'default', 'deferrable', 'desc', - 'distinct', 'do', 'else', 'end', 'except', 'false', - 'for', 'foreign', 'freeze', 'from', 'full', 'grant', - 'group', 'having', 'ilike', 'in', 'initially', 'inner', - 'intersect', 'into', 'is', 'isnull', 'join', 'leading', - 'left', 'like', 'limit', 'localtime', 'localtimestamp', - 'natural', 'new', 'not', 'notnull', 'null', 'off', 'offset', - 'old', 'on', 'only', 'or', 'order', 'outer', 'overlaps', - 'placing', 'primary', 'references', 'right', 'select', - 'session_user', 'set', 'similar', 'some', 'symmetric', 'table', - 'then', 'to', 'trailing', 'true', 'union', 'unique', 'user', - 'using', 'verbose', 'when', 'where']) - -LEGAL_CHARACTERS = re.compile(r'^[A-Z0-9_$]+$', re.I) -ILLEGAL_INITIAL_CHARACTERS = set([str(x) for x in xrange(0, 10)]).union(['$']) - -BIND_PARAMS = re.compile(r'(? 
<![:\w\$\x5c]):([\w\$]+)(?![:\w\$])', re.UNICODE)
-BIND_PARAMS_ESC = re.compile(r'\x5c(:[\w\$]+)(?![:\w\$])', re.UNICODE)
-
-BIND_TEMPLATES = {
-    'pyformat':"%%(%(name)s)s",
-    'qmark':"?",
-    'format':"%%s",
-    'numeric':":[_POSITION]",
-    'named':":%(name)s"
-}
-
-OPERATORS = {
-    # binary
-    operators.and_ : ' AND ',
-    operators.or_ : ' OR ',
-    operators.add : ' + ',
-    operators.mul : ' * ',
-    operators.sub : ' - ',
-    # Py2K
-    operators.div : ' / ',
-    # end Py2K
-    operators.mod : ' % ',
-    operators.truediv : ' / ',
-    operators.neg : '-',
-    operators.lt : ' < ',
-    operators.le : ' <= ',
-    operators.ne : ' != ',
-    operators.gt : ' > ',
-    operators.ge : ' >= ',
-    operators.eq : ' = ',
-    operators.concat_op : ' || ',
-    operators.between_op : ' BETWEEN ',
-    operators.match_op : ' MATCH ',
-    operators.in_op : ' IN ',
-    operators.notin_op : ' NOT IN ',
-    operators.comma_op : ', ',
-    operators.from_ : ' FROM ',
-    operators.as_ : ' AS ',
-    operators.is_ : ' IS ',
-    operators.isnot : ' IS NOT ',
-    operators.collate : ' COLLATE ',
-
-    # unary
-    operators.exists : 'EXISTS ',
-    operators.distinct_op : 'DISTINCT ',
-    operators.inv : 'NOT ',
-
-    # modifiers
-    operators.desc_op : ' DESC',
-    operators.asc_op : ' ASC',
-    operators.nullsfirst_op : ' NULLS FIRST',
-    operators.nullslast_op : ' NULLS LAST',
-}
-
-FUNCTIONS = {
-    functions.coalesce : 'coalesce%(expr)s',
-    functions.current_date: 'CURRENT_DATE',
-    functions.current_time: 'CURRENT_TIME',
-    functions.current_timestamp: 'CURRENT_TIMESTAMP',
-    functions.current_user: 'CURRENT_USER',
-    functions.localtime: 'LOCALTIME',
-    functions.localtimestamp: 'LOCALTIMESTAMP',
-    functions.random: 'random%(expr)s',
-    functions.sysdate: 'sysdate',
-    functions.session_user :'SESSION_USER',
-    functions.user: 'USER'
-}
-
-EXTRACT_MAP = {
-    'month': 'month',
-    'day': 'day',
-    'year': 'year',
-    'second': 'second',
-    'hour': 'hour',
-    'doy': 'doy',
-    'minute': 'minute',
-    'quarter': 'quarter',
-    'dow': 'dow',
-    'week': 'week',
-    'epoch': 'epoch',
-    'milliseconds': 'milliseconds',
-    'microseconds': 'microseconds',
-    'timezone_hour': 'timezone_hour',
-    'timezone_minute': 'timezone_minute'
-}
-
-COMPOUND_KEYWORDS = {
-    sql.CompoundSelect.UNION : 'UNION',
-    sql.CompoundSelect.UNION_ALL : 'UNION ALL',
-    sql.CompoundSelect.EXCEPT : 'EXCEPT',
-    sql.CompoundSelect.EXCEPT_ALL : 'EXCEPT ALL',
-    sql.CompoundSelect.INTERSECT : 'INTERSECT',
-    sql.CompoundSelect.INTERSECT_ALL : 'INTERSECT ALL'
-}
-
-class _CompileLabel(visitors.Visitable):
-    """lightweight label object which acts as an expression._Label."""
-
-    __visit_name__ = 'label'
-    __slots__ = 'element', 'name'
-
-    def __init__(self, col, name, alt_names=()):
-        self.element = col
-        self.name = name
-        self._alt_names = alt_names
-
-    @property
-    def proxy_set(self):
-        return self.element.proxy_set
-
-    @property
-    def type(self):
-        return self.element.type
-
-    @property
-    def quote(self):
-        return self.element.quote
-
-class SQLCompiler(engine.Compiled):
-    """Default implementation of Compiled.
-
-    Compiles ClauseElements into SQL strings.  Uses a similar visit
-    paradigm as visitors.ClauseVisitor but implements its own traversal.
-
-    """
-
-    extract_map = EXTRACT_MAP
-
-    compound_keywords = COMPOUND_KEYWORDS
-
-    isdelete = isinsert = isupdate = False
-    """class-level defaults which can be set at the instance
-    level to define if this Compiled instance represents
-    INSERT/UPDATE/DELETE
-    """
-
-    returning = None
-    """holds the "returning" collection of columns if
-    the statement is CRUD and defines returning columns
-    either implicitly or explicitly
-    """
-
-    returning_precedes_values = False
-    """set to True classwide to generate RETURNING
-    clauses before the VALUES or WHERE clause (i.e. MSSQL)
-    """
-
-    render_table_with_column_in_update_from = False
-    """set to True classwide to indicate the SET clause
-    in a multi-table UPDATE statement should qualify
-    columns with the table name (i.e. MySQL only)
-    """
-
-    ansi_bind_rules = False
-    """SQL 92 doesn't allow bind parameters to be used
-    in the columns clause of a SELECT, nor does it allow
-    ambiguous expressions like "? = ?".
A compiler - subclass can set this flag to False if the target - driver/DB enforces this - """ - - def __init__(self, dialect, statement, column_keys=None, - inline=False, **kwargs): - """Construct a new ``DefaultCompiler`` object. - - dialect - Dialect to be used - - statement - ClauseElement to be compiled - - column_keys - a list of column names to be compiled into an INSERT or UPDATE - statement. - - """ - self.column_keys = column_keys - - # compile INSERT/UPDATE defaults/sequences inlined (no pre- - # execute) - self.inline = inline or getattr(statement, 'inline', False) - - # a dictionary of bind parameter keys to _BindParamClause - # instances. - self.binds = {} - - # a dictionary of _BindParamClause instances to "compiled" names - # that are actually present in the generated SQL - self.bind_names = util.column_dict() - - # stack which keeps track of nested SELECT statements - self.stack = [] - - # relates label names in the final SQL to a tuple of local - # column/label name, ColumnElement object (if any) and - # TypeEngine. ResultProxy uses this for type processing and - # column targeting - self.result_map = {} - - # true if the paramstyle is positional - self.positional = dialect.positional - if self.positional: - self.positiontup = [] - self.bindtemplate = BIND_TEMPLATES[dialect.paramstyle] - - self.ctes = None - - # an IdentifierPreparer that formats the quoting of identifiers - self.preparer = dialect.identifier_preparer - self.label_length = dialect.label_length \ - or dialect.max_identifier_length - - # a map which tracks "anonymous" identifiers that are created on - # the fly here - self.anon_map = util.PopulateDict(self._process_anon) - - # a map which tracks "truncated" names based on - # dialect.label_length or dialect.max_identifier_length - self.truncated_names = {} - engine.Compiled.__init__(self, dialect, statement, **kwargs) - - if self.positional and dialect.paramstyle == 'numeric': - self._apply_numbered_params() - - @util.memoized_instancemethod - def _init_cte_state(self): - """Initialize collections related to CTEs only if - a CTE is located, to save on the overhead of - these collections otherwise. 
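For reference, a sketch of the construct that feeds these collections, assuming a release of this library with CTE support (table and CTE names are illustrative):

    from sqlalchemy import MetaData, Table, Column, Integer, select

    metadata = MetaData()
    orders = Table('orders', metadata,
                   Column('id', Integer, primary_key=True),
                   Column('total', Integer))

    # .cte() produces the element that visit_cte() renders; the compiler
    # prepends the accumulated WITH clause via _render_cte_clause().
    big = select([orders]).where(orders.c.total > 100).cte(name='big_orders')
    print(select([big.c.id]))
    # roughly: WITH big_orders AS (SELECT ... FROM orders WHERE orders.total > :total_1)
    #          SELECT big_orders.id FROM big_orders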
- - """ - # collect CTEs to tack on top of a SELECT - self.ctes = util.OrderedDict() - self.ctes_by_name = {} - self.ctes_recursive = False - if self.positional: - self.cte_positional = [] - - def _apply_numbered_params(self): - poscount = itertools.count(1) - self.string = re.sub( - r'\[_POSITION\]', - lambda m:str(util.next(poscount)), - self.string) - - @util.memoized_property - def _bind_processors(self): - return dict( - (key, value) for key, value in - ( (self.bind_names[bindparam], - bindparam.type._cached_bind_processor(self.dialect)) - for bindparam in self.bind_names ) - if value is not None - ) - - def is_subquery(self): - return len(self.stack) > 1 - - @property - def sql_compiler(self): - return self - - def construct_params(self, params=None, _group_number=None, _check=True): - """return a dictionary of bind parameter keys and values""" - - if params: - pd = {} - for bindparam, name in self.bind_names.iteritems(): - if bindparam.key in params: - pd[name] = params[bindparam.key] - elif name in params: - pd[name] = params[name] - elif _check and bindparam.required: - if _group_number: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r, " - "in parameter group %d" % - (bindparam.key, _group_number)) - else: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r" - % bindparam.key) - else: - pd[name] = bindparam.effective_value - return pd - else: - pd = {} - for bindparam in self.bind_names: - if _check and bindparam.required: - if _group_number: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r, " - "in parameter group %d" % - (bindparam.key, _group_number)) - else: - raise exc.InvalidRequestError( - "A value is required for bind parameter %r" - % bindparam.key) - pd[self.bind_names[bindparam]] = bindparam.effective_value - return pd - - @property - def params(self): - """Return the bind param dictionary embedded into this - compiled object, for those values that are present.""" - return self.construct_params(_check=False) - - def default_from(self): - """Called when a SELECT statement has no froms, and no FROM clause is - to be appended. - - Gives Oracle a chance to tack on a ``FROM DUAL`` to the string output. - - """ - return "" - - def visit_grouping(self, grouping, asfrom=False, **kwargs): - return "(" + grouping.element._compiler_dispatch(self, **kwargs) + ")" - - def visit_label(self, label, result_map=None, - within_label_clause=False, - within_columns_clause=False, **kw): - # only render labels within the columns clause - # or ORDER BY clause of a select. dialect-specific compilers - # can modify this behavior. 
- if within_columns_clause and not within_label_clause: - if isinstance(label.name, sql._truncated_label): - labelname = self._truncated_identifier("colident", label.name) - else: - labelname = label.name - - if result_map is not None: - result_map[labelname.lower()] = ( - label.name, - (label, label.element, labelname, ) + - label._alt_names, - label.type) - - return label.element._compiler_dispatch(self, - within_columns_clause=True, - within_label_clause=True, - **kw) + \ - OPERATORS[operators.as_] + \ - self.preparer.format_label(label, labelname) - else: - return label.element._compiler_dispatch(self, - within_columns_clause=False, - **kw) - - def visit_column(self, column, result_map=None, **kwargs): - name = orig_name = column.name - if name is None: - raise exc.CompileError("Cannot compile Column object until " - "it's 'name' is assigned.") - - is_literal = column.is_literal - if not is_literal and isinstance(name, sql._truncated_label): - name = self._truncated_identifier("colident", name) - - if result_map is not None: - result_map[name.lower()] = (orig_name, - (column, name, column.key), - column.type) - - if is_literal: - name = self.escape_literal_column(name) - else: - name = self.preparer.quote(name, column.quote) - - table = column.table - if table is None or not table.named_with_column: - return name - else: - if table.schema: - schema_prefix = self.preparer.quote_schema( - table.schema, - table.quote_schema) + '.' - else: - schema_prefix = '' - tablename = table.name - if isinstance(tablename, sql._truncated_label): - tablename = self._truncated_identifier("alias", tablename) - - return schema_prefix + \ - self.preparer.quote(tablename, table.quote) + \ - "." + name - - def escape_literal_column(self, text): - """provide escaping for the literal_column() construct.""" - - # TODO: some dialects might need different behavior here - return text.replace('%', '%%') - - def visit_fromclause(self, fromclause, **kwargs): - return fromclause.name - - def visit_index(self, index, **kwargs): - return index.name - - def visit_typeclause(self, typeclause, **kwargs): - return self.dialect.type_compiler.process(typeclause.type) - - def post_process_text(self, text): - return text - - def visit_textclause(self, textclause, **kwargs): - if textclause.typemap is not None: - for colname, type_ in textclause.typemap.iteritems(): - self.result_map[colname.lower()] = (colname, None, type_) - - def do_bindparam(m): - name = m.group(1) - if name in textclause.bindparams: - return self.process(textclause.bindparams[name]) - else: - return self.bindparam_string(name, **kwargs) - - # un-escape any \:params - return BIND_PARAMS_ESC.sub(lambda m: m.group(1), - BIND_PARAMS.sub(do_bindparam, - self.post_process_text(textclause.text)) - ) - - def visit_null(self, expr, **kw): - return 'NULL' - - def visit_true(self, expr, **kw): - return 'true' - - def visit_false(self, expr, **kw): - return 'false' - - def visit_clauselist(self, clauselist, **kwargs): - sep = clauselist.operator - if sep is None: - sep = " " - else: - sep = OPERATORS[clauselist.operator] - return sep.join( - s for s in - (c._compiler_dispatch(self, **kwargs) - for c in clauselist.clauses) - if s) - - def visit_case(self, clause, **kwargs): - x = "CASE " - if clause.value is not None: - x += clause.value._compiler_dispatch(self, **kwargs) + " " - for cond, result in clause.whens: - x += "WHEN " + cond._compiler_dispatch( - self, **kwargs - ) + " THEN " + result._compiler_dispatch( - self, **kwargs) + " " - if clause.else_ is not None: - 
x += "ELSE " + clause.else_._compiler_dispatch( - self, **kwargs - ) + " " - x += "END" - return x - - def visit_cast(self, cast, **kwargs): - return "CAST(%s AS %s)" % \ - (cast.clause._compiler_dispatch(self, **kwargs), - cast.typeclause._compiler_dispatch(self, **kwargs)) - - def visit_over(self, over, **kwargs): - return "%s OVER (%s)" % ( - over.func._compiler_dispatch(self, **kwargs), - ' '.join( - '%s BY %s' % (word, clause._compiler_dispatch(self, **kwargs)) - for word, clause in ( - ('PARTITION', over.partition_by), - ('ORDER', over.order_by) - ) - if clause is not None and len(clause) - ) - ) - - def visit_extract(self, extract, **kwargs): - field = self.extract_map.get(extract.field, extract.field) - return "EXTRACT(%s FROM %s)" % (field, - extract.expr._compiler_dispatch(self, **kwargs)) - - def visit_function(self, func, result_map=None, **kwargs): - if result_map is not None: - result_map[func.name.lower()] = (func.name, None, func.type) - - disp = getattr(self, "visit_%s_func" % func.name.lower(), None) - if disp: - return disp(func, **kwargs) - else: - name = FUNCTIONS.get(func.__class__, func.name + "%(expr)s") - return ".".join(list(func.packagenames) + [name]) % \ - {'expr': self.function_argspec(func, **kwargs)} - - def visit_next_value_func(self, next_value, **kw): - return self.visit_sequence(next_value.sequence) - - def visit_sequence(self, sequence): - raise NotImplementedError( - "Dialect '%s' does not support sequence increments." % self.dialect.name - ) - - def function_argspec(self, func, **kwargs): - return func.clause_expr._compiler_dispatch(self, **kwargs) - - def visit_compound_select(self, cs, asfrom=False, - parens=True, compound_index=0, **kwargs): - entry = self.stack and self.stack[-1] or {} - self.stack.append({'from': entry.get('from', None), - 'iswrapper': not entry}) - - keyword = self.compound_keywords.get(cs.keyword) - - text = (" " + keyword + " ").join( - (c._compiler_dispatch(self, - asfrom=asfrom, parens=False, - compound_index=i, **kwargs) - for i, c in enumerate(cs.selects)) - ) - - group_by = cs._group_by_clause._compiler_dispatch( - self, asfrom=asfrom, **kwargs) - if group_by: - text += " GROUP BY " + group_by - - text += self.order_by_clause(cs, **kwargs) - text += (cs._limit is not None or cs._offset is not None) and \ - self.limit_clause(cs) or "" - - if self.ctes and \ - compound_index == 0 and not entry: - text = self._render_cte_clause() + text - - self.stack.pop(-1) - if asfrom and parens: - return "(" + text + ")" - else: - return text - - def visit_unary(self, unary, **kw): - s = unary.element._compiler_dispatch(self, **kw) - if unary.operator: - s = OPERATORS[unary.operator] + s - if unary.modifier: - s = s + OPERATORS[unary.modifier] - return s - - def visit_binary(self, binary, **kw): - # don't allow "? = ?" 
to render - if self.ansi_bind_rules and \ - isinstance(binary.left, sql._BindParamClause) and \ - isinstance(binary.right, sql._BindParamClause): - kw['literal_binds'] = True - - return self._operator_dispatch(binary.operator, - binary, - lambda opstr: binary.left._compiler_dispatch(self, **kw) + - opstr + - binary.right._compiler_dispatch( - self, **kw), - **kw - ) - - def visit_like_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return '%s LIKE %s' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def visit_notlike_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return '%s NOT LIKE %s' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def visit_ilike_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return 'lower(%s) LIKE lower(%s)' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def visit_notilike_op(self, binary, **kw): - escape = binary.modifiers.get("escape", None) - return 'lower(%s) NOT LIKE lower(%s)' % ( - binary.left._compiler_dispatch(self, **kw), - binary.right._compiler_dispatch(self, **kw)) \ - + (escape and - (' ESCAPE ' + self.render_literal_value(escape, None)) - or '') - - def _operator_dispatch(self, operator, element, fn, **kw): - if util.callable(operator): - disp = getattr(self, "visit_%s" % operator.__name__, None) - if disp: - return disp(element, **kw) - else: - return fn(OPERATORS[operator]) - else: - return fn(" " + operator + " ") - - def visit_bindparam(self, bindparam, within_columns_clause=False, - literal_binds=False, **kwargs): - - if literal_binds or \ - (within_columns_clause and \ - self.ansi_bind_rules): - if bindparam.value is None: - raise exc.CompileError("Bind parameter without a " - "renderable value not allowed here.") - return self.render_literal_bindparam(bindparam, - within_columns_clause=True, **kwargs) - - name = self._truncate_bindparam(bindparam) - - if name in self.binds: - existing = self.binds[name] - if existing is not bindparam: - if (existing.unique or bindparam.unique) and \ - not existing.proxy_set.intersection(bindparam.proxy_set): - raise exc.CompileError( - "Bind parameter '%s' conflicts with " - "unique bind parameter of the same name" % - bindparam.key - ) - elif getattr(existing, '_is_crud', False) or \ - getattr(bindparam, '_is_crud', False): - raise exc.CompileError( - "bindparam() name '%s' is reserved " - "for automatic usage in the VALUES or SET " - "clause of this " - "insert/update statement. Please use a " - "name other than column name when using bindparam() " - "with insert() or update() (for example, 'b_%s')." - % (bindparam.key, bindparam.key) - ) - - self.binds[bindparam.key] = self.binds[name] = bindparam - - return self.bindparam_string(name, **kwargs) - - def render_literal_bindparam(self, bindparam, **kw): - value = bindparam.value - processor = bindparam.type._cached_bind_processor(self.dialect) - if processor: - value = processor(value) - return self.render_literal_value(value, bindparam.type) - - def render_literal_value(self, value, type_): - """Render the value of a bind parameter as a quoted literal. 
- - This is used for statement sections that do not accept bind parameters - on the target driver/database. - - This should be implemented by subclasses using the quoting services - of the DBAPI. - - """ - if isinstance(value, basestring): - value = value.replace("'", "''") - return "'%s'" % value - elif value is None: - return "NULL" - elif isinstance(value, (float, int, long)): - return repr(value) - elif isinstance(value, decimal.Decimal): - return str(value) - else: - raise NotImplementedError( - "Don't know how to literal-quote value %r" % value) - - def _truncate_bindparam(self, bindparam): - if bindparam in self.bind_names: - return self.bind_names[bindparam] - - bind_name = bindparam.key - if isinstance(bind_name, sql._truncated_label): - bind_name = self._truncated_identifier("bindparam", bind_name) - - # add to bind_names for translation - self.bind_names[bindparam] = bind_name - - return bind_name - - def _truncated_identifier(self, ident_class, name): - if (ident_class, name) in self.truncated_names: - return self.truncated_names[(ident_class, name)] - - anonname = name.apply_map(self.anon_map) - - if len(anonname) > self.label_length: - counter = self.truncated_names.get(ident_class, 1) - truncname = anonname[0:max(self.label_length - 6, 0)] + \ - "_" + hex(counter)[2:] - self.truncated_names[ident_class] = counter + 1 - else: - truncname = anonname - self.truncated_names[(ident_class, name)] = truncname - return truncname - - def _anonymize(self, name): - return name % self.anon_map - - def _process_anon(self, key): - (ident, derived) = key.split(' ', 1) - anonymous_counter = self.anon_map.get(derived, 1) - self.anon_map[derived] = anonymous_counter + 1 - return derived + "_" + str(anonymous_counter) - - def bindparam_string(self, name, positional_names=None, **kw): - if self.positional: - if positional_names is not None: - positional_names.append(name) - else: - self.positiontup.append(name) - return self.bindtemplate % {'name':name} - - def visit_cte(self, cte, asfrom=False, ashint=False, - fromhints=None, - **kwargs): - self._init_cte_state() - if self.positional: - kwargs['positional_names'] = self.cte_positional - - if isinstance(cte.name, sql._truncated_label): - cte_name = self._truncated_identifier("alias", cte.name) - else: - cte_name = cte.name - - if cte_name in self.ctes_by_name: - existing_cte = self.ctes_by_name[cte_name] - # we've generated a same-named CTE that we are enclosed in, - # or this is the same CTE. just return the name. - if cte in existing_cte._restates or cte is existing_cte: - return cte_name - elif existing_cte in cte._restates: - # we've generated a same-named CTE that is - # enclosed in us - we take precedence, so - # discard the text for the "inner". 
- del self.ctes[existing_cte] - else: - raise exc.CompileError( - "Multiple, unrelated CTEs found with " - "the same name: %r" % - cte_name) - - self.ctes_by_name[cte_name] = cte - - if cte.cte_alias: - if isinstance(cte.cte_alias, sql._truncated_label): - cte_alias = self._truncated_identifier("alias", cte.cte_alias) - else: - cte_alias = cte.cte_alias - if not cte.cte_alias and cte not in self.ctes: - if cte.recursive: - self.ctes_recursive = True - text = self.preparer.format_alias(cte, cte_name) - if cte.recursive: - if isinstance(cte.original, sql.Select): - col_source = cte.original - elif isinstance(cte.original, sql.CompoundSelect): - col_source = cte.original.selects[0] - else: - assert False - recur_cols = [c for c in - util.unique_list(col_source.inner_columns) - if c is not None] - - text += "(%s)" % (", ".join( - self.preparer.format_column(ident) - for ident in recur_cols)) - text += " AS \n" + \ - cte.original._compiler_dispatch( - self, asfrom=True, **kwargs - ) - self.ctes[cte] = text - if asfrom: - if cte.cte_alias: - text = self.preparer.format_alias(cte, cte_alias) - text += " AS " + cte_name - else: - return self.preparer.format_alias(cte, cte_name) - return text - - def visit_alias(self, alias, asfrom=False, ashint=False, - fromhints=None, **kwargs): - if asfrom or ashint: - if isinstance(alias.name, sql._truncated_label): - alias_name = self._truncated_identifier("alias", alias.name) - else: - alias_name = alias.name - - if ashint: - return self.preparer.format_alias(alias, alias_name) - elif asfrom: - ret = alias.original._compiler_dispatch(self, - asfrom=True, **kwargs) + \ - " AS " + \ - self.preparer.format_alias(alias, alias_name) - - if fromhints and alias in fromhints: - hinttext = self.get_from_hint_text(alias, fromhints[alias]) - if hinttext: - ret += " " + hinttext - - return ret - else: - return alias.original._compiler_dispatch(self, **kwargs) - - def label_select_column(self, select, column, asfrom): - """label columns present in a select().""" - - if isinstance(column, sql._Label): - return column - - elif select is not None and \ - select.use_labels and \ - column._label: - return _CompileLabel( - column, - column._label, - alt_names=(column._key_label, ) - ) - - elif \ - asfrom and \ - isinstance(column, sql.ColumnClause) and \ - not column.is_literal and \ - column.table is not None and \ - not isinstance(column.table, sql.Select): - return _CompileLabel(column, sql._as_truncated(column.name), - alt_names=(column.key,)) - elif not isinstance(column, - (sql._UnaryExpression, sql._TextClause)) \ - and (not hasattr(column, 'name') or \ - isinstance(column, sql.Function)): - return _CompileLabel(column, column.anon_label) - else: - return column - - def get_select_hint_text(self, byfroms): - return None - - def get_from_hint_text(self, table, text): - return None - - def get_crud_hint_text(self, table, text): - return None - - def visit_select(self, select, asfrom=False, parens=True, - iswrapper=False, fromhints=None, - compound_index=0, - positional_names=None, **kwargs): - - entry = self.stack and self.stack[-1] or {} - - existingfroms = entry.get('from', None) - - froms = select._get_display_froms(existingfroms) - - correlate_froms = set(sql._from_objects(*froms)) - - # TODO: might want to propagate existing froms for - # select(select(select)) where innermost select should correlate - # to outermost if existingfroms: correlate_froms = - # correlate_froms.union(existingfroms) - - populate_result_map = compound_index == 0 and ( - not entry or \ - 
entry.get('iswrapper', False) - ) - - self.stack.append({'from': correlate_froms, 'iswrapper': iswrapper}) - - if populate_result_map: - column_clause_args = {'result_map': self.result_map, - 'positional_names': positional_names} - else: - column_clause_args = {'positional_names': positional_names} - - # the actual list of columns to print in the SELECT column list. - inner_columns = [ - c for c in [ - self.label_select_column(select, co, asfrom=asfrom).\ - _compiler_dispatch(self, - within_columns_clause=True, - **column_clause_args) - for co in util.unique_list(select.inner_columns) - ] - if c is not None - ] - - text = "SELECT " # we're off to a good start ! - - if select._hints: - byfrom = dict([ - (from_, hinttext % { - 'name':from_._compiler_dispatch( - self, ashint=True) - }) - for (from_, dialect), hinttext in - select._hints.iteritems() - if dialect in ('*', self.dialect.name) - ]) - hint_text = self.get_select_hint_text(byfrom) - if hint_text: - text += hint_text + " " - - if select._prefixes: - text += " ".join( - x._compiler_dispatch(self, **kwargs) - for x in select._prefixes) + " " - text += self.get_select_precolumns(select) - text += ', '.join(inner_columns) - - if froms: - text += " \nFROM " - - if select._hints: - text += ', '.join([f._compiler_dispatch(self, - asfrom=True, fromhints=byfrom, - **kwargs) - for f in froms]) - else: - text += ', '.join([f._compiler_dispatch(self, - asfrom=True, **kwargs) - for f in froms]) - else: - text += self.default_from() - - if select._whereclause is not None: - t = select._whereclause._compiler_dispatch(self, **kwargs) - if t: - text += " \nWHERE " + t - - if select._group_by_clause.clauses: - group_by = select._group_by_clause._compiler_dispatch( - self, **kwargs) - if group_by: - text += " GROUP BY " + group_by - - if select._having is not None: - t = select._having._compiler_dispatch(self, **kwargs) - if t: - text += " \nHAVING " + t - - if select._order_by_clause.clauses: - text += self.order_by_clause(select, **kwargs) - if select._limit is not None or select._offset is not None: - text += self.limit_clause(select) - if select.for_update: - text += self.for_update_clause(select) - - if self.ctes and \ - compound_index == 0 and not entry: - text = self._render_cte_clause() + text - - self.stack.pop(-1) - - if asfrom and parens: - return "(" + text + ")" - else: - return text - - def _render_cte_clause(self): - if self.positional: - self.positiontup = self.cte_positional + self.positiontup - cte_text = self.get_cte_preamble(self.ctes_recursive) + " " - cte_text += ", \n".join( - [txt for txt in self.ctes.values()] - ) - cte_text += "\n " - return cte_text - - def get_cte_preamble(self, recursive): - if recursive: - return "WITH RECURSIVE" - else: - return "WITH" - - def get_select_precolumns(self, select): - """Called when building a ``SELECT`` statement, position is just - before column list. 
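Tying these hooks together, a sketch of a SELECT that exercises them (table and column names are illustrative):

    from sqlalchemy import MetaData, Table, Column, Integer, select

    metadata = MetaData()
    t = Table('t', metadata, Column('id', Integer), Column('x', Integer))

    # apply_labels() routes through label_select_column(), distinct() through
    # get_select_precolumns(), and limit()/offset() through limit_clause().
    stmt = select([t]).distinct().limit(10).offset(5).apply_labels()
    print(stmt)
    # roughly: SELECT DISTINCT t.id AS t_id, t.x AS t_x FROM t
    #          LIMIT :param_1 OFFSET :param_2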
- - """ - return select._distinct and "DISTINCT " or "" - - def order_by_clause(self, select, **kw): - order_by = select._order_by_clause._compiler_dispatch(self, **kw) - if order_by: - return " ORDER BY " + order_by - else: - return "" - - def for_update_clause(self, select): - if select.for_update: - return " FOR UPDATE" - else: - return "" - - def limit_clause(self, select): - text = "" - if select._limit is not None: - text += "\n LIMIT " + self.process(sql.literal(select._limit)) - if select._offset is not None: - if select._limit is None: - text += "\n LIMIT -1" - text += " OFFSET " + self.process(sql.literal(select._offset)) - return text - - def visit_table(self, table, asfrom=False, ashint=False, - fromhints=None, **kwargs): - if asfrom or ashint: - if getattr(table, "schema", None): - ret = self.preparer.quote_schema(table.schema, - table.quote_schema) + \ - "." + self.preparer.quote(table.name, - table.quote) - else: - ret = self.preparer.quote(table.name, table.quote) - if fromhints and table in fromhints: - hinttext = self.get_from_hint_text(table, fromhints[table]) - if hinttext: - ret += " " + hinttext - return ret - else: - return "" - - def visit_join(self, join, asfrom=False, **kwargs): - return ( - join.left._compiler_dispatch(self, asfrom=True, **kwargs) + - (join.isouter and " LEFT OUTER JOIN " or " JOIN ") + - join.right._compiler_dispatch(self, asfrom=True, **kwargs) + - " ON " + - join.onclause._compiler_dispatch(self, **kwargs) - ) - - def visit_insert(self, insert_stmt): - self.isinsert = True - colparams = self._get_colparams(insert_stmt) - - if not colparams and \ - not self.dialect.supports_default_values and \ - not self.dialect.supports_empty_insert: - raise exc.CompileError("The version of %s you are using does " - "not support empty inserts." % - self.dialect.name) - - preparer = self.preparer - supports_default_values = self.dialect.supports_default_values - - text = "INSERT" - - - prefixes = [self.process(x) for x in insert_stmt._prefixes] - if prefixes: - text += " " + " ".join(prefixes) - - text += " INTO " + preparer.format_table(insert_stmt.table) - - if insert_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - insert_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if insert_stmt.table in dialect_hints: - text += " " + self.get_crud_hint_text( - insert_stmt.table, - dialect_hints[insert_stmt.table] - ) - - if colparams or not supports_default_values: - text += " (%s)" % ', '.join([preparer.format_column(c[0]) - for c in colparams]) - - if self.returning or insert_stmt._returning: - self.returning = self.returning or insert_stmt._returning - returning_clause = self.returning_clause( - insert_stmt, self.returning) - - if self.returning_precedes_values: - text += " " + returning_clause - - if not colparams and supports_default_values: - text += " DEFAULT VALUES" - else: - text += " VALUES (%s)" % \ - ', '.join([c[1] for c in colparams]) - - if self.returning and not self.returning_precedes_values: - text += " " + returning_clause - - return text - - def update_limit_clause(self, update_stmt): - """Provide a hook for MySQL to add LIMIT to the UPDATE""" - return None - - def update_tables_clause(self, update_stmt, from_table, - extra_froms, **kw): - """Provide a hook to override the initial table clause - in an UPDATE statement. - - MySQL overrides this. 
- - """ - return self.preparer.format_table(from_table) - - def update_from_clause(self, update_stmt, - from_table, extra_froms, - from_hints, - **kw): - """Provide a hook to override the generation of an - UPDATE..FROM clause. - - MySQL and MSSQL override this. - - """ - return "FROM " + ', '.join( - t._compiler_dispatch(self, asfrom=True, - fromhints=from_hints, **kw) - for t in extra_froms) - - def visit_update(self, update_stmt, **kw): - self.stack.append({'from': set([update_stmt.table])}) - - self.isupdate = True - - extra_froms = update_stmt._extra_froms - - colparams = self._get_colparams(update_stmt, extra_froms) - - text = "UPDATE " + self.update_tables_clause( - update_stmt, - update_stmt.table, - extra_froms, **kw) - - if update_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - update_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if update_stmt.table in dialect_hints: - text += " " + self.get_crud_hint_text( - update_stmt.table, - dialect_hints[update_stmt.table] - ) - else: - dialect_hints = None - - text += ' SET ' - if extra_froms and self.render_table_with_column_in_update_from: - text += ', '.join( - self.visit_column(c[0]) + - '=' + c[1] for c in colparams - ) - else: - text += ', '.join( - self.preparer.quote(c[0].name, c[0].quote) + - '=' + c[1] for c in colparams - ) - - if update_stmt._returning: - self.returning = update_stmt._returning - if self.returning_precedes_values: - text += " " + self.returning_clause( - update_stmt, update_stmt._returning) - - if extra_froms: - extra_from_text = self.update_from_clause( - update_stmt, - update_stmt.table, - extra_froms, - dialect_hints, **kw) - if extra_from_text: - text += " " + extra_from_text - - if update_stmt._whereclause is not None: - text += " WHERE " + self.process(update_stmt._whereclause) - - limit_clause = self.update_limit_clause(update_stmt) - if limit_clause: - text += " " + limit_clause - - if self.returning and not self.returning_precedes_values: - text += " " + self.returning_clause( - update_stmt, update_stmt._returning) - - self.stack.pop(-1) - - return text - - def _create_crud_bind_param(self, col, value, required=False): - bindparam = sql.bindparam(col.key, value, - type_=col.type, required=required) - bindparam._is_crud = True - return bindparam._compiler_dispatch(self) - - - def _get_colparams(self, stmt, extra_tables=None): - """create a set of tuples representing column/string pairs for use - in an INSERT or UPDATE statement. - - Also generates the Compiled object's postfetch, prefetch, and - returning column collections, used for default handling and ultimately - populating the ResultProxy's prefetch_cols() and postfetch_cols() - collections. 
- - """ - - self.postfetch = [] - self.prefetch = [] - self.returning = [] - - # no parameters in the statement, no parameters in the - # compiled params - return binds for all columns - if self.column_keys is None and stmt.parameters is None: - return [ - (c, self._create_crud_bind_param(c, - None, required=True)) - for c in stmt.table.columns - ] - - required = object() - - # if we have statement parameters - set defaults in the - # compiled params - if self.column_keys is None: - parameters = {} - else: - parameters = dict((sql._column_as_key(key), required) - for key in self.column_keys - if not stmt.parameters or - key not in stmt.parameters) - - if stmt.parameters is not None: - for k, v in stmt.parameters.iteritems(): - parameters.setdefault(sql._column_as_key(k), v) - - # create a list of column assignment clauses as tuples - values = [] - - need_pks = self.isinsert and \ - not self.inline and \ - not stmt._returning - - implicit_returning = need_pks and \ - self.dialect.implicit_returning and \ - stmt.table.implicit_returning - - postfetch_lastrowid = need_pks and self.dialect.postfetch_lastrowid - - check_columns = {} - # special logic that only occurs for multi-table UPDATE - # statements - if extra_tables and stmt.parameters: - assert self.isupdate - affected_tables = set() - for t in extra_tables: - for c in t.c: - if c in stmt.parameters: - affected_tables.add(t) - check_columns[c.key] = c - value = stmt.parameters[c] - if sql._is_literal(value): - value = self._create_crud_bind_param( - c, value, required=value is required) - else: - self.postfetch.append(c) - value = self.process(value.self_group()) - values.append((c, value)) - # determine tables which are actually - # to be updated - process onupdate and - # server_onupdate for these - for t in affected_tables: - for c in t.c: - if c in stmt.parameters: - continue - elif c.onupdate is not None and not c.onupdate.is_sequence: - if c.onupdate.is_clause_element: - values.append( - (c, self.process(c.onupdate.arg.self_group())) - ) - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - elif c.server_onupdate is not None: - self.postfetch.append(c) - - # iterating through columns at the top to maintain ordering. - # otherwise we might iterate through individual sets of - # "defaults", "primary key cols", etc. 
- for c in stmt.table.columns: - if c.key in parameters and c.key not in check_columns: - value = parameters.pop(c.key) - if sql._is_literal(value): - value = self._create_crud_bind_param( - c, value, required=value is required) - elif c.primary_key and implicit_returning: - self.returning.append(c) - value = self.process(value.self_group()) - else: - self.postfetch.append(c) - value = self.process(value.self_group()) - values.append((c, value)) - - elif self.isinsert: - if c.primary_key and \ - need_pks and \ - ( - implicit_returning or - not postfetch_lastrowid or - c is not stmt.table._autoincrement_column - ): - - if implicit_returning: - if c.default is not None: - if c.default.is_sequence: - if self.dialect.supports_sequences and \ - (not c.default.optional or \ - not self.dialect.sequences_optional): - proc = self.process(c.default) - values.append((c, proc)) - self.returning.append(c) - elif c.default.is_clause_element: - values.append( - (c, - self.process(c.default.arg.self_group())) - ) - self.returning.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - else: - self.returning.append(c) - else: - if c.default is not None or \ - c is stmt.table._autoincrement_column and ( - self.dialect.supports_sequences or - self.dialect.preexecute_autoincrement_sequences - ): - - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - - self.prefetch.append(c) - - elif c.default is not None: - if c.default.is_sequence: - if self.dialect.supports_sequences and \ - (not c.default.optional or \ - not self.dialect.sequences_optional): - proc = self.process(c.default) - values.append((c, proc)) - if not c.primary_key: - self.postfetch.append(c) - elif c.default.is_clause_element: - values.append( - (c, self.process(c.default.arg.self_group())) - ) - - if not c.primary_key: - # dont add primary key column to postfetch - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - elif c.server_default is not None: - if not c.primary_key: - self.postfetch.append(c) - - elif self.isupdate: - if c.onupdate is not None and not c.onupdate.is_sequence: - if c.onupdate.is_clause_element: - values.append( - (c, self.process(c.onupdate.arg.self_group())) - ) - self.postfetch.append(c) - else: - values.append( - (c, self._create_crud_bind_param(c, None)) - ) - self.prefetch.append(c) - elif c.server_onupdate is not None: - self.postfetch.append(c) - - if parameters and stmt.parameters: - check = set(parameters).intersection( - sql._column_as_key(k) for k in stmt.parameters - ).difference(check_columns) - if check: - util.warn( - "Unconsumed column names: %s" % - (", ".join(check)) - ) - - return values - - def visit_delete(self, delete_stmt): - self.stack.append({'from': set([delete_stmt.table])}) - self.isdelete = True - - text = "DELETE FROM " + self.preparer.format_table(delete_stmt.table) - - if delete_stmt._hints: - dialect_hints = dict([ - (table, hint_text) - for (table, dialect), hint_text in - delete_stmt._hints.items() - if dialect in ('*', self.dialect.name) - ]) - if delete_stmt.table in dialect_hints: - text += " " + self.get_crud_hint_text( - delete_stmt.table, - dialect_hints[delete_stmt.table] - ) - else: - dialect_hints = None - - if delete_stmt._returning: - self.returning = delete_stmt._returning - if self.returning_precedes_values: - text += " " + self.returning_clause( - delete_stmt, delete_stmt._returning) - - if delete_stmt._whereclause is not None: - 
text += " WHERE " + self.process(delete_stmt._whereclause) - - if self.returning and not self.returning_precedes_values: - text += " " + self.returning_clause( - delete_stmt, delete_stmt._returning) - - self.stack.pop(-1) - - return text - - def visit_savepoint(self, savepoint_stmt): - return "SAVEPOINT %s" % self.preparer.format_savepoint(savepoint_stmt) - - def visit_rollback_to_savepoint(self, savepoint_stmt): - return "ROLLBACK TO SAVEPOINT %s" % \ - self.preparer.format_savepoint(savepoint_stmt) - - def visit_release_savepoint(self, savepoint_stmt): - return "RELEASE SAVEPOINT %s" % \ - self.preparer.format_savepoint(savepoint_stmt) - - -class DDLCompiler(engine.Compiled): - - @util.memoized_property - def sql_compiler(self): - return self.dialect.statement_compiler(self.dialect, None) - - @property - def preparer(self): - return self.dialect.identifier_preparer - - def construct_params(self, params=None): - return None - - def visit_ddl(self, ddl, **kwargs): - # table events can substitute table and schema name - context = ddl.context - if isinstance(ddl.target, schema.Table): - context = context.copy() - - preparer = self.dialect.identifier_preparer - path = preparer.format_table_seq(ddl.target) - if len(path) == 1: - table, sch = path[0], '' - else: - table, sch = path[-1], path[0] - - context.setdefault('table', table) - context.setdefault('schema', sch) - context.setdefault('fullname', preparer.format_table(ddl.target)) - - return self.sql_compiler.post_process_text(ddl.statement % context) - - def visit_create_schema(self, create): - return "CREATE SCHEMA " + self.preparer.format_schema(create.element, create.quote) - - def visit_drop_schema(self, drop): - text = "DROP SCHEMA " + self.preparer.format_schema(drop.element, drop.quote) - if drop.cascade: - text += " CASCADE" - return text - - def visit_create_table(self, create): - table = create.element - preparer = self.dialect.identifier_preparer - - text = "\n" + " ".join(['CREATE'] + \ - table._prefixes + \ - ['TABLE', - preparer.format_table(table), - "("]) - separator = "\n" - - # if only one primary key, specify it along with the column - first_pk = False - for column in table.columns: - try: - text += separator - separator = ", \n" - text += "\t" + self.get_column_specification( - column, - first_pk=column.primary_key and \ - not first_pk - ) - if column.primary_key: - first_pk = True - const = " ".join(self.process(constraint) \ - for constraint in column.constraints) - if const: - text += " " + const - except exc.CompileError, ce: - # Py3K - #raise exc.CompileError("(in table '%s', column '%s'): %s" - # % ( - # table.description, - # column.name, - # ce.args[0] - # )) from ce - # Py2K - raise exc.CompileError("(in table '%s', column '%s'): %s" - % ( - table.description, - column.name, - ce.args[0] - )), None, sys.exc_info()[2] - # end Py2K - - const = self.create_table_constraints(table) - if const: - text += ", \n\t" + const - - text += "\n)%s\n\n" % self.post_create_table(table) - return text - - def create_table_constraints(self, table): - - # On some DB order is significant: visit PK first, then the - # other constraints (engine.ReflectionTest.testbasic failed on FB2) - constraints = [] - if table.primary_key: - constraints.append(table.primary_key) - - constraints.extend([c for c in table._sorted_constraints - if c is not table.primary_key]) - - return ", \n\t".join(p for p in - (self.process(constraint) - for constraint in constraints - if ( - constraint._create_rule is None or - constraint._create_rule(self)) - 
and ( - not self.dialect.supports_alter or - not getattr(constraint, 'use_alter', False) - )) if p is not None - ) - - def visit_drop_table(self, drop): - return "\nDROP TABLE " + self.preparer.format_table(drop.element) - - def _index_identifier(self, ident): - if isinstance(ident, sql._truncated_label): - max = self.dialect.max_index_name_length or \ - self.dialect.max_identifier_length - if len(ident) > max: - ident = ident[0:max - 8] + \ - "_" + util.md5_hex(ident)[-4:] - else: - self.dialect.validate_identifier(ident) - - return ident - - def visit_create_index(self, create): - index = create.element - preparer = self.preparer - text = "CREATE " - if index.unique: - text += "UNIQUE " - text += "INDEX %s ON %s (%s)" \ - % (preparer.quote(self._index_identifier(index.name), - index.quote), - preparer.format_table(index.table), - ', '.join(preparer.quote(c.name, c.quote) - for c in index.columns)) - return text - - def visit_drop_index(self, drop): - index = drop.element - if index.table is not None and index.table.schema: - schema = index.table.schema - schema_name = self.preparer.quote_schema(schema, - index.table.quote_schema) - else: - schema_name = None - - index_name = self.preparer.quote( - self._index_identifier(index.name), - index.quote) - - if schema_name: - index_name = schema_name + "." + index_name - return "\nDROP INDEX " + index_name - - def visit_add_constraint(self, create): - preparer = self.preparer - return "ALTER TABLE %s ADD %s" % ( - self.preparer.format_table(create.element.table), - self.process(create.element) - ) - - def visit_create_sequence(self, create): - text = "CREATE SEQUENCE %s" % \ - self.preparer.format_sequence(create.element) - if create.element.increment is not None: - text += " INCREMENT BY %d" % create.element.increment - if create.element.start is not None: - text += " START WITH %d" % create.element.start - return text - - def visit_drop_sequence(self, drop): - return "DROP SEQUENCE %s" % \ - self.preparer.format_sequence(drop.element) - - def visit_drop_constraint(self, drop): - preparer = self.preparer - return "ALTER TABLE %s DROP CONSTRAINT %s%s" % ( - self.preparer.format_table(drop.element.table), - self.preparer.format_constraint(drop.element), - drop.cascade and " CASCADE" or "" - ) - - def get_column_specification(self, column, **kwargs): - colspec = self.preparer.format_column(column) + " " + \ - self.dialect.type_compiler.process(column.type) - default = self.get_column_default_string(column) - if default is not None: - colspec += " DEFAULT " + default - - if not column.nullable: - colspec += " NOT NULL" - return colspec - - def post_create_table(self, table): - return '' - - def get_column_default_string(self, column): - if isinstance(column.server_default, schema.DefaultClause): - if isinstance(column.server_default.arg, basestring): - return "'%s'" % column.server_default.arg - else: - return self.sql_compiler.process(column.server_default.arg) - else: - return None - - def visit_check_constraint(self, constraint): - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - self.preparer.format_constraint(constraint) - sqltext = sql_util.expression_as_ddl(constraint.sqltext) - text += "CHECK (%s)" % self.sql_compiler.process(sqltext) - text += self.define_constraint_deferrability(constraint) - return text - - def visit_column_check_constraint(self, constraint): - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - self.preparer.format_constraint(constraint) - text += "CHECK (%s)" % 
constraint.sqltext - text += self.define_constraint_deferrability(constraint) - return text - - def visit_primary_key_constraint(self, constraint): - if len(constraint) == 0: - return '' - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - self.preparer.format_constraint(constraint) - text += "PRIMARY KEY " - text += "(%s)" % ', '.join(self.preparer.quote(c.name, c.quote) - for c in constraint) - text += self.define_constraint_deferrability(constraint) - return text - - def visit_foreign_key_constraint(self, constraint): - preparer = self.dialect.identifier_preparer - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - preparer.format_constraint(constraint) - remote_table = list(constraint._elements.values())[0].column.table - text += "FOREIGN KEY(%s) REFERENCES %s (%s)" % ( - ', '.join(preparer.quote(f.parent.name, f.parent.quote) - for f in constraint._elements.values()), - self.define_constraint_remote_table( - constraint, remote_table, preparer), - ', '.join(preparer.quote(f.column.name, f.column.quote) - for f in constraint._elements.values()) - ) - text += self.define_constraint_cascades(constraint) - text += self.define_constraint_deferrability(constraint) - return text - - def define_constraint_remote_table(self, constraint, table, preparer): - """Format the remote table clause of a CREATE CONSTRAINT clause.""" - - return preparer.format_table(table) - - def visit_unique_constraint(self, constraint): - text = "" - if constraint.name is not None: - text += "CONSTRAINT %s " % \ - self.preparer.format_constraint(constraint) - text += "UNIQUE (%s)" % ( - ', '.join(self.preparer.quote(c.name, c.quote) - for c in constraint)) - text += self.define_constraint_deferrability(constraint) - return text - - def define_constraint_cascades(self, constraint): - text = "" - if constraint.ondelete is not None: - text += " ON DELETE %s" % constraint.ondelete - if constraint.onupdate is not None: - text += " ON UPDATE %s" % constraint.onupdate - return text - - def define_constraint_deferrability(self, constraint): - text = "" - if constraint.deferrable is not None: - if constraint.deferrable: - text += " DEFERRABLE" - else: - text += " NOT DEFERRABLE" - if constraint.initially is not None: - text += " INITIALLY %s" % constraint.initially - return text - - -class GenericTypeCompiler(engine.TypeCompiler): - def visit_CHAR(self, type_): - return "CHAR" + (type_.length and "(%d)" % type_.length or "") - - def visit_NCHAR(self, type_): - return "NCHAR" + (type_.length and "(%d)" % type_.length or "") - - def visit_FLOAT(self, type_): - return "FLOAT" - - def visit_REAL(self, type_): - return "REAL" - - def visit_NUMERIC(self, type_): - if type_.precision is None: - return "NUMERIC" - elif type_.scale is None: - return "NUMERIC(%(precision)s)" % \ - {'precision': type_.precision} - else: - return "NUMERIC(%(precision)s, %(scale)s)" % \ - {'precision': type_.precision, - 'scale' : type_.scale} - - def visit_DECIMAL(self, type_): - return "DECIMAL" - - def visit_INTEGER(self, type_): - return "INTEGER" - - def visit_SMALLINT(self, type_): - return "SMALLINT" - - def visit_BIGINT(self, type_): - return "BIGINT" - - def visit_TIMESTAMP(self, type_): - return 'TIMESTAMP' - - def visit_DATETIME(self, type_): - return "DATETIME" - - def visit_DATE(self, type_): - return "DATE" - - def visit_TIME(self, type_): - return "TIME" - - def visit_CLOB(self, type_): - return "CLOB" - - def visit_NCLOB(self, type_): - return "NCLOB" - - def visit_VARCHAR(self, type_): - 
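        # Length-aware rendering shared by the VARCHAR-family visitors here;
        # an illustrative sketch of the observable behavior (engine URL and
        # types are arbitrary, and dialects may override these visitors):
        #
        #   from sqlalchemy import String, create_engine
        #   eng = create_engine('sqlite://')
        #   print eng.dialect.type_compiler.process(String(30))  # VARCHAR(30)
        #   print eng.dialect.type_compiler.process(String())    # VARCHAR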
return "VARCHAR" + (type_.length and "(%d)" % type_.length or "") - - def visit_NVARCHAR(self, type_): - return "NVARCHAR" + (type_.length and "(%d)" % type_.length or "") - - def visit_BLOB(self, type_): - return "BLOB" - - def visit_BINARY(self, type_): - return "BINARY" + (type_.length and "(%d)" % type_.length or "") - - def visit_VARBINARY(self, type_): - return "VARBINARY" + (type_.length and "(%d)" % type_.length or "") - - def visit_BOOLEAN(self, type_): - return "BOOLEAN" - - def visit_TEXT(self, type_): - return "TEXT" - - def visit_large_binary(self, type_): - return self.visit_BLOB(type_) - - def visit_boolean(self, type_): - return self.visit_BOOLEAN(type_) - - def visit_time(self, type_): - return self.visit_TIME(type_) - - def visit_datetime(self, type_): - return self.visit_DATETIME(type_) - - def visit_date(self, type_): - return self.visit_DATE(type_) - - def visit_big_integer(self, type_): - return self.visit_BIGINT(type_) - - def visit_small_integer(self, type_): - return self.visit_SMALLINT(type_) - - def visit_integer(self, type_): - return self.visit_INTEGER(type_) - - def visit_real(self, type_): - return self.visit_REAL(type_) - - def visit_float(self, type_): - return self.visit_FLOAT(type_) - - def visit_numeric(self, type_): - return self.visit_NUMERIC(type_) - - def visit_string(self, type_): - return self.visit_VARCHAR(type_) - - def visit_unicode(self, type_): - return self.visit_VARCHAR(type_) - - def visit_text(self, type_): - return self.visit_TEXT(type_) - - def visit_unicode_text(self, type_): - return self.visit_TEXT(type_) - - def visit_enum(self, type_): - return self.visit_VARCHAR(type_) - - def visit_null(self, type_): - raise NotImplementedError("Can't generate DDL for the null type") - - def visit_type_decorator(self, type_): - return self.process(type_.type_engine(self.dialect)) - - def visit_user_defined(self, type_): - return type_.get_col_spec() - -class IdentifierPreparer(object): - """Handle quoting and case-folding of identifiers based on options.""" - - reserved_words = RESERVED_WORDS - - legal_characters = LEGAL_CHARACTERS - - illegal_initial_characters = ILLEGAL_INITIAL_CHARACTERS - - def __init__(self, dialect, initial_quote='"', - final_quote=None, escape_quote='"', omit_schema=False): - """Construct a new ``IdentifierPreparer`` object. - - initial_quote - Character that begins a delimited identifier. - - final_quote - Character that ends a delimited identifier. Defaults to - `initial_quote`. - - omit_schema - Prevent prepending schema name. Useful for databases that do - not support schemae. - """ - - self.dialect = dialect - self.initial_quote = initial_quote - self.final_quote = final_quote or self.initial_quote - self.escape_quote = escape_quote - self.escape_to_quote = self.escape_quote * 2 - self.omit_schema = omit_schema - self._strings = {} - - def _escape_identifier(self, value): - """Escape an identifier. - - Subclasses should override this to provide database-dependent - escaping behavior. - """ - - return value.replace(self.escape_quote, self.escape_to_quote) - - def _unescape_identifier(self, value): - """Canonicalize an escaped identifier. - - Subclasses should override this to provide database-dependent - unescaping behavior that reverses _escape_identifier. - """ - - return value.replace(self.escape_to_quote, self.escape_quote) - - def quote_identifier(self, value): - """Quote an identifier. - - Subclasses should override this to provide database-dependent - quoting behavior. 
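        A concrete sketch (engine URL and names are arbitrary; the exact
        quote characters depend on the dialect). :meth:`.quote`, further
        below, applies this only when ``_requires_quotes`` says so::

            from sqlalchemy import create_engine

            preparer = create_engine('sqlite://').dialect.identifier_preparer
            print preparer.quote_identifier('order')   # "order"
            print preparer.quote('order', None)        # "order"  (reserved word)
            print preparer.quote('user_id', None)      # user_id  (no quoting needed)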
- """ - - return self.initial_quote + \ - self._escape_identifier(value) + \ - self.final_quote - - def _requires_quotes(self, value): - """Return True if the given identifier requires quoting.""" - lc_value = value.lower() - return (lc_value in self.reserved_words - or value[0] in self.illegal_initial_characters - or not self.legal_characters.match(unicode(value)) - or (lc_value != value)) - - def quote_schema(self, schema, force): - """Quote a schema. - - Subclasses should override this to provide database-dependent - quoting behavior. - """ - return self.quote(schema, force) - - def quote(self, ident, force): - if force is None: - if ident in self._strings: - return self._strings[ident] - else: - if self._requires_quotes(ident): - self._strings[ident] = self.quote_identifier(ident) - else: - self._strings[ident] = ident - return self._strings[ident] - elif force: - return self.quote_identifier(ident) - else: - return ident - - def format_sequence(self, sequence, use_schema=True): - name = self.quote(sequence.name, sequence.quote) - if not self.omit_schema and use_schema and \ - sequence.schema is not None: - name = self.quote_schema(sequence.schema, sequence.quote) + \ - "." + name - return name - - def format_label(self, label, name=None): - return self.quote(name or label.name, label.quote) - - def format_alias(self, alias, name=None): - return self.quote(name or alias.name, alias.quote) - - def format_savepoint(self, savepoint, name=None): - return self.quote(name or savepoint.ident, savepoint.quote) - - def format_constraint(self, constraint): - return self.quote(constraint.name, constraint.quote) - - def format_table(self, table, use_schema=True, name=None): - """Prepare a quoted table and schema name.""" - - if name is None: - name = table.name - result = self.quote(name, table.quote) - if not self.omit_schema and use_schema \ - and getattr(table, "schema", None): - result = self.quote_schema(table.schema, table.quote_schema) + \ - "." + result - return result - - def format_schema(self, name, quote): - """Prepare a quoted schema name.""" - - return self.quote(name, quote) - - def format_column(self, column, use_table=False, - name=None, table_name=None): - """Prepare a quoted column name.""" - - if name is None: - name = column.name - if not getattr(column, 'is_literal', False): - if use_table: - return self.format_table( - column.table, use_schema=False, - name=table_name) + "." + \ - self.quote(name, column.quote) - else: - return self.quote(name, column.quote) - else: - # literal textual elements get stuck into ColumnClause a lot, - # which shouldn't get quoted - - if use_table: - return self.format_table(column.table, - use_schema=False, name=table_name) + '.' + name - else: - return name - - def format_table_seq(self, table, use_schema=True): - """Format table name and schema as a tuple.""" - - # Dialects with more levels in their fully qualified references - # ('database', 'owner', etc.) could override this and return - # a longer sequence. 
- - if not self.omit_schema and use_schema and \ - getattr(table, 'schema', None): - return (self.quote_schema(table.schema, table.quote_schema), - self.format_table(table, use_schema=False)) - else: - return (self.format_table(table, use_schema=False), ) - - @util.memoized_property - def _r_identifiers(self): - initial, final, escaped_final = \ - [re.escape(s) for s in - (self.initial_quote, self.final_quote, - self._escape_identifier(self.final_quote))] - r = re.compile( - r'(?:' - r'(?:%(initial)s((?:%(escaped)s|[^%(final)s])+)%(final)s' - r'|([^\.]+))(?=\.|$))+' % - { 'initial': initial, - 'final': final, - 'escaped': escaped_final }) - return r - - def unformat_identifiers(self, identifiers): - """Unpack 'schema.table.column'-like strings into components.""" - - r = self._r_identifiers - return [self._unescape_identifier(i) - for i in [a or b for a, b in r.findall(identifiers)]] diff --git a/libs/sqlalchemy/sql/expression.py b/libs/sqlalchemy/sql/expression.py deleted file mode 100644 index c90a3dcb..00000000 --- a/libs/sqlalchemy/sql/expression.py +++ /dev/null @@ -1,5730 +0,0 @@ -# sql/expression.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines the base components of SQL expression trees. - -All components are derived from a common base class -:class:`.ClauseElement`. Common behaviors are organized -based on class hierarchies, in some cases via mixins. - -All object construction from this package occurs via functions which -in some cases will construct composite :class:`.ClauseElement` structures -together, and in other cases simply return a single :class:`.ClauseElement` -constructed directly. The function interface affords a more "DSL-ish" -feel to constructing SQL expressions and also allows future class -reorganizations. - -Even though classes are not constructed directly from the outside, -most classes which have additional public methods are considered to be -public (i.e. have no leading underscore). Other classes which are -"semi-public" are marked with a single leading underscore; these -classes usually have few or no public methods and are less guaranteed -to stay the same in future releases. 
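For orientation, a small sketch of this functional construction style
(table and column names are illustrative)::

    from sqlalchemy.sql import table, column, select, and_

    users = table('users', column('id'), column('name'))

    stmt = select([users.c.name]).where(
        and_(users.c.id > 5, users.c.name != None))
    print stmt
    # SELECT users.name
    # FROM users
    # WHERE users.id > :id_1 AND users.name IS NOT NULL

Each intermediate piece here - the table clause, the comparisons, the
conjunction, the SELECT itself - is a :class:`.ClauseElement`.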
- -""" - -import itertools, re -from operator import attrgetter - -from sqlalchemy import util, exc -from sqlalchemy.sql import operators -from sqlalchemy.sql.operators import Operators, ColumnOperators -from sqlalchemy.sql.visitors import Visitable, cloned_traverse -import operator - -functions = util.importlater("sqlalchemy.sql", "functions") -sqlutil = util.importlater("sqlalchemy.sql", "util") -sqltypes = util.importlater("sqlalchemy", "types") -default = util.importlater("sqlalchemy.engine", "default") - -__all__ = [ - 'Alias', 'ClauseElement', 'ColumnCollection', 'ColumnElement', - 'CompoundSelect', 'Delete', 'FromClause', 'Insert', 'Join', 'Select', - 'Selectable', 'TableClause', 'Update', 'alias', 'and_', 'asc', 'between', - 'bindparam', 'case', 'cast', 'column', 'delete', 'desc', 'distinct', - 'except_', 'except_all', 'exists', 'extract', 'func', 'modifier', - 'collate', 'insert', 'intersect', 'intersect_all', 'join', 'label', - 'literal', 'literal_column', 'not_', 'null', 'nullsfirst', 'nullslast', - 'or_', 'outparam', 'outerjoin', 'over', 'select', 'subquery', 'table', 'text', - 'tuple_', 'type_coerce', 'union', 'union_all', 'update', ] - -PARSE_AUTOCOMMIT = util.symbol('PARSE_AUTOCOMMIT') - -def nullsfirst(column): - """Return a NULLS FIRST ``ORDER BY`` clause element. - - e.g.:: - - someselect.order_by(desc(table1.mycol).nullsfirst()) - - produces:: - - ORDER BY mycol DESC NULLS FIRST - - """ - return _UnaryExpression(column, modifier=operators.nullsfirst_op) - -def nullslast(column): - """Return a NULLS LAST ``ORDER BY`` clause element. - - e.g.:: - - someselect.order_by(desc(table1.mycol).nullslast()) - - produces:: - - ORDER BY mycol DESC NULLS LAST - - """ - return _UnaryExpression(column, modifier=operators.nullslast_op) - -def desc(column): - """Return a descending ``ORDER BY`` clause element. - - e.g.:: - - someselect.order_by(desc(table1.mycol)) - - produces:: - - ORDER BY mycol DESC - - """ - return _UnaryExpression(column, modifier=operators.desc_op) - -def asc(column): - """Return an ascending ``ORDER BY`` clause element. - - e.g.:: - - someselect.order_by(asc(table1.mycol)) - - produces:: - - ORDER BY mycol ASC - - """ - return _UnaryExpression(column, modifier=operators.asc_op) - -def outerjoin(left, right, onclause=None): - """Return an ``OUTER JOIN`` clause element. - - The returned object is an instance of :class:`.Join`. - - Similar functionality is also available via the - :meth:`~.FromClause.outerjoin()` method on any - :class:`.FromClause`. - - :param left: The left side of the join. - - :param right: The right side of the join. - - :param onclause: Optional criterion for the ``ON`` clause, is - derived from foreign key relationships established between - left and right otherwise. - - To chain joins together, use the :meth:`.FromClause.join` or - :meth:`.FromClause.outerjoin` methods on the resulting - :class:`.Join` object. - - """ - return Join(left, right, onclause, isouter=True) - -def join(left, right, onclause=None, isouter=False): - """Return a ``JOIN`` clause element (regular inner join). - - The returned object is an instance of :class:`.Join`. - - Similar functionality is also available via the - :meth:`~.FromClause.join()` method on any - :class:`.FromClause`. - - :param left: The left side of the join. - - :param right: The right side of the join. - - :param onclause: Optional criterion for the ``ON`` clause, is - derived from foreign key relationships established between - left and right otherwise. 
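    A minimal sketch (names illustrative; lightweight :func:`.table`
    constructs carry no foreign key metadata, so the ``onclause`` is
    spelled out here)::

        from sqlalchemy.sql import table, column, join, select

        users = table('users', column('id'), column('name'))
        addresses = table('addresses', column('id'), column('user_id'))

        j = join(users, addresses, users.c.id == addresses.c.user_id)
        print select([users.c.name]).select_from(j)
        # SELECT users.name
        # FROM users JOIN addresses ON users.id = addresses.user_id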
- - To chain joins together, use the :meth:`.FromClause.join` or - :meth:`.FromClause.outerjoin` methods on the resulting - :class:`.Join` object. - - - """ - return Join(left, right, onclause, isouter) - -def select(columns=None, whereclause=None, from_obj=[], **kwargs): - """Returns a ``SELECT`` clause element. - - Similar functionality is also available via the :func:`select()` - method on any :class:`.FromClause`. - - The returned object is an instance of :class:`.Select`. - - All arguments which accept :class:`.ClauseElement` arguments also accept - string arguments, which will be converted as appropriate into - either :func:`text()` or :func:`literal_column()` constructs. - - See also: - - :ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`. - - :param columns: - A list of :class:`.ClauseElement` objects, typically - :class:`.ColumnElement` objects or subclasses, which will form the - columns clause of the resulting statement. For all members which are - instances of :class:`.Selectable`, the individual :class:`.ColumnElement` - members of the :class:`.Selectable` will be added individually to the - columns clause. For example, specifying a - :class:`~sqlalchemy.schema.Table` instance will result in all the - contained :class:`~sqlalchemy.schema.Column` objects within to be added - to the columns clause. - - This argument is not present on the form of :func:`select()` - available on :class:`~sqlalchemy.schema.Table`. - - :param whereclause: - A :class:`.ClauseElement` expression which will be used to form the - ``WHERE`` clause. - - :param from_obj: - A list of :class:`.ClauseElement` objects which will be added to the - ``FROM`` clause of the resulting statement. Note that "from" objects are - automatically located within the columns and whereclause ClauseElements. - Use this parameter to explicitly specify "from" objects which are not - automatically locatable. This could include - :class:`~sqlalchemy.schema.Table` objects that aren't otherwise present, - or :class:`.Join` objects whose presence will supercede that of the - :class:`~sqlalchemy.schema.Table` objects already located in the other - clauses. - - :param autocommit: - Deprecated. Use .execution_options(autocommit=) - to set the autocommit option. - - :param bind=None: - an :class:`~.base.Engine` or :class:`~.base.Connection` instance - to which the - resulting :class:`.Select` object will be bound. The :class:`.Select` - object will otherwise automatically bind to whatever - :class:`~.base.Connectable` instances can be located within its contained - :class:`.ClauseElement` members. - - :param correlate=True: - indicates that this :class:`.Select` object should have its - contained :class:`.FromClause` elements "correlated" to an enclosing - :class:`.Select` object. This means that any :class:`.ClauseElement` - instance within the "froms" collection of this :class:`.Select` - which is also present in the "froms" collection of an - enclosing select will not be rendered in the ``FROM`` clause - of this select statement. - - :param distinct=False: - when ``True``, applies a ``DISTINCT`` qualifier to the columns - clause of the resulting statement. - - The boolean argument may also be a column expression or list - of column expressions - this is a special calling form which - is understood by the Postgresql dialect to render the - ``DISTINCT ON ()`` syntax. - - ``distinct`` is also available via the :meth:`~.Select.distinct` - generative method. - - .. 
note:: - - The ``distinct`` keyword's acceptance of a string - argument for usage with MySQL is deprecated. Use - the ``prefixes`` argument or :meth:`~.Select.prefix_with`. - - :param for_update=False: - when ``True``, applies ``FOR UPDATE`` to the end of the - resulting statement. - - Certain database dialects also support - alternate values for this parameter: - - * With the MySQL dialect, the value ``"read"`` translates to - ``LOCK IN SHARE MODE``. - * With the Oracle and Postgresql dialects, the value ``"nowait"`` - translates to ``FOR UPDATE NOWAIT``. - * With the Postgresql dialect, the values "read" and ``"read_nowait"`` - translate to ``FOR SHARE`` and ``FOR SHARE NOWAIT``, respectively. - - .. versionadded:: 0.7.7 - - :param group_by: - a list of :class:`.ClauseElement` objects which will comprise the - ``GROUP BY`` clause of the resulting select. - - :param having: - a :class:`.ClauseElement` that will comprise the ``HAVING`` clause - of the resulting select when ``GROUP BY`` is used. - - :param limit=None: - a numerical value which usually compiles to a ``LIMIT`` - expression in the resulting select. Databases that don't - support ``LIMIT`` will attempt to provide similar - functionality. - - :param offset=None: - a numeric value which usually compiles to an ``OFFSET`` - expression in the resulting select. Databases that don't - support ``OFFSET`` will attempt to provide similar - functionality. - - :param order_by: - a scalar or list of :class:`.ClauseElement` objects which will - comprise the ``ORDER BY`` clause of the resulting select. - - :param prefixes: - a list of strings or :class:`.ClauseElement` objects to include - directly after the SELECT keyword in the generated statement, - for dialect-specific query features. ``prefixes`` is - also available via the :meth:`~.Select.prefix_with` - generative method. - - :param use_labels=False: - when ``True``, the statement will be generated using labels - for each column in the columns clause, which qualify each - column with its parent table's (or aliases) name so that name - conflicts between columns in different tables don't occur. - The format of the label is _. The "c" - collection of the resulting :class:`.Select` object will use these - names as well for targeting column members. - - use_labels is also available via the :meth:`~._SelectBase.apply_labels` - generative method. - - """ - return Select(columns, whereclause=whereclause, from_obj=from_obj, - **kwargs) - -def subquery(alias, *args, **kwargs): - """Return an :class:`.Alias` object derived - from a :class:`.Select`. - - name - alias name - - \*args, \**kwargs - - all other arguments are delivered to the - :func:`select` function. - - """ - return Select(*args, **kwargs).alias(alias) - -def insert(table, values=None, inline=False, **kwargs): - """Represent an ``INSERT`` statement via the :class:`.Insert` SQL - construct. - - Similar functionality is available via the :meth:`~.TableClause.insert` method on - :class:`~.schema.Table`. - - - :param table: The table to be inserted into. - - :param values: A dictionary which specifies the column specifications of - the ``INSERT``, and is optional. If left as None, the column - specifications are determined from the bind parameters used during the - compile phase of the ``INSERT`` statement. If the bind parameters also - are None during the compile phase, then the column specifications will be - generated from the full list of table columns. 
Note that the - :meth:`~Insert.values()` generative method may also be used for this. - - :param prefixes: A list of modifier keywords to be inserted between INSERT - and INTO. Alternatively, the :meth:`~Insert.prefix_with` generative - method may be used. - - :param inline: if True, SQL defaults will be compiled 'inline' into the - statement and not pre-executed. - - If both `values` and compile-time bind parameters are present, the - compile-time bind parameters override the information specified - within `values` on a per-key basis. - - The keys within `values` can be either :class:`~sqlalchemy.schema.Column` - objects or their string identifiers. Each key may reference one of: - - * a literal data value (i.e. string, number, etc.); - * a Column object; - * a SELECT statement. - - If a ``SELECT`` statement is specified which references this - ``INSERT`` statement's table, the statement will be correlated - against the ``INSERT`` statement. - - See also: - - :ref:`coretutorial_insert_expressions` - SQL Expression Tutorial - - :ref:`inserts_and_updates` - SQL Expression Tutorial - - """ - return Insert(table, values, inline=inline, **kwargs) - -def update(table, whereclause=None, values=None, inline=False, **kwargs): - """Represent an ``UPDATE`` statement via the :class:`.Update` SQL - construct. - - E.g.:: - - from sqlalchemy import update - - stmt = update(users).where(users.c.id==5).\\ - values(name='user #5') - - Similar functionality is available via the :meth:`~.TableClause.update` method on - :class:`.Table`:: - - - stmt = users.update().\\ - where(users.c.id==5).\\ - values(name='user #5') - - :param table: A :class:`.Table` object representing the database - table to be updated. - - :param whereclause: Optional SQL expression describing the ``WHERE`` - condition of the ``UPDATE`` statement. Modern applications - may prefer to use the generative :meth:`~Update.where()` - method to specify the ``WHERE`` clause. - - The WHERE clause can refer to multiple tables. - For databases which support this, an ``UPDATE FROM`` clause will - be generated, or on MySQL, a multi-table update. The statement - will fail on databases that don't have support for multi-table - update statements. A SQL-standard method of referring to - additional tables in the WHERE clause is to use a correlated - subquery:: - - users.update().values(name='ed').where( - users.c.name==select([addresses.c.email_address]).\\ - where(addresses.c.user_id==users.c.id).\\ - as_scalar() - ) - - .. versionchanged:: 0.7.4 - The WHERE clause can refer to multiple tables. - - :param values: - Optional dictionary which specifies the ``SET`` conditions of the - ``UPDATE``. If left as ``None``, the ``SET`` - conditions are determined from those parameters passed to the - statement during the execution and/or compilation of the - statement. When compiled standalone without any parameters, - the ``SET`` clause generates for all columns. - - Modern applications may prefer to use the generative - :meth:`.Update.values` method to set the values of the - UPDATE statement. - - :param inline: - if True, SQL defaults present on :class:`.Column` objects via - the ``default`` keyword will be compiled 'inline' into the statement - and not pre-executed. This means that their values will not - be available in the dictionary returned from - :meth:`.ResultProxy.last_updated_params`. 
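      A compact sketch of the generative form, compiled standalone with no
      extra parameters (names illustrative)::

          from sqlalchemy.sql import table, column

          users = table('users', column('id'), column('name'))
          print users.update().where(users.c.id == 5).values(name='ed')
          # UPDATE users SET name=:name WHERE users.id = :id_1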
- - If both ``values`` and compile-time bind parameters are present, the - compile-time bind parameters override the information specified - within ``values`` on a per-key basis. - - The keys within ``values`` can be either :class:`.Column` - objects or their string identifiers (specifically the "key" of the - :class:`.Column`, normally but not necessarily equivalent to - its "name"). Normally, the - :class:`.Column` objects used here are expected to be - part of the target :class:`.Table` that is the table - to be updated. However when using MySQL, a multiple-table - UPDATE statement can refer to columns from any of - the tables referred to in the WHERE clause. - - The values referred to in ``values`` are typically: - - * a literal data value (i.e. string, number, etc.) - * a SQL expression, such as a related :class:`.Column`, - a scalar-returning :func:`.select` construct, - etc. - - When combining :func:`.select` constructs within the values - clause of an :func:`.update` construct, - the subquery represented by the :func:`.select` should be - *correlated* to the parent table, that is, providing criterion - which links the table inside the subquery to the outer table - being updated:: - - users.update().values( - name=select([addresses.c.email_address]).\\ - where(addresses.c.user_id==users.c.id).\\ - as_scalar() - ) - - See also: - - :ref:`inserts_and_updates` - SQL Expression - Language Tutorial - - - """ - return Update( - table, - whereclause=whereclause, - values=values, - inline=inline, - **kwargs) - -def delete(table, whereclause = None, **kwargs): - """Represent a ``DELETE`` statement via the :class:`.Delete` SQL - construct. - - Similar functionality is available via the :meth:`~.TableClause.delete` method on - :class:`~.schema.Table`. - - :param table: The table to be updated. - - :param whereclause: A :class:`.ClauseElement` describing the ``WHERE`` - condition of the ``UPDATE`` statement. Note that the - :meth:`~Delete.where()` generative method may be used instead. - - See also: - - :ref:`deletes` - SQL Expression Tutorial - - """ - return Delete(table, whereclause, **kwargs) - -def and_(*clauses): - """Join a list of clauses together using the ``AND`` operator. - - The ``&`` operator is also overloaded on all - :class:`_CompareMixin` subclasses to produce the - same result. - - """ - if len(clauses) == 1: - return clauses[0] - return BooleanClauseList(operator=operators.and_, *clauses) - -def or_(*clauses): - """Join a list of clauses together using the ``OR`` operator. - - The ``|`` operator is also overloaded on all - :class:`_CompareMixin` subclasses to produce the - same result. - - """ - if len(clauses) == 1: - return clauses[0] - return BooleanClauseList(operator=operators.or_, *clauses) - -def not_(clause): - """Return a negation of the given clause, i.e. ``NOT(clause)``. - - The ``~`` operator is also overloaded on all - :class:`_CompareMixin` subclasses to produce the - same result. - - """ - return operators.inv(_literal_as_binds(clause)) - -def distinct(expr): - """Return a ``DISTINCT`` clause. - - e.g.:: - - distinct(a) - - renders:: - - DISTINCT a - - """ - expr = _literal_as_binds(expr) - return _UnaryExpression(expr, operator=operators.distinct_op, type_=expr.type) - -def between(ctest, cleft, cright): - """Return a ``BETWEEN`` predicate clause. - - Equivalent of SQL ``clausetest BETWEEN clauseleft AND clauseright``. - - The :func:`between()` method on all - :class:`_CompareMixin` subclasses provides - similar functionality. 
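    A short sketch (column name illustrative); the function and method
    forms render identically::

        from sqlalchemy.sql import column, between

        qty = column('qty')
        print between(qty, 5, 10)   # qty BETWEEN :qty_1 AND :qty_2
        print qty.between(5, 10)    # same rendering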
- - """ - ctest = _literal_as_binds(ctest) - return ctest.between(cleft, cright) - - -def case(whens, value=None, else_=None): - """Produce a ``CASE`` statement. - - whens - A sequence of pairs, or alternatively a dict, - to be translated into "WHEN / THEN" clauses. - - value - Optional for simple case statements, produces - a column expression as in "CASE WHEN ..." - - else\_ - Optional as well, for case defaults produces - the "ELSE" portion of the "CASE" statement. - - The expressions used for THEN and ELSE, - when specified as strings, will be interpreted - as bound values. To specify textual SQL expressions - for these, use the :func:`literal_column` - construct. - - The expressions used for the WHEN criterion - may only be literal strings when "value" is - present, i.e. CASE table.somecol WHEN "x" THEN "y". - Otherwise, literal strings are not accepted - in this position, and either the text() - or literal() constructs must be used to - interpret raw string values. - - Usage examples:: - - case([(orderline.c.qty > 100, item.c.specialprice), - (orderline.c.qty > 10, item.c.bulkprice) - ], else_=item.c.regularprice) - case(value=emp.c.type, whens={ - 'engineer': emp.c.salary * 1.1, - 'manager': emp.c.salary * 3, - }) - - Using :func:`literal_column()`, to allow for databases that - do not support bind parameters in the ``then`` clause. The type - can be specified which determines the type of the :func:`case()` construct - overall:: - - case([(orderline.c.qty > 100, - literal_column("'greaterthan100'", String)), - (orderline.c.qty > 10, literal_column("'greaterthan10'", - String)) - ], else_=literal_column("'lethan10'", String)) - - """ - - return _Case(whens, value=value, else_=else_) - -def cast(clause, totype, **kwargs): - """Return a ``CAST`` function. - - Equivalent of SQL ``CAST(clause AS totype)``. - - Use with a :class:`~sqlalchemy.types.TypeEngine` subclass, i.e:: - - cast(table.c.unit_price * table.c.qty, Numeric(10,4)) - - or:: - - cast(table.c.timestamp, DATE) - - """ - return _Cast(clause, totype, **kwargs) - -def extract(field, expr): - """Return the clause ``extract(field FROM expr)``.""" - - return _Extract(field, expr) - -def collate(expression, collation): - """Return the clause ``expression COLLATE collation``. - - e.g.:: - - collate(mycolumn, 'utf8_bin') - - produces:: - - mycolumn COLLATE utf8_bin - - """ - - expr = _literal_as_binds(expression) - return _BinaryExpression( - expr, - _literal_as_text(collation), - operators.collate, type_=expr.type) - -def exists(*args, **kwargs): - """Return an ``EXISTS`` clause as applied to a :class:`.Select` object. - - Calling styles are of the following forms:: - - # use on an existing select() - s = select([table.c.col1]).where(table.c.col2==5) - s = exists(s) - - # construct a select() at once - exists(['*'], **select_arguments).where(criterion) - - # columns argument is optional, generates "EXISTS (SELECT *)" - # by default. - exists().where(table.c.col2==5) - - """ - return _Exists(*args, **kwargs) - -def union(*selects, **kwargs): - """Return a ``UNION`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - A similar :func:`union()` method is available on all - :class:`.FromClause` subclasses. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. 
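    For instance (a sketch; names illustrative)::

        from sqlalchemy.sql import table, column, select, union

        t = table('t', column('x'))
        print union(select([t.c.x]).where(t.c.x == 1),
                    select([t.c.x]).where(t.c.x == 2))
        # SELECT t.x FROM t WHERE t.x = :x_1
        # UNION SELECT t.x FROM t WHERE t.x = :x_2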
- - """ - return CompoundSelect(CompoundSelect.UNION, *selects, **kwargs) - -def union_all(*selects, **kwargs): - """Return a ``UNION ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - A similar :func:`union_all()` method is available on all - :class:`.FromClause` subclasses. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.UNION_ALL, *selects, **kwargs) - -def except_(*selects, **kwargs): - """Return an ``EXCEPT`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.EXCEPT, *selects, **kwargs) - -def except_all(*selects, **kwargs): - """Return an ``EXCEPT ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.EXCEPT_ALL, *selects, **kwargs) - -def intersect(*selects, **kwargs): - """Return an ``INTERSECT`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.INTERSECT, *selects, **kwargs) - -def intersect_all(*selects, **kwargs): - """Return an ``INTERSECT ALL`` of multiple selectables. - - The returned object is an instance of - :class:`.CompoundSelect`. - - \*selects - a list of :class:`.Select` instances. - - \**kwargs - available keyword arguments are the same as those of - :func:`select`. - - """ - return CompoundSelect(CompoundSelect.INTERSECT_ALL, *selects, **kwargs) - -def alias(selectable, name=None): - """Return an :class:`.Alias` object. - - An :class:`.Alias` represents any :class:`.FromClause` - with an alternate name assigned within SQL, typically using the ``AS`` - clause when generated, e.g. ``SELECT * FROM table AS aliasname``. - - Similar functionality is available via the - :meth:`~.FromClause.alias` method - available on all :class:`.FromClause` subclasses. - - When an :class:`.Alias` is created from a :class:`.Table` object, - this has the effect of the table being rendered - as ``tablename AS aliasname`` in a SELECT statement. - - For :func:`.select` objects, the effect is that of creating a named - subquery, i.e. ``(select ...) AS aliasname``. - - The ``name`` parameter is optional, and provides the name - to use in the rendered SQL. If blank, an "anonymous" name - will be deterministically generated at compile time. - Deterministic means the name is guaranteed to be unique against - other constructs used in the same statement, and will also be the - same name for each successive compilation of the same statement - object. - - :param selectable: any :class:`.FromClause` subclass, - such as a table, select statement, etc. - - :param name: string name to be assigned as the alias. - If ``None``, a name will be deterministically generated - at compile time. - - """ - return Alias(selectable, name=name) - - -def literal(value, type_=None): - """Return a literal clause, bound to a bind parameter. 
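    A quick sketch of the effect, explained below (values arbitrary)::

        from sqlalchemy.sql import literal

        x = literal(5)        # a bind parameter carrying the value 5
        print x + 10          # :param_1 + :param_2
        print x == 'five'     # :param_1 = :param_2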
- - Literal clauses are created automatically when non- :class:`.ClauseElement` - objects (such as strings, ints, dates, etc.) are used in a comparison - operation with a :class:`_CompareMixin` - subclass, such as a :class:`~sqlalchemy.schema.Column` object. Use this function to force the - generation of a literal clause, which will be created as a - :class:`_BindParamClause` with a bound value. - - :param value: the value to be bound. Can be any Python object supported by - the underlying DB-API, or is translatable via the given type argument. - - :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` which - will provide bind-parameter translation for this literal. - - """ - return _BindParamClause(None, value, type_=type_, unique=True) - -def tuple_(*expr): - """Return a SQL tuple. - - Main usage is to produce a composite IN construct:: - - tuple_(table.c.col1, table.c.col2).in_( - [(1, 2), (5, 12), (10, 19)] - ) - - .. warning:: - - The composite IN construct is not supported by all backends, - and is currently known to work on Postgresql and MySQL, - but not SQLite. Unsupported backends will raise - a subclass of :class:`~sqlalchemy.exc.DBAPIError` when such - an expression is invoked. - - """ - return _Tuple(*expr) - -def type_coerce(expr, type_): - """Coerce the given expression into the given type, on the Python side only. - - :func:`.type_coerce` is roughly similar to :func:`.cast`, except no - "CAST" expression is rendered - the given type is only applied towards - expression typing and against received result values. - - e.g.:: - - from sqlalchemy.types import TypeDecorator - import uuid - - class AsGuid(TypeDecorator): - impl = String - - def process_bind_param(self, value, dialect): - if value is not None: - return str(value) - else: - return None - - def process_result_value(self, value, dialect): - if value is not None: - return uuid.UUID(value) - else: - return None - - conn.execute( - select([type_coerce(mytable.c.ident, AsGuid)]).\\ - where( - type_coerce(mytable.c.ident, AsGuid) == - uuid.uuid3(uuid.NAMESPACE_URL, 'bar') - ) - ) - - """ - if hasattr(expr, '__clause_expr__'): - return type_coerce(expr.__clause_expr__()) - - elif not isinstance(expr, Visitable): - if expr is None: - return null() - else: - return literal(expr, type_=type_) - else: - return _Label(None, expr, type_=type_) - - -def label(name, obj): - """Return a :class:`_Label` object for the - given :class:`.ColumnElement`. - - A label changes the name of an element in the columns clause of a - ``SELECT`` statement, typically via the ``AS`` SQL keyword. - - This functionality is more conveniently available via the - :func:`label()` method on :class:`.ColumnElement`. - - name - label name - - obj - a :class:`.ColumnElement`. - - """ - return _Label(name, obj) - -def column(text, type_=None): - """Return a textual column clause, as would be in the columns clause of a - ``SELECT`` statement. - - The object returned is an instance of :class:`.ColumnClause`, which - represents the "syntactical" portion of the schema-level - :class:`~sqlalchemy.schema.Column` object. It is often used directly - within :func:`~.expression.select` constructs or with lightweight :func:`~.expression.table` - constructs. - - Note that the :func:`~.expression.column` function is not part of - the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: - - from sqlalchemy.sql import table, column - - :param text: the name of the column. Quoting rules will be applied - to the clause like any other column name. 
For textual column constructs - that are not to be quoted, use the :func:`literal_column` function. - - :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object - which will provide result-set translation for this column. - - See :class:`.ColumnClause` for further examples. - - """ - return ColumnClause(text, type_=type_) - -def literal_column(text, type_=None): - """Return a textual column expression, as would be in the columns - clause of a ``SELECT`` statement. - - The object returned supports further expressions in the same way as any - other column object, including comparison, math and string operations. - The type\_ parameter is important to determine proper expression behavior - (such as, '+' means string concatenation or numerical addition based on - the type). - - :param text: the text of the expression; can be any SQL expression. - Quoting rules will not be applied. To specify a column-name expression - which should be subject to quoting rules, use the :func:`column` - function. - - :param type\_: an optional :class:`~sqlalchemy.types.TypeEngine` object which will - provide result-set translation and additional expression semantics for - this column. If left as None the type will be NullType. - - """ - return ColumnClause(text, type_=type_, is_literal=True) - -def table(name, *columns): - """Represent a textual table clause. - - The object returned is an instance of :class:`.TableClause`, which represents the - "syntactical" portion of the schema-level :class:`~.schema.Table` object. - It may be used to construct lightweight table constructs. - - Note that the :func:`~.expression.table` function is not part of - the ``sqlalchemy`` namespace. It must be imported from the ``sql`` package:: - - from sqlalchemy.sql import table, column - - :param name: Name of the table. - - :param columns: A collection of :func:`~.expression.column` constructs. - - See :class:`.TableClause` for further examples. - - """ - return TableClause(name, *columns) - -def bindparam(key, value=None, type_=None, unique=False, required=False, callable_=None): - """Create a bind parameter clause with the given key. - - :param key: - the key for this bind param. Will be used in the generated - SQL statement for dialects that use named parameters. This - value may be modified when part of a compilation operation, - if other :class:`_BindParamClause` objects exist with the same - key, or if its length is too long and truncation is - required. - - :param value: - Initial value for this bind param. This value may be - overridden by the dictionary of parameters sent to statement - compilation/execution. - - :param callable\_: - A callable function that takes the place of "value". The function - will be called at statement execution time to determine the - ultimate value. Used for scenarios where the actual bind - value cannot be determined at the point at which the clause - construct is created, but embedded bind values are still desirable. - - :param type\_: - A ``TypeEngine`` object that will be used to pre-process the - value corresponding to this :class:`_BindParamClause` at - execution time. - - :param unique: - if True, the key name of this BindParamClause will be - modified if another :class:`_BindParamClause` of the same name - already has been located within the containing - :class:`.ClauseElement`. - - :param required: - a value is required at execution time. 
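    A short sketch of deferred binding (names illustrative)::

        from sqlalchemy.sql import table, column, select, bindparam

        users = table('users', column('id'), column('name'))
        stmt = select([users.c.id]).where(
            users.c.name == bindparam('username'))
        print stmt
        # SELECT users.id FROM users WHERE users.name = :username

        # the actual value is supplied at execution time:
        # conn.execute(stmt, username='ed')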
- - """ - if isinstance(key, ColumnClause): - return _BindParamClause(key.name, value, type_=key.type, - callable_=callable_, - unique=unique, required=required) - else: - return _BindParamClause(key, value, type_=type_, - callable_=callable_, - unique=unique, required=required) - -def outparam(key, type_=None): - """Create an 'OUT' parameter for usage in functions (stored procedures), - for databases which support them. - - The ``outparam`` can be used like a regular function parameter. - The "output" value will be available from the - :class:`~sqlalchemy.engine.ResultProxy` object via its ``out_parameters`` - attribute, which returns a dictionary containing the values. - - """ - return _BindParamClause( - key, None, type_=type_, unique=False, isoutparam=True) - -def text(text, bind=None, *args, **kwargs): - """Create a SQL construct that is represented by a literal string. - - E.g.:: - - t = text("SELECT * FROM users") - result = connection.execute(t) - - The advantages :func:`text` provides over a plain string are - backend-neutral support for bind parameters, per-statement - execution options, as well as - bind parameter and result-column typing behavior, allowing - SQLAlchemy type constructs to play a role when executing - a statement that is specified literally. - - Bind parameters are specified by name, using the format ``:name``. - E.g.:: - - t = text("SELECT * FROM users WHERE id=:user_id") - result = connection.execute(t, user_id=12) - - To invoke SQLAlchemy typing logic for bind parameters, the - ``bindparams`` list allows specification of :func:`bindparam` - constructs which specify the type for a given name:: - - t = text("SELECT id FROM users WHERE updated_at>:updated", - bindparams=[bindparam('updated', DateTime())] - ) - - Typing during result row processing is also an important concern. - Result column types - are specified using the ``typemap`` dictionary, where the keys - match the names of columns. These names are taken from what - the DBAPI returns as ``cursor.description``:: - - t = text("SELECT id, name FROM users", - typemap={ - 'id':Integer, - 'name':Unicode - } - ) - - The :func:`text` construct is used internally for most cases when - a literal string is specified for part of a larger query, such as - within :func:`select()`, :func:`update()`, - :func:`insert()` or :func:`delete()`. In those cases, the same - bind parameter syntax is applied:: - - s = select([users.c.id, users.c.name]).where("id=:user_id") - result = connection.execute(s, user_id=12) - - Using :func:`text` explicitly usually implies the construction - of a full, standalone statement. As such, SQLAlchemy refers - to it as an :class:`.Executable` object, and it supports - the :meth:`Executable.execution_options` method. For example, - a :func:`text` construct that should be subject to "autocommit" - can be set explicitly so using the ``autocommit`` option:: - - t = text("EXEC my_procedural_thing()").\\ - execution_options(autocommit=True) - - Note that SQLAlchemy's usual "autocommit" behavior applies to - :func:`text` constructs - that is, statements which begin - with a phrase such as ``INSERT``, ``UPDATE``, ``DELETE``, - or a variety of other phrases specific to certain backends, will - be eligible for autocommit if no transaction is in progress. - - :param text: - the text of the SQL statement to be created. use ``:`` - to specify bind parameters; they will be compiled to their - engine-specific format. - - :param autocommit: - Deprecated. 
Use .execution_options(autocommit=) - to set the autocommit option. - - :param bind: - an optional connection or engine to be used for this text query. - - :param bindparams: - a list of :func:`bindparam()` instances which can be used to define - the types and/or initial values for the bind parameters within - the textual statement; the keynames of the bindparams must match - those within the text of the statement. The types will be used - for pre-processing on bind values. - - :param typemap: - a dictionary mapping the names of columns represented in the - columns clause of a ``SELECT`` statement to type objects, - which will be used to perform post-processing on columns within - the result set. This argument applies to any expression - that returns result sets. - - """ - return _TextClause(text, bind=bind, *args, **kwargs) - -def over(func, partition_by=None, order_by=None): - """Produce an OVER clause against a function. - - Used against aggregate or so-called "window" functions, - for database backends that support window functions. - - E.g.:: - - from sqlalchemy import over - over(func.row_number(), order_by='x') - - Would produce "ROW_NUMBER() OVER(ORDER BY x)". - - :param func: a :class:`.FunctionElement` construct, typically - generated by :attr:`~.expression.func`. - :param partition_by: a column element or string, or a list - of such, that will be used as the PARTITION BY clause - of the OVER construct. - :param order_by: a column element or string, or a list - of such, that will be used as the ORDER BY clause - of the OVER construct. - - This function is also available from the :attr:`~.expression.func` - construct itself via the :meth:`.FunctionElement.over` method. - - .. versionadded:: 0.7 - - """ - return _Over(func, partition_by=partition_by, order_by=order_by) - -def null(): - """Return a :class:`_Null` object, which compiles to ``NULL``. - - """ - return _Null() - -def true(): - """Return a :class:`_True` object, which compiles to ``true``, or the - boolean equivalent for the target dialect. - - """ - return _True() - -def false(): - """Return a :class:`_False` object, which compiles to ``false``, or the - boolean equivalent for the target dialect. - - """ - return _False() - -class _FunctionGenerator(object): - """Generate :class:`.Function` objects based on getattr calls.""" - - def __init__(self, **opts): - self.__names = [] - self.opts = opts - - def __getattr__(self, name): - # passthru __ attributes; fixes pydoc - if name.startswith('__'): - try: - return self.__dict__[name] - except KeyError: - raise AttributeError(name) - - elif name.endswith('_'): - name = name[0:-1] - f = _FunctionGenerator(**self.opts) - f.__names = list(self.__names) + [name] - return f - - def __call__(self, *c, **kwargs): - o = self.opts.copy() - o.update(kwargs) - if len(self.__names) == 1: - func = getattr(functions, self.__names[-1].lower(), None) - if func is not None and \ - isinstance(func, type) and \ - issubclass(func, Function): - return func(*c, **o) - - return Function(self.__names[-1], - packagenames=self.__names[0:-1], *c, **o) - -# "func" global - i.e. func.count() -func = _FunctionGenerator() -"""Generate SQL function expressions. 
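    Before the details below, one combined sketch with the :func:`.over`
    construct above (table and column names illustrative)::

        from sqlalchemy import func, over
        from sqlalchemy.sql import table, column, select

        emp = table('emp', column('dept'), column('salary'))
        print select([over(func.rank(), partition_by=emp.c.dept,
                           order_by=emp.c.salary)])
        # SELECT rank() OVER (PARTITION BY emp.dept ORDER BY emp.salary)
        # AS anon_1 FROM emp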
- - ``func`` is a special object instance which generates SQL functions based on name-based attributes, e.g.:: - - >>> print func.count(1) - count(:param_1) - - The element is a column-oriented SQL element like any other, and is - used in that way:: - - >>> print select([func.count(table.c.id)]) - SELECT count(sometable.id) FROM sometable - - Any name can be given to ``func``. If the function name is unknown to - SQLAlchemy, it will be rendered exactly as is. For common SQL functions - which SQLAlchemy is aware of, the name may be interpreted as a *generic - function* which will be compiled appropriately to the target database:: - - >>> print func.current_timestamp() - CURRENT_TIMESTAMP - - To call functions which are present in dot-separated packages, specify them in the same manner:: - - >>> print func.stats.yield_curve(5, 10) - stats.yield_curve(:yield_curve_1, :yield_curve_2) - - SQLAlchemy can be made aware of the return type of functions to enable - type-specific lexical and result-based behavior. For example, to ensure - that a string-based function returns a Unicode value and is similarly - treated as a string in expressions, specify - :class:`~sqlalchemy.types.Unicode` as the type: - - >>> print func.my_string(u'hi', type_=Unicode) + ' ' + \ - ... func.my_string(u'there', type_=Unicode) - my_string(:my_string_1) || :my_string_2 || my_string(:my_string_3) - - The object returned by a ``func`` call is an instance of :class:`.Function`. - This object meets the "column" interface, including comparison and labeling - functions. The object can also be passed the :meth:`~.Connectable.execute` - method of a :class:`.Connection` or :class:`.Engine`, where it will be - wrapped inside of a SELECT statement first:: - - print connection.execute(func.current_timestamp()).scalar() - - A function can also be "bound" to a :class:`.Engine` or :class:`.Connection` - using the ``bind`` keyword argument, providing an execute() as well - as a scalar() method:: - - myfunc = func.current_timestamp(bind=some_engine) - print myfunc.scalar() - - Functions which are interpreted as "generic" functions know how to - calculate their return type automatically. For a listing of known generic - functions, see :ref:`generic_functions`. - -""" - -# "modifier" global - i.e. modifier.distinct -# TODO: use UnaryExpression for this instead ? -modifier = _FunctionGenerator(group=False) - -class _truncated_label(unicode): - """A unicode subclass used to identify symbolic " - "names that may require truncation.""" - - def apply_map(self, map_): - return self - -# for backwards compatibility in case -# someone is re-implementing the -# _truncated_identifier() sequence in a custom -# compiler -_generated_label = _truncated_label - -class _anonymous_label(_truncated_label): - """A unicode subclass used to identify anonymously - generated names.""" - - def __add__(self, other): - return _anonymous_label( - unicode(self) + - unicode(other)) - - def __radd__(self, other): - return _anonymous_label( - unicode(other) + - unicode(self)) - - def apply_map(self, map_): - return self % map_ - -def _as_truncated(value): - """coerce the given value to :class:`._truncated_label`. - - Existing :class:`._truncated_label` and - :class:`._anonymous_label` objects are passed - unchanged. 
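    A rough sketch of the intent (internal API; the exact shortening scheme
    varies by compiler, e.g. ``_index_identifier`` above uses an md5
    suffix)::

        from sqlalchemy.sql.expression import _as_truncated, _truncated_label

        name = _as_truncated('a_generated_label_that_may_be_very_long')
        assert isinstance(name, _truncated_label)
        # at compile time, names of this type that exceed the dialect's
        # identifier length limit are shortened, e.g. to
        #   name[0:max - 8] + "_" + md5_hex(name)[-4:]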
- """ - - if isinstance(value, _truncated_label): - return value - else: - return _truncated_label(value) - -def _string_or_unprintable(element): - if isinstance(element, basestring): - return element - else: - try: - return str(element) - except: - return "unprintable element %r" % element - -def _clone(element, **kw): - return element._clone() - -def _expand_cloned(elements): - """expand the given set of ClauseElements to be the set of all 'cloned' - predecessors. - - """ - return itertools.chain(*[x._cloned_set for x in elements]) - -def _select_iterables(elements): - """expand tables into individual columns in the - given list of column expressions. - - """ - return itertools.chain(*[c._select_iterable for c in elements]) - -def _cloned_intersection(a, b): - """return the intersection of sets a and b, counting - any overlap between 'cloned' predecessors. - - The returned set is in terms of the entities present within 'a'. - - """ - all_overlap = set(_expand_cloned(a)).intersection(_expand_cloned(b)) - return set(elem for elem in a - if all_overlap.intersection(elem._cloned_set)) - - -def _is_literal(element): - return not isinstance(element, Visitable) and \ - not hasattr(element, '__clause_element__') - -def _from_objects(*elements): - return itertools.chain(*[element._from_objects for element in elements]) - -def _labeled(element): - if not hasattr(element, 'name'): - return element.label(None) - else: - return element - -def _column_as_key(element): - if isinstance(element, basestring): - return element - if hasattr(element, '__clause_element__'): - element = element.__clause_element__() - return element.key - -def _literal_as_text(element): - if isinstance(element, Visitable): - return element - elif hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif isinstance(element, basestring): - return _TextClause(unicode(element)) - elif isinstance(element, (util.NoneType, bool)): - return _const_expr(element) - else: - raise exc.ArgumentError( - "SQL expression object or string expected." - ) - -def _const_expr(element): - if element is None: - return null() - elif element is False: - return false() - elif element is True: - return true() - else: - raise exc.ArgumentError( - "Expected None, False, or True" - ) - -def _clause_element_as_expr(element): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - else: - return element - -def _literal_as_column(element): - if isinstance(element, Visitable): - return element - elif hasattr(element, '__clause_element__'): - return element.__clause_element__() - else: - return literal_column(str(element)) - -def _literal_as_binds(element, name=None, type_=None): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif not isinstance(element, Visitable): - if element is None: - return null() - else: - return _BindParamClause(name, element, type_=type_, unique=True) - else: - return element - -def _type_from_args(args): - for a in args: - if not isinstance(a.type, sqltypes.NullType): - return a.type - else: - return sqltypes.NullType - -def _no_literals(element): - if hasattr(element, '__clause_element__'): - return element.__clause_element__() - elif not isinstance(element, Visitable): - raise exc.ArgumentError("Ambiguous literal: %r. Use the 'text()' " - "function to indicate a SQL expression " - "literal, or 'literal()' to indicate a " - "bound value." 
% element)
-    else:
-        return element
-
-def _only_column_elements_or_none(element, name):
-    if element is None:
-        return None
-    else:
-        return _only_column_elements(element, name)
-
-def _only_column_elements(element, name):
-    if hasattr(element, '__clause_element__'):
-        element = element.__clause_element__()
-    if not isinstance(element, ColumnElement):
-        raise exc.ArgumentError(
-            "Column-based expression object expected for argument "
-            "'%s'; got: '%s', type %s" % (name, element, type(element)))
-    return element
-
-def _corresponding_column_or_error(fromclause, column,
-                                   require_embedded=False):
-    c = fromclause.corresponding_column(column,
-                                        require_embedded=require_embedded)
-    if c is None:
-        raise exc.InvalidRequestError(
-            "Given column '%s', attached to table '%s', "
-            "failed to locate a corresponding column from table '%s'"
-            %
-            (column,
-             getattr(column, 'table', None), fromclause.description)
-        )
-    return c
-
-@util.decorator
-def _generative(fn, *args, **kw):
-    """Mark a method as generative."""
-
-    self = args[0]._generate()
-    fn(self, *args[1:], **kw)
-    return self
-
-
-def is_column(col):
-    """True if ``col`` is an instance of :class:`.ColumnElement`."""
-
-    return isinstance(col, ColumnElement)
-
-
-class ClauseElement(Visitable):
-    """Base class for elements of a programmatically constructed SQL
-    expression.
-
-    """
-    __visit_name__ = 'clause'
-
-    _annotations = {}
-    supports_execution = False
-    _from_objects = []
-    bind = None
-    _is_clone_of = None
-
-    def _clone(self):
-        """Create a shallow copy of this ClauseElement.
-
-        This method may be used by a generative API. It's also used as
-        part of the "deep" copy afforded by a traversal that combines
-        the _copy_internals() method.
-
-        """
-        c = self.__class__.__new__(self.__class__)
-        c.__dict__ = self.__dict__.copy()
-        c.__dict__.pop('_cloned_set', None)
-
-        # this is a marker that helps to "equate" clauses to each other
-        # when a Select returns its list of FROM clauses. the cloning
-        # process leaves around a lot of remnants of the previous clause
-        # typically in the form of column expressions still attached to the
-        # old table.
-        c._is_clone_of = self
-
-        return c
-
-    @property
-    def _constructor(self):
-        """return the 'constructor' for this ClauseElement.
-
-        This is for the purpose of creating a new object of
-        this type. Usually, it's just the element's __class__.
-        However, the "Annotated" version of the object overrides
-        to return the class of its proxied element.
-
-        """
-        return self.__class__
-
-    @util.memoized_property
-    def _cloned_set(self):
-        """Return the set consisting of all cloned ancestors of this
-        ClauseElement.
-
-        Includes this ClauseElement. This accessor tends to be used for
-        FromClause objects to identify 'equivalent' FROM clauses, regardless
-        of transformative operations.
-
-        """
-        s = util.column_set()
-        f = self
-        while f is not None:
-            s.add(f)
-            f = f._is_clone_of
-        return s
-
-    def __getstate__(self):
-        d = self.__dict__.copy()
-        d.pop('_is_clone_of', None)
-        return d
-
-    if util.jython:
-        def __hash__(self):
-            """Return a distinct hash code.
-
-            ClauseElements may have special equality comparisons which
-            makes us rely on them having unique hash codes for use in
-            hash-based collections. Stock __hash__ doesn't guarantee
-            unique values on platforms with moving GCs.
-            """
-            return id(self)
-
-    def _annotate(self, values):
-        """return a copy of this ClauseElement with the given annotations
-        dictionary.
- - """ - return sqlutil.Annotated(self, values) - - def _deannotate(self): - """return a copy of this ClauseElement with an empty annotations - dictionary. - - """ - return self._clone() - - def unique_params(self, *optionaldict, **kwargs): - """Return a copy with :func:`bindparam()` elements replaced. - - Same functionality as ``params()``, except adds `unique=True` - to affected bind parameters so that multiple statements can be - used. - - """ - return self._params(True, optionaldict, kwargs) - - def params(self, *optionaldict, **kwargs): - """Return a copy with :func:`bindparam()` elements replaced. - - Returns a copy of this ClauseElement with :func:`bindparam()` - elements replaced with values taken from the given dictionary:: - - >>> clause = column('x') + bindparam('foo') - >>> print clause.compile().params - {'foo':None} - >>> print clause.params({'foo':7}).compile().params - {'foo':7} - - """ - return self._params(False, optionaldict, kwargs) - - def _params(self, unique, optionaldict, kwargs): - if len(optionaldict) == 1: - kwargs.update(optionaldict[0]) - elif len(optionaldict) > 1: - raise exc.ArgumentError( - "params() takes zero or one positional dictionary argument") - - def visit_bindparam(bind): - if bind.key in kwargs: - bind.value = kwargs[bind.key] - if unique: - bind._convert_to_unique() - return cloned_traverse(self, {}, {'bindparam':visit_bindparam}) - - def compare(self, other, **kw): - """Compare this ClauseElement to the given ClauseElement. - - Subclasses should override the default behavior, which is a - straight identity comparison. - - \**kw are arguments consumed by subclass compare() methods and - may be used to modify the criteria for comparison. - (see :class:`.ColumnElement`) - - """ - return self is other - - def _copy_internals(self, clone=_clone, **kw): - """Reassign internal elements to be clones of themselves. - - Called during a copy-and-traverse operation on newly - shallow-copied elements to create a deep copy. - - The given clone function should be used, which may be applying - additional transformations to the element (i.e. replacement - traversal, cloned traversal, annotations). - - """ - pass - - def get_children(self, **kwargs): - """Return immediate child elements of this :class:`.ClauseElement`. - - This is used for visit traversal. - - \**kwargs may contain flags that change the collection that is - returned, for example to return a subset of items in order to - cut down on larger traversals, or to return child items from a - different context (such as schema-level collections instead of - clause-level). - - """ - return [] - - def self_group(self, against=None): - """Apply a 'grouping' to this :class:`.ClauseElement`. - - This method is overridden by subclasses to return a - "grouping" construct, i.e. parenthesis. In particular - it's used by "binary" expressions to provide a grouping - around themselves when placed into a larger expression, - as well as by :func:`.select` constructs when placed into - the FROM clause of another :func:`.select`. (Note that - subqueries should be normally created using the - :func:`.Select.alias` method, as many platforms require - nested SELECT statements to be named). - - As expressions are composed together, the application of - :meth:`self_group` is automatic - end-user code should never - need to use this method directly. 
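``params()`` and ``unique_params()`` above return modified copies rather than mutating the clause in place. A doctest-style sketch of that behavior:

```python
from sqlalchemy.sql import column, bindparam

clause = column('x') + bindparam('foo')
print clause.compile().params                      # {'foo': None}
print clause.params({'foo': 7}).compile().params   # {'foo': 7}

# unique_params() also anonymizes the bind keys, so two copies of the
# same clause can live in one statement without key collisions
c1 = clause.unique_params(foo=5)
c2 = clause.unique_params(foo=10)
print (c1 == c2).compile().params                  # e.g. {'foo_1': 5, 'foo_2': 10}
```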
Note that SQLAlchemy's - clause constructs take operator precedence into account - - so parenthesis might not be needed, for example, in - an expression like ``x OR (y AND z)`` - AND takes precedence - over OR. - - The base :meth:`self_group` method of :class:`.ClauseElement` - just returns self. - """ - return self - - - @util.deprecated('0.7', - 'Only SQL expressions which subclass ' - ':class:`.Executable` may provide the ' - ':func:`.execute` method.') - def execute(self, *multiparams, **params): - """Compile and execute this :class:`.ClauseElement`. - - """ - e = self.bind - if e is None: - label = getattr(self, 'description', self.__class__.__name__) - msg = ('This %s does not support direct execution.' % label) - raise exc.UnboundExecutionError(msg) - return e._execute_clauseelement(self, multiparams, params) - - @util.deprecated('0.7', - 'Only SQL expressions which subclass ' - ':class:`.Executable` may provide the ' - ':func:`.scalar` method.') - def scalar(self, *multiparams, **params): - """Compile and execute this :class:`.ClauseElement`, returning - the result's scalar representation. - - """ - return self.execute(*multiparams, **params).scalar() - - def compile(self, bind=None, dialect=None, **kw): - """Compile this SQL expression. - - The return value is a :class:`~sqlalchemy.engine.Compiled` object. - Calling ``str()`` or ``unicode()`` on the returned value will yield a - string representation of the result. The - :class:`~sqlalchemy.engine.Compiled` object also can return a - dictionary of bind parameter names and values - using the ``params`` accessor. - - :param bind: An ``Engine`` or ``Connection`` from which a - ``Compiled`` will be acquired. This argument takes precedence over - this :class:`.ClauseElement`'s bound engine, if any. - - :param column_keys: Used for INSERT and UPDATE statements, a list of - column names which should be present in the VALUES clause of the - compiled statement. If ``None``, all columns from the target table - object are rendered. - - :param dialect: A ``Dialect`` instance from which a ``Compiled`` - will be acquired. This argument takes precedence over the `bind` - argument as well as this :class:`.ClauseElement`'s bound engine, if - any. - - :param inline: Used for INSERT statements, for a dialect which does - not support inline retrieval of newly generated primary key - columns, will force the expression used to create the new primary - key value to be rendered inline within the INSERT statement's - VALUES clause. This typically refers to Sequence execution but may - also refer to any server-side default generation function - associated with a primary key `Column`. 
- - """ - - if not dialect: - if bind: - dialect = bind.dialect - elif self.bind: - dialect = self.bind.dialect - bind = self.bind - else: - dialect = default.DefaultDialect() - return self._compiler(dialect, bind=bind, **kw) - - def _compiler(self, dialect, **kw): - """Return a compiler appropriate for this ClauseElement, given a - Dialect.""" - - return dialect.statement_compiler(dialect, self, **kw) - - def __str__(self): - # Py3K - #return unicode(self.compile()) - # Py2K - return unicode(self.compile()).encode('ascii', 'backslashreplace') - # end Py2K - - def __and__(self, other): - return and_(self, other) - - def __or__(self, other): - return or_(self, other) - - def __invert__(self): - return self._negate() - - def __nonzero__(self): - raise TypeError("Boolean value of this clause is not defined") - - def _negate(self): - if hasattr(self, 'negation_clause'): - return self.negation_clause - else: - return _UnaryExpression( - self.self_group(against=operators.inv), - operator=operators.inv, - negate=None) - - def __repr__(self): - friendly = getattr(self, 'description', None) - if friendly is None: - return object.__repr__(self) - else: - return '<%s.%s at 0x%x; %s>' % ( - self.__module__, self.__class__.__name__, id(self), friendly) - - -class _Immutable(object): - """mark a ClauseElement as 'immutable' when expressions are cloned.""" - - def unique_params(self, *optionaldict, **kwargs): - raise NotImplementedError("Immutable objects do not support copying") - - def params(self, *optionaldict, **kwargs): - raise NotImplementedError("Immutable objects do not support copying") - - def _clone(self): - return self - - -class _CompareMixin(ColumnOperators): - """Defines comparison and math operations for :class:`.ClauseElement` - instances. - - See :class:`.ColumnOperators` and :class:`.Operators` for descriptions - of all operations. 
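``compile()`` above accepts either a ``bind`` or an explicit ``dialect``; compiling one construct under two dialects shows the paramstyle and syntax differences. A sketch, assuming the stock ``sqlite`` and ``postgresql`` dialects are available:

```python
from sqlalchemy.sql import table, column, select
from sqlalchemy.dialects import sqlite, postgresql

t = table('t', column('id'), column('name'))
stmt = select([t]).where(t.c.id == 7)

print stmt.compile(dialect=sqlite.dialect())      # qmark style: ... WHERE t.id = ?
print stmt.compile(dialect=postgresql.dialect())  # pyformat: ... WHERE t.id = %(id_1)s

compiled = stmt.compile()
print compiled.params    # {'id_1': 7} -- the ``params`` accessor noted above
```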
- - """ - - def __compare(self, op, obj, negate=None, reverse=False, - **kwargs - ): - if obj is None or isinstance(obj, _Null): - if op in (operators.eq, operators.is_): - return _BinaryExpression(self, null(), operators.is_, - negate=operators.isnot) - elif op in (operators.ne, operators.isnot): - return _BinaryExpression(self, null(), operators.isnot, - negate=operators.is_) - else: - raise exc.ArgumentError("Only '='/'!=' operators can " - "be used with NULL") - else: - obj = self._check_literal(op, obj) - - if reverse: - return _BinaryExpression(obj, - self, - op, - type_=sqltypes.BOOLEANTYPE, - negate=negate, modifiers=kwargs) - else: - return _BinaryExpression(self, - obj, - op, - type_=sqltypes.BOOLEANTYPE, - negate=negate, modifiers=kwargs) - - def __operate(self, op, obj, reverse=False): - obj = self._check_literal(op, obj) - - if reverse: - left, right = obj, self - else: - left, right = self, obj - - if left.type is None: - op, result_type = sqltypes.NULLTYPE._adapt_expression(op, - right.type) - elif right.type is None: - op, result_type = left.type._adapt_expression(op, - sqltypes.NULLTYPE) - else: - op, result_type = left.type._adapt_expression(op, - right.type) - return _BinaryExpression(left, right, op, type_=result_type) - - - # a mapping of operators with the method they use, along with their negated - # operator for comparison operators - operators = { - operators.add : (__operate,), - operators.mul : (__operate,), - operators.sub : (__operate,), - # Py2K - operators.div : (__operate,), - # end Py2K - operators.mod : (__operate,), - operators.truediv : (__operate,), - operators.lt : (__compare, operators.ge), - operators.le : (__compare, operators.gt), - operators.ne : (__compare, operators.eq), - operators.gt : (__compare, operators.le), - operators.ge : (__compare, operators.lt), - operators.eq : (__compare, operators.ne), - operators.like_op : (__compare, operators.notlike_op), - operators.ilike_op : (__compare, operators.notilike_op), - operators.is_ : (__compare, operators.is_), - operators.isnot : (__compare, operators.isnot), - } - - def operate(self, op, *other, **kwargs): - o = _CompareMixin.operators[op] - return o[0](self, op, other[0], *o[1:], **kwargs) - - def reverse_operate(self, op, other, **kwargs): - o = _CompareMixin.operators[op] - return o[0](self, op, other, reverse=True, *o[1:], **kwargs) - - def in_(self, other): - """See :meth:`.ColumnOperators.in_`.""" - return self._in_impl(operators.in_op, operators.notin_op, other) - - def _in_impl(self, op, negate_op, seq_or_selectable): - seq_or_selectable = _clause_element_as_expr(seq_or_selectable) - - if isinstance(seq_or_selectable, _ScalarSelect): - return self.__compare(op, seq_or_selectable, - negate=negate_op) - elif isinstance(seq_or_selectable, _SelectBase): - - # TODO: if we ever want to support (x, y, z) IN (select x, - # y, z from table), we would need a multi-column version of - # as_scalar() to produce a multi- column selectable that - # does not export itself as a FROM clause - - return self.__compare(op, seq_or_selectable.as_scalar(), - negate=negate_op) - elif isinstance(seq_or_selectable, (Selectable, _TextClause)): - return self.__compare(op, seq_or_selectable, - negate=negate_op) - - - # Handle non selectable arguments as sequences - - args = [] - for o in seq_or_selectable: - if not _is_literal(o): - if not isinstance(o, _CompareMixin): - raise exc.InvalidRequestError('in() function accept' - 's either a list of non-selectable values, ' - 'or a selectable: %r' % o) - else: - o = 
self._bind_param(op, o)
-            args.append(o)
-        if len(args) == 0:
-
-            # Special case handling for empty IN's, behave like
-            # comparison against zero row selectable. We use != to
-            # build the contradiction as it handles NULL values
-            # appropriately, i.e. "not (x IN ())" should not return NULL
-            # values for x.
-
-            util.warn('The IN-predicate on "%s" was invoked with an '
-                      'empty sequence. This results in a '
-                      'contradiction, which nonetheless can be '
-                      'expensive to evaluate. Consider alternative '
-                      'strategies for improved performance.' % self)
-            return self != self
-
-        return self.__compare(op,
-                              ClauseList(*args).self_group(against=op),
-                              negate=negate_op)
-
-    def __neg__(self):
-        """See :meth:`.ColumnOperators.__neg__`."""
-        return _UnaryExpression(self, operator=operators.neg)
-
-    def startswith(self, other, escape=None):
-        """See :meth:`.ColumnOperators.startswith`."""
-        # use __radd__ to force string concat behavior
-        return self.__compare(
-            operators.like_op,
-            literal_column("'%'", type_=sqltypes.String).__radd__(
-                self._check_literal(operators.like_op, other)
-            ),
-            escape=escape)
-
-    def endswith(self, other, escape=None):
-        """See :meth:`.ColumnOperators.endswith`."""
-        return self.__compare(
-            operators.like_op,
-            literal_column("'%'", type_=sqltypes.String) +
-            self._check_literal(operators.like_op, other),
-            escape=escape)
-
-    def contains(self, other, escape=None):
-        """See :meth:`.ColumnOperators.contains`."""
-        return self.__compare(
-            operators.like_op,
-            literal_column("'%'", type_=sqltypes.String) +
-            self._check_literal(operators.like_op, other) +
-            literal_column("'%'", type_=sqltypes.String),
-            escape=escape)
-
-    def match(self, other):
-        """See :meth:`.ColumnOperators.match`."""
-        return self.__compare(operators.match_op,
-                              self._check_literal(operators.match_op,
-                                                  other))
-
-    def label(self, name):
-        """Produce a column label, i.e. ``<columnname> AS <name>``.
-
-        This is a shortcut to the :func:`~.expression.label` function.
-
-        If 'name' is None, an anonymous label name will be generated.
-
-        """
-        return _Label(name, self, self.type)
-
-    def desc(self):
-        """See :meth:`.ColumnOperators.desc`."""
-        return desc(self)
-
-    def asc(self):
-        """See :meth:`.ColumnOperators.asc`."""
-        return asc(self)
-
-    def nullsfirst(self):
-        """See :meth:`.ColumnOperators.nullsfirst`."""
-        return nullsfirst(self)
-
-    def nullslast(self):
-        """See :meth:`.ColumnOperators.nullslast`."""
-        return nullslast(self)
-
-    def distinct(self):
-        """See :meth:`.ColumnOperators.distinct`."""
-        return _UnaryExpression(self, operator=operators.distinct_op,
-                                type_=self.type)
-
-    def between(self, cleft, cright):
-        """See :meth:`.ColumnOperators.between`."""
-        return _BinaryExpression(
-            self,
-            ClauseList(
-                self._check_literal(operators.and_, cleft),
-                self._check_literal(operators.and_, cright),
-                operator=operators.and_,
-                group=False),
-            operators.between_op)
-
-    def collate(self, collation):
-        """See :meth:`.ColumnOperators.collate`."""
-
-        return collate(self, collation)
-
-    def op(self, operator):
-        """See :meth:`.ColumnOperators.op`."""
-
-        return lambda other: self.__operate(operator, other)
-
-    def _bind_param(self, operator, obj):
-        return _BindParamClause(None, obj,
-                                _compared_to_operator=operator,
-                                _compared_to_type=self.type, unique=True)
-
-    def _check_literal(self, operator, other):
-        if isinstance(other, _BindParamClause) and \
-            isinstance(other.type, sqltypes.NullType):
-            # TODO: perhaps we should not mutate the incoming bindparam()
-            # here and instead make a copy of it.
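The private comparison plumbing above is what the public column operators ride on. A short sketch of the visible behavior, including the ``IS NULL`` coercion, the ``'%'`` concatenation used by the LIKE helpers, and the empty-``IN`` contradiction warning:

```python
from sqlalchemy.sql import column

name = column('name')
print name == None           # name IS NULL (only =/!= are accepted against NULL)
print ~(name == None)        # name IS NOT NULL, via the negate= operator
print name.startswith('ab')  # name LIKE :name_1 || '%'
print name.contains('ab')    # name LIKE '%' || :name_1 || '%'
print name.between(1, 5)     # name BETWEEN :name_1 AND :name_2
print name.in_([])           # warns, then yields the contradiction name != name
```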
this might - # be the only place that we're mutating an incoming construct. - other.type = self.type - return other - elif hasattr(other, '__clause_element__'): - other = other.__clause_element__() - if isinstance(other, (_SelectBase, Alias)): - other = other.as_scalar() - return other - elif not isinstance(other, ClauseElement): - return self._bind_param(operator, other) - elif isinstance(other, (_SelectBase, Alias)): - return other.as_scalar() - else: - return other - - -class ColumnElement(ClauseElement, _CompareMixin): - """Represent an element that is usable within the "column clause" portion - of a ``SELECT`` statement. - - This includes columns associated with tables, aliases, and - subqueries, expressions, function calls, SQL keywords such as - ``NULL``, literals, etc. :class:`.ColumnElement` is the ultimate base - class for all such elements. - - :class:`.ColumnElement` supports the ability to be a *proxy* element, - which indicates that the :class:`.ColumnElement` may be associated with - a :class:`.Selectable` which was derived from another :class:`.Selectable`. - An example of a "derived" :class:`.Selectable` is an :class:`.Alias` of a - :class:`~sqlalchemy.schema.Table`. - - A :class:`.ColumnElement`, by subclassing the :class:`_CompareMixin` mixin - class, provides the ability to generate new :class:`.ClauseElement` - objects using Python expressions. See the :class:`_CompareMixin` - docstring for more details. - - """ - - __visit_name__ = 'column' - primary_key = False - foreign_keys = [] - quote = None - _label = None - _key_label = None - _alt_names = () - - @property - def _select_iterable(self): - return (self, ) - - @util.memoized_property - def base_columns(self): - return util.column_set(c for c in self.proxy_set - if not hasattr(c, 'proxies')) - - @util.memoized_property - def proxy_set(self): - s = util.column_set([self]) - if hasattr(self, 'proxies'): - for c in self.proxies: - s.update(c.proxy_set) - return s - - def shares_lineage(self, othercolumn): - """Return True if the given :class:`.ColumnElement` - has a common ancestor to this :class:`.ColumnElement`.""" - - return bool(self.proxy_set.intersection(othercolumn.proxy_set)) - - def _compare_name_for_result(self, other): - """Return True if the given column element compares to this one - when targeting within a result row.""" - - return hasattr(other, 'name') and hasattr(self, 'name') and \ - other.name == self.name - - def _make_proxy(self, selectable, name=None): - """Create a new :class:`.ColumnElement` representing this - :class:`.ColumnElement` as it appears in the select list of a - descending selectable. - - """ - if name is None: - name = self.anon_label - # TODO: may want to change this to anon_label, - # or some value that is more useful than the - # compiled form of the expression - key = str(self) - else: - key = name - - co = ColumnClause(_as_truncated(name), - selectable, - type_=getattr(self, - 'type', None)) - co.proxies = [self] - if selectable._is_clone_of is not None: - co._is_clone_of = \ - selectable._is_clone_of.columns.get(key) - selectable._columns[key] = co - return co - - def compare(self, other, use_proxies=False, equivalents=None, **kw): - """Compare this ColumnElement to another. - - Special arguments understood: - - :param use_proxies: when True, consider two columns that - share a common base column as equivalent (i.e. shares_lineage()) - - :param equivalents: a dictionary of columns as keys mapped to sets - of columns. 
If the given "other" column is present in this - dictionary, if any of the columns in the corresponding set() pass the - comparison test, the result is True. This is used to expand the - comparison to other columns that may be known to be equivalent to - this one via foreign key or other criterion. - - """ - to_compare = (other, ) - if equivalents and other in equivalents: - to_compare = equivalents[other].union(to_compare) - - for oth in to_compare: - if use_proxies and self.shares_lineage(oth): - return True - elif oth is self: - return True - else: - return False - - @util.memoized_property - def anon_label(self): - """provides a constant 'anonymous label' for this ColumnElement. - - This is a label() expression which will be named at compile time. - The same label() is returned each time anon_label is called so - that expressions can reference anon_label multiple times, producing - the same label name at compile time. - - the compiler uses this function automatically at compile time - for expressions that are known to be 'unnamed' like binary - expressions and function calls. - - """ - return _anonymous_label('%%(%d %s)s' % (id(self), getattr(self, - 'name', 'anon'))) - -class ColumnCollection(util.OrderedProperties): - """An ordered dictionary that stores a list of ColumnElement - instances. - - Overrides the ``__eq__()`` method to produce SQL clauses between - sets of correlated columns. - - """ - - def __init__(self, *cols): - super(ColumnCollection, self).__init__() - self._data.update((c.key, c) for c in cols) - self.__dict__['_all_cols'] = util.column_set(self) - - def __str__(self): - return repr([str(c) for c in self]) - - def replace(self, column): - """add the given column to this collection, removing unaliased - versions of this column as well as existing columns with the - same key. - - e.g.:: - - t = Table('sometable', metadata, Column('col1', Integer)) - t.columns.replace(Column('col1', Integer, key='columnone')) - - will remove the original 'col1' from the collection, and add - the new column under the name 'columnname'. - - Used by schema.Column to override columns during table reflection. - - """ - if column.name in self and column.key != column.name: - other = self[column.name] - if other.name == other.key: - del self._data[other.name] - self._all_cols.remove(other) - if column.key in self._data: - self._all_cols.remove(self._data[column.key]) - self._all_cols.add(column) - self._data[column.key] = column - - def add(self, column): - """Add a column to this collection. - - The key attribute of the column will be used as the hash key - for this dictionary. - - """ - self[column.key] = column - - def __delitem__(self, key): - raise NotImplementedError() - - def __setattr__(self, key, object): - raise NotImplementedError() - - def __setitem__(self, key, value): - if key in self: - - # this warning is primarily to catch select() statements - # which have conflicting column names in their exported - # columns collection - - existing = self[key] - if not existing.shares_lineage(value): - util.warn('Column %r on table %r being replaced by ' - 'another column with the same key. Consider ' - 'use_labels for select() statements.' 
% (key, - getattr(existing, 'table', None))) - self._all_cols.remove(existing) - # pop out memoized proxy_set as this - # operation may very well be occurring - # in a _make_proxy operation - value.__dict__.pop('proxy_set', None) - self._all_cols.add(value) - self._data[key] = value - - def clear(self): - self._data.clear() - self._all_cols.clear() - - def remove(self, column): - del self._data[column.key] - self._all_cols.remove(column) - - def update(self, value): - self._data.update(value) - self._all_cols.clear() - self._all_cols.update(self._data.values()) - - def extend(self, iter): - self.update((c.key, c) for c in iter) - - __hash__ = None - - def __eq__(self, other): - l = [] - for c in other: - for local in self: - if c.shares_lineage(local): - l.append(c==local) - return and_(*l) - - def __contains__(self, other): - if not isinstance(other, basestring): - raise exc.ArgumentError("__contains__ requires a string argument") - return util.OrderedProperties.__contains__(self, other) - - def __setstate__(self, state): - self.__dict__['_data'] = state['_data'] - self.__dict__['_all_cols'] = util.column_set(self._data.values()) - - def contains_column(self, col): - # this has to be done via set() membership - return col in self._all_cols - - def as_immutable(self): - return ImmutableColumnCollection(self._data, self._all_cols) - -class ImmutableColumnCollection(util.ImmutableProperties, ColumnCollection): - def __init__(self, data, colset): - util.ImmutableProperties.__init__(self, data) - self.__dict__['_all_cols'] = colset - - extend = remove = util.ImmutableProperties._immutable - - -class ColumnSet(util.ordered_column_set): - def contains_column(self, col): - return col in self - - def extend(self, cols): - for col in cols: - self.add(col) - - def __add__(self, other): - return list(self) + list(other) - - def __eq__(self, other): - l = [] - for c in other: - for local in self: - if c.shares_lineage(local): - l.append(c==local) - return and_(*l) - - def __hash__(self): - return hash(tuple(x for x in self)) - -class Selectable(ClauseElement): - """mark a class as being selectable""" - __visit_name__ = 'selectable' - -class FromClause(Selectable): - """Represent an element that can be used within the ``FROM`` - clause of a ``SELECT`` statement. - - """ - __visit_name__ = 'fromclause' - named_with_column = False - _hide_froms = [] - quote = None - schema = None - _memoized_property = util.group_expirable_memoized_property(["_columns"]) - - def count(self, whereclause=None, **params): - """return a SELECT COUNT generated against this - :class:`.FromClause`.""" - - if self.primary_key: - col = list(self.primary_key)[0] - else: - col = list(self.columns)[0] - return select( - [func.count(col).label('tbl_row_count')], - whereclause, - from_obj=[self], - **params) - - def select(self, whereclause=None, **params): - """return a SELECT of this :class:`.FromClause`.""" - - return select([self], whereclause, **params) - - def join(self, right, onclause=None, isouter=False): - """return a join of this :class:`.FromClause` against another - :class:`.FromClause`.""" - - return Join(self, right, onclause, isouter) - - def outerjoin(self, right, onclause=None): - """return an outer join of this :class:`.FromClause` against another - :class:`.FromClause`.""" - - return Join(self, right, onclause, True) - - def alias(self, name=None): - """return an alias of this :class:`.FromClause`. 
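A quick sketch of the :class:`.FromClause` conveniences defined above (``select()``, ``join()``, ``count()``, ``alias()``), using hypothetical ``users``/``addresses`` tables:

```python
from sqlalchemy import MetaData, Table, Column, Integer, select

metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True))
addresses = Table('addresses', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer))

j = users.join(addresses, users.c.id == addresses.c.user_id)
print select([users.c.id]).select_from(j)  # SELECT ... FROM users JOIN addresses ON ...
print users.select(users.c.id > 5)         # SELECT ... FROM users WHERE users.id > :id_1
print users.count()                        # SELECT count(users.id) AS tbl_row_count ...

a = users.alias('u')
print a.corresponding_column(users.c.id) is a.c.id   # True -- exported proxy lookup
```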
- - This is shorthand for calling:: - - from sqlalchemy import alias - a = alias(self, name=name) - - See :func:`~.expression.alias` for details. - - """ - - return Alias(self, name) - - def is_derived_from(self, fromclause): - """Return True if this FromClause is 'derived' from the given - FromClause. - - An example would be an Alias of a Table is derived from that Table. - - """ - # this is essentially an "identity" check in the base class. - # Other constructs override this to traverse through - # contained elements. - return fromclause in self._cloned_set - - def _is_lexical_equivalent(self, other): - """Return True if this FromClause and the other represent - the same lexical identity. - - This tests if either one is a copy of the other, or - if they are the same via annotation identity. - - """ - return self._cloned_set.intersection(other._cloned_set) - - def replace_selectable(self, old, alias): - """replace all occurrences of FromClause 'old' with the given Alias - object, returning a copy of this :class:`.FromClause`. - - """ - - return sqlutil.ClauseAdapter(alias).traverse(self) - - def correspond_on_equivalents(self, column, equivalents): - """Return corresponding_column for the given column, or if None - search for a match in the given dictionary. - - """ - col = self.corresponding_column(column, require_embedded=True) - if col is None and col in equivalents: - for equiv in equivalents[col]: - nc = self.corresponding_column(equiv, require_embedded=True) - if nc: - return nc - return col - - def corresponding_column(self, column, require_embedded=False): - """Given a :class:`.ColumnElement`, return the exported - :class:`.ColumnElement` object from this :class:`.Selectable` - which corresponds to that original - :class:`~sqlalchemy.schema.Column` via a common ancestor - column. - - :param column: the target :class:`.ColumnElement` to be matched - - :param require_embedded: only return corresponding columns for - the given :class:`.ColumnElement`, if the given - :class:`.ColumnElement` is actually present within a sub-element - of this :class:`.FromClause`. Normally the column will match if - it merely shares a common ancestor with one of the exported - columns of this :class:`.FromClause`. - - """ - - def embedded(expanded_proxy_set, target_set): - for t in target_set.difference(expanded_proxy_set): - if not set(_expand_cloned([t]) - ).intersection(expanded_proxy_set): - return False - return True - - # don't dig around if the column is locally present - if self.c.contains_column(column): - return column - col, intersect = None, None - target_set = column.proxy_set - cols = self.c - for c in cols: - expanded_proxy_set = set(_expand_cloned(c.proxy_set)) - i = target_set.intersection(expanded_proxy_set) - if i and (not require_embedded - or embedded(expanded_proxy_set, target_set)): - if col is None: - - # no corresponding column yet, pick this one. - - col, intersect = c, i - elif len(i) > len(intersect): - - # 'c' has a larger field of correspondence than - # 'col'. i.e. selectable.c.a1_x->a1.c.x->table.c.x - # matches a1.c.x->table.c.x better than - # selectable.c.x->table.c.x does. - - col, intersect = c, i - elif i == intersect: - - # they have the same field of correspondence. see - # which proxy_set has fewer columns in it, which - # indicates a closer relationship with the root - # column. 
Also take into account the "weight" - # attribute which CompoundSelect() uses to give - # higher precedence to columns based on vertical - # position in the compound statement, and discard - # columns that have no reference to the target - # column (also occurs with CompoundSelect) - - col_distance = util.reduce(operator.add, - [sc._annotations.get('weight', 1) for sc in - col.proxy_set if sc.shares_lineage(column)]) - c_distance = util.reduce(operator.add, - [sc._annotations.get('weight', 1) for sc in - c.proxy_set if sc.shares_lineage(column)]) - if c_distance < col_distance: - col, intersect = c, i - return col - - @property - def description(self): - """a brief description of this FromClause. - - Used primarily for error message formatting. - - """ - return getattr(self, 'name', self.__class__.__name__ + " object") - - def _reset_exported(self): - """delete memoized collections when a FromClause is cloned.""" - - self._memoized_property.expire_instance(self) - - @_memoized_property - def columns(self): - """Return the collection of Column objects contained by this - FromClause.""" - - if '_columns' not in self.__dict__: - self._init_collections() - self._populate_column_collection() - return self._columns.as_immutable() - - @_memoized_property - def primary_key(self): - """Return the collection of Column objects which comprise the - primary key of this FromClause.""" - - self._init_collections() - self._populate_column_collection() - return self.primary_key - - @_memoized_property - def foreign_keys(self): - """Return the collection of ForeignKey objects which this - FromClause references.""" - - self._init_collections() - self._populate_column_collection() - return self.foreign_keys - - c = property(attrgetter('columns')) - _select_iterable = property(attrgetter('columns')) - - def _init_collections(self): - assert '_columns' not in self.__dict__ - assert 'primary_key' not in self.__dict__ - assert 'foreign_keys' not in self.__dict__ - - self._columns = ColumnCollection() - self.primary_key = ColumnSet() - self.foreign_keys = set() - - def _populate_column_collection(self): - pass - -class _BindParamClause(ColumnElement): - """Represent a bind parameter. - - Public constructor is the :func:`bindparam()` function. - - """ - - __visit_name__ = 'bindparam' - quote = None - - def __init__(self, key, value, type_=None, unique=False, - callable_=None, - isoutparam=False, required=False, - _compared_to_operator=None, - _compared_to_type=None): - """Construct a _BindParamClause. - - :param key: - the key for this bind param. Will be used in the generated - SQL statement for dialects that use named parameters. This - value may be modified when part of a compilation operation, - if other :class:`_BindParamClause` objects exist with the same - key, or if its length is too long and truncation is - required. - - :param value: - Initial value for this bind param. This value may be - overridden by the dictionary of parameters sent to statement - compilation/execution. - - :param callable\_: - A callable function that takes the place of "value". The function - will be called at statement execution time to determine the - ultimate value. Used for scenarios where the actual bind - value cannot be determined at the point at which the clause - construct is created, but embedded bind values are still desirable. - - :param type\_: - A ``TypeEngine`` object that will be used to pre-process the - value corresponding to this :class:`_BindParamClause` at - execution time. 
- - :param unique: - if True, the key name of this BindParamClause will be - modified if another :class:`_BindParamClause` of the same name - already has been located within the containing - :class:`.ClauseElement`. - - :param required: - a value is required at execution time. - - :param isoutparam: - if True, the parameter should be treated like a stored procedure - "OUT" parameter. - - """ - if unique: - self.key = _anonymous_label('%%(%d %s)s' % (id(self), key - or 'param')) - else: - self.key = key or _anonymous_label('%%(%d param)s' - % id(self)) - - # identifying key that won't change across - # clones, used to identify the bind's logical - # identity - self._identifying_key = self.key - - # key that was passed in the first place, used to - # generate new keys - self._orig_key = key or 'param' - - self.unique = unique - self.value = value - self.callable = callable_ - self.isoutparam = isoutparam - self.required = required - if type_ is None: - if _compared_to_type is not None: - self.type = \ - _compared_to_type._coerce_compared_value( - _compared_to_operator, value) - else: - self.type = sqltypes._type_map.get(type(value), - sqltypes.NULLTYPE) - elif isinstance(type_, type): - self.type = type_() - else: - self.type = type_ - - @property - def effective_value(self): - """Return the value of this bound parameter, - taking into account if the ``callable`` parameter - was set. - - The ``callable`` value will be evaluated - and returned if present, else ``value``. - - """ - if self.callable: - return self.callable() - else: - return self.value - - def _clone(self): - c = ClauseElement._clone(self) - if self.unique: - c.key = _anonymous_label('%%(%d %s)s' % (id(c), c._orig_key - or 'param')) - return c - - def _convert_to_unique(self): - if not self.unique: - self.unique = True - self.key = _anonymous_label('%%(%d %s)s' % (id(self), - self._orig_key or 'param')) - - def compare(self, other, **kw): - """Compare this :class:`_BindParamClause` to the given - clause.""" - - return isinstance(other, _BindParamClause) \ - and self.type._compare_type_affinity(other.type) \ - and self.value == other.value - - def __getstate__(self): - """execute a deferred value for serialization purposes.""" - - d = self.__dict__.copy() - v = self.value - if self.callable: - v = self.callable() - d['callable'] = None - d['value'] = v - return d - - def __repr__(self): - return '_BindParamClause(%r, %r, type_=%r)' % (self.key, - self.value, self.type) - -class _TypeClause(ClauseElement): - """Handle a type keyword in a SQL statement. - - Used by the ``Case`` statement. - - """ - - __visit_name__ = 'typeclause' - - def __init__(self, type): - self.type = type - - -class _Generative(object): - """Allow a ClauseElement to generate itself via the - @_generative decorator. - - """ - - def _generate(self): - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - return s - - -class Executable(_Generative): - """Mark a ClauseElement as supporting execution. - - :class:`.Executable` is a superclass for all "statement" types - of objects, including :func:`select`, :func:`delete`, :func:`update`, - :func:`insert`, :func:`text`. - - """ - - supports_execution = True - _execution_options = util.immutabledict() - _bind = None - - @_generative - def execution_options(self, **kw): - """ Set non-SQL options for the statement which take effect during - execution. - - Execution options can be set on a per-statement or - per :class:`.Connection` basis. 
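``_BindParamClause`` is normally reached through the public ``bindparam()`` function. A sketch of the ``callable_``, ``unique``, and ``required`` parameters documented above (the names ``now``, ``v``, and ``q`` are arbitrary):

```python
import datetime
from sqlalchemy import bindparam
from sqlalchemy.sql import column

# value deferred until execution via callable_
now = bindparam('now', callable_=datetime.datetime.now)
print now.effective_value    # evaluates the callable, per the property above

# unique=True anonymizes the key, so repeated use cannot collide
expr = (column('x') == bindparam('v', unique=True)) & \
       (column('y') == bindparam('v', unique=True))
print expr.compile().params  # two distinct generated keys, e.g. v_1 and v_2

q = bindparam('q', required=True)   # a value must be supplied at execution time
```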
Additionally, the - :class:`.Engine` and ORM :class:`~.orm.query.Query` objects provide access - to execution options which they in turn configure upon connections. - - The :meth:`execution_options` method is generative. A new - instance of this statement is returned that contains the options:: - - statement = select([table.c.x, table.c.y]) - statement = statement.execution_options(autocommit=True) - - Note that only a subset of possible execution options can be applied - to a statement - these include "autocommit" and "stream_results", - but not "isolation_level" or "compiled_cache". - See :meth:`.Connection.execution_options` for a full list of - possible options. - - See also: - - :meth:`.Connection.execution_options()` - - :meth:`.Query.execution_options()` - - """ - if 'isolation_level' in kw: - raise exc.ArgumentError( - "'isolation_level' execution option may only be specified " - "on Connection.execution_options(), or " - "per-engine using the isolation_level " - "argument to create_engine()." - ) - if 'compiled_cache' in kw: - raise exc.ArgumentError( - "'compiled_cache' execution option may only be specified " - "on Connection.execution_options(), not per statement." - ) - self._execution_options = self._execution_options.union(kw) - - def execute(self, *multiparams, **params): - """Compile and execute this :class:`.Executable`.""" - - e = self.bind - if e is None: - label = getattr(self, 'description', self.__class__.__name__) - msg = ('This %s is not directly bound to a Connection or Engine.' - 'Use the .execute() method of a Connection or Engine ' - 'to execute this construct.' % label) - raise exc.UnboundExecutionError(msg) - return e._execute_clauseelement(self, multiparams, params) - - def scalar(self, *multiparams, **params): - """Compile and execute this :class:`.Executable`, returning the - result's scalar representation. - - """ - return self.execute(*multiparams, **params).scalar() - - @property - def bind(self): - """Returns the :class:`.Engine` or :class:`.Connection` to - which this :class:`.Executable` is bound, or None if none found. - - This is a traversal which checks locally, then - checks among the "from" clauses of associated objects - until a bound engine or connection is found. - - """ - if self._bind is not None: - return self._bind - - for f in _from_objects(self): - if f is self: - continue - engine = f.bind - if engine is not None: - return engine - else: - return None - - -# legacy, some outside users may be calling this -_Executable = Executable - -class _TextClause(Executable, ClauseElement): - """Represent a literal SQL text fragment. - - Public constructor is the :func:`text()` function. - - """ - - __visit_name__ = 'textclause' - - _bind_params_regex = re.compile(r'(? 
RIGHT``.""" - - __visit_name__ = 'binary' - - def __init__(self, left, right, operator, type_=None, - negate=None, modifiers=None): - self.left = _literal_as_text(left).self_group(against=operator) - self.right = _literal_as_text(right).self_group(against=operator) - self.operator = operator - self.type = sqltypes.to_instance(type_) - self.negate = negate - if modifiers is None: - self.modifiers = {} - else: - self.modifiers = modifiers - - def __nonzero__(self): - try: - return self.operator(hash(self.left), hash(self.right)) - except: - raise TypeError("Boolean value of this clause is not defined") - - @property - def _from_objects(self): - return self.left._from_objects + self.right._from_objects - - def _copy_internals(self, clone=_clone, **kw): - self.left = clone(self.left, **kw) - self.right = clone(self.right, **kw) - - def get_children(self, **kwargs): - return self.left, self.right - - def compare(self, other, **kw): - """Compare this :class:`_BinaryExpression` against the - given :class:`_BinaryExpression`.""" - - return ( - isinstance(other, _BinaryExpression) and - self.operator == other.operator and - ( - self.left.compare(other.left, **kw) and - self.right.compare(other.right, **kw) or - ( - operators.is_commutative(self.operator) and - self.left.compare(other.right, **kw) and - self.right.compare(other.left, **kw) - ) - ) - ) - - def self_group(self, against=None): - if operators.is_precedent(self.operator, against): - return _Grouping(self) - else: - return self - - def _negate(self): - if self.negate is not None: - return _BinaryExpression( - self.left, - self.right, - self.negate, - negate=self.operator, - type_=sqltypes.BOOLEANTYPE, - modifiers=self.modifiers) - else: - return super(_BinaryExpression, self)._negate() - -class _Exists(_UnaryExpression): - __visit_name__ = _UnaryExpression.__visit_name__ - _from_objects = [] - - def __init__(self, *args, **kwargs): - if args and isinstance(args[0], (_SelectBase, _ScalarSelect)): - s = args[0] - else: - if not args: - args = ([literal_column('*')],) - s = select(*args, **kwargs).as_scalar().self_group() - - _UnaryExpression.__init__(self, s, operator=operators.exists, - type_=sqltypes.Boolean) - - def select(self, whereclause=None, **params): - return select([self], whereclause, **params) - - def correlate(self, fromclause): - e = self._clone() - e.element = self.element.correlate(fromclause).self_group() - return e - - def select_from(self, clause): - """return a new :class:`._Exists` construct, applying the given expression - to the :meth:`.Select.select_from` method of the select statement - contained. - - """ - e = self._clone() - e.element = self.element.select_from(clause).self_group() - return e - - def where(self, clause): - """return a new exists() construct with the given expression added to - its WHERE clause, joined to the existing clause via AND, if any. - - """ - e = self._clone() - e.element = self.element.where(clause).self_group() - return e - -class Join(FromClause): - """represent a ``JOIN`` construct between two :class:`.FromClause` - elements. - - The public constructor function for :class:`.Join` is the module-level - :func:`join()` function, as well as the :func:`join()` method available - off all :class:`.FromClause` subclasses. - - """ - __visit_name__ = 'join' - - def __init__(self, left, right, onclause=None, isouter=False): - """Construct a new :class:`.Join`. 
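The ``_Exists`` internals above back the public ``exists()`` construct; its generative ``where()`` method composes as follows, again with hypothetical tables:

```python
from sqlalchemy import MetaData, Table, Column, Integer, select, exists

metadata = MetaData()
users = Table('users', metadata, Column('id', Integer, primary_key=True))
addresses = Table('addresses', metadata,
    Column('id', Integer, primary_key=True),
    Column('user_id', Integer))

has_address = exists().where(addresses.c.user_id == users.c.id)
print select([users.c.id]).where(has_address)
# SELECT users.id FROM users
# WHERE EXISTS (SELECT * FROM addresses WHERE addresses.user_id = users.id)
```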
- - The usual entrypoint here is the :func:`~.expression.join` - function or the :meth:`.FromClause.join` method of any - :class:`.FromClause` object. - - """ - self.left = _literal_as_text(left) - self.right = _literal_as_text(right).self_group() - - if onclause is None: - self.onclause = self._match_primaries(self.left, self.right) - else: - self.onclause = onclause - - self.isouter = isouter - self.__folded_equivalents = None - - @property - def description(self): - return "Join object on %s(%d) and %s(%d)" % ( - self.left.description, - id(self.left), - self.right.description, - id(self.right)) - - def is_derived_from(self, fromclause): - return fromclause is self or \ - self.left.is_derived_from(fromclause) or\ - self.right.is_derived_from(fromclause) - - def self_group(self, against=None): - return _FromGrouping(self) - - def _populate_column_collection(self): - columns = [c for c in self.left.columns] + \ - [c for c in self.right.columns] - - self.primary_key.extend(sqlutil.reduce_columns( - (c for c in columns if c.primary_key), self.onclause)) - self._columns.update((col._label, col) for col in columns) - self.foreign_keys.update(itertools.chain( - *[col.foreign_keys for col in columns])) - - def _copy_internals(self, clone=_clone, **kw): - self._reset_exported() - self.left = clone(self.left, **kw) - self.right = clone(self.right, **kw) - self.onclause = clone(self.onclause, **kw) - self.__folded_equivalents = None - - def get_children(self, **kwargs): - return self.left, self.right, self.onclause - - def _match_primaries(self, left, right): - if isinstance(left, Join): - left_right = left.right - else: - left_right = None - return sqlutil.join_condition(left, right, a_subset=left_right) - - def select(self, whereclause=None, fold_equivalents=False, **kwargs): - """Create a :class:`.Select` from this :class:`.Join`. - - The equivalent long-hand form, given a :class:`.Join` object - ``j``, is:: - - from sqlalchemy import select - j = select([j.left, j.right], **kw).\\ - where(whereclause).\\ - select_from(j) - - :param whereclause: the WHERE criterion that will be sent to - the :func:`select()` function - - :param fold_equivalents: based on the join criterion of this - :class:`.Join`, do not include - repeat column names in the column list of the resulting - select, for columns that are calculated to be "equivalent" - based on the join criterion of this :class:`.Join`. This will - recursively apply to any joins directly nested by this one - as well. - - :param \**kwargs: all other kwargs are sent to the - underlying :func:`select()` function. - - """ - if fold_equivalents: - collist = sqlutil.folded_equivalents(self) - else: - collist = [self.left, self.right] - - return select(collist, whereclause, from_obj=[self], **kwargs) - - @property - def bind(self): - return self.left.bind or self.right.bind - - def alias(self, name=None): - """return an alias of this :class:`.Join`. - - Used against a :class:`.Join` object, - :meth:`~.Join.alias` calls the :meth:`~.Join.select` - method first so that a subquery against a - :func:`.select` construct is generated. - the :func:`~expression.select` construct also has the - ``correlate`` flag set to ``False`` and will not - auto-correlate inside an enclosing :func:`~expression.select` - construct. 
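Per the ``Join.select()`` docstring above and the ``alias()`` long-hand form that follows, a join can be flattened into a SELECT or wrapped as a labeled subquery. A sketch with hypothetical tables ``a`` and ``b``:

```python
from sqlalchemy import MetaData, Table, Column, Integer, select

metadata = MetaData()
a = Table('a', metadata, Column('id', Integer, primary_key=True))
b = Table('b', metadata,
    Column('id', Integer, primary_key=True),
    Column('a_id', Integer))

j = a.join(b, a.c.id == b.c.a_id)
print j.select(b.c.id > 10)   # SELECT a.id, b.id, b.a_id FROM a JOIN b ... WHERE ...

ab = j.alias('ab')            # labeled, non-correlating subquery
print select([ab.c.a_id])     # SELECT ab.a_id FROM (SELECT a.id AS a_id, ...) AS ab
```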
-
-        The equivalent long-hand form, given a :class:`.Join` object
-        ``j``, is::
-
-            from sqlalchemy import select, alias
-            j = alias(
-                select([j.left, j.right]).\\
-                    select_from(j).\\
-                    with_labels(True).\\
-                    correlate(False),
-                name=name
-            )
-
-        See :func:`~.expression.alias` for further details on
-        aliases.
-
-        """
-        return self.select(use_labels=True, correlate=False).alias(name)
-
-    @property
-    def _hide_froms(self):
-        return itertools.chain(*[_from_objects(x.left, x.right)
-                                 for x in self._cloned_set])
-
-    @property
-    def _from_objects(self):
-        return [self] + \
-            self.onclause._from_objects + \
-            self.left._from_objects + \
-            self.right._from_objects
-
-class Alias(FromClause):
-    """Represents a table or selectable alias (AS).
-
-    Represents an alias, as typically applied to any table or
-    sub-select within a SQL statement using the ``AS`` keyword (or
-    without the keyword on certain databases such as Oracle).
-
-    This object is constructed from the :func:`~.expression.alias` module level
-    function as well as the :meth:`.FromClause.alias` method available on all
-    :class:`.FromClause` subclasses.
-
-    """
-
-    __visit_name__ = 'alias'
-    named_with_column = True
-
-    def __init__(self, selectable, name=None):
-        baseselectable = selectable
-        while isinstance(baseselectable, Alias):
-            baseselectable = baseselectable.element
-        self.original = baseselectable
-        self.supports_execution = baseselectable.supports_execution
-        if self.supports_execution:
-            self._execution_options = baseselectable._execution_options
-        self.element = selectable
-        if name is None:
-            if self.original.named_with_column:
-                name = getattr(self.original, 'name', None)
-            name = _anonymous_label('%%(%d %s)s' % (id(self), name
-                                                    or 'anon'))
-        self.name = name
-
-    @property
-    def description(self):
-        # Py3K
-        #return self.name
-        # Py2K
-        return self.name.encode('ascii', 'backslashreplace')
-        # end Py2K
-
-    def as_scalar(self):
-        try:
-            return self.element.as_scalar()
-        except AttributeError:
-            raise AttributeError("Element %s does not support "
-                                 "'as_scalar()'" % self.element)
-
-    def is_derived_from(self, fromclause):
-        if fromclause in self._cloned_set:
-            return True
-        return self.element.is_derived_from(fromclause)
-
-    def _populate_column_collection(self):
-        for col in self.element.columns:
-            col._make_proxy(self)
-
-    def _copy_internals(self, clone=_clone, **kw):
-        # don't apply anything to an aliased Table
-        # for now. May want to drive this from
-        # the given **kw.
-        if isinstance(self.element, TableClause):
-            return
-        self._reset_exported()
-        self.element = clone(self.element, **kw)
-        baseselectable = self.element
-        while isinstance(baseselectable, Alias):
-            baseselectable = baseselectable.element
-        self.original = baseselectable
-
-    def get_children(self, column_collections=True, **kw):
-        if column_collections:
-            for c in self.c:
-                yield c
-        yield self.element
-
-    @property
-    def _from_objects(self):
-        return [self]
-
-    @property
-    def bind(self):
-        return self.element.bind
-
-class CTE(Alias):
-    """Represent a Common Table Expression.
-
-    The :class:`.CTE` object is obtained using the
-    :meth:`._SelectBase.cte` method from any selectable.
-    See that method for complete examples.
-
-    ..
versionadded:: 0.7.6 - - """ - __visit_name__ = 'cte' - - def __init__(self, selectable, - name=None, - recursive=False, - cte_alias=False, - _restates=frozenset()): - self.recursive = recursive - self.cte_alias = cte_alias - self._restates = _restates - super(CTE, self).__init__(selectable, name=name) - - def alias(self, name=None): - return CTE( - self.original, - name=name, - recursive=self.recursive, - cte_alias = self.name - ) - - def union(self, other): - return CTE( - self.original.union(other), - name=self.name, - recursive=self.recursive, - _restates=self._restates.union([self]) - ) - - def union_all(self, other): - return CTE( - self.original.union_all(other), - name=self.name, - recursive=self.recursive, - _restates=self._restates.union([self]) - ) - - -class _Grouping(ColumnElement): - """Represent a grouping within a column expression""" - - __visit_name__ = 'grouping' - - def __init__(self, element): - self.element = element - self.type = getattr(element, 'type', None) - - @property - def _label(self): - return getattr(self.element, '_label', None) or self.anon_label - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - def get_children(self, **kwargs): - return self.element, - - @property - def _from_objects(self): - return self.element._from_objects - - def __getattr__(self, attr): - return getattr(self.element, attr) - - def __getstate__(self): - return {'element':self.element, 'type':self.type} - - def __setstate__(self, state): - self.element = state['element'] - self.type = state['type'] - -class _FromGrouping(FromClause): - """Represent a grouping of a FROM clause""" - __visit_name__ = 'grouping' - - def __init__(self, element): - self.element = element - - def _init_collections(self): - pass - - @property - def columns(self): - return self.element.columns - - @property - def primary_key(self): - return self.element.primary_key - - @property - def foreign_keys(self): - # this could be - # self.element.foreign_keys - # see SelectableTest.test_join_condition - return set() - - @property - def _hide_froms(self): - return self.element._hide_froms - - def get_children(self, **kwargs): - return self.element, - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - @property - def _from_objects(self): - return self.element._from_objects - - def __getattr__(self, attr): - return getattr(self.element, attr) - - def __getstate__(self): - return {'element':self.element} - - def __setstate__(self, state): - self.element = state['element'] - -class _Over(ColumnElement): - """Represent an OVER clause. - - This is a special operator against a so-called - "window" function, as well as any aggregate function, - which produces results relative to the result set - itself. It's supported only by certain database - backends. 
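The :class:`.CTE` class above is normally obtained via the ``.cte()`` method on a select, available from SQLAlchemy 0.7.6 per the versionadded note. A minimal non-recursive sketch with a hypothetical ``orders`` table:

```python
from sqlalchemy import MetaData, Table, Column, Integer, select, func

metadata = MetaData()
orders = Table('orders', metadata,
    Column('id', Integer, primary_key=True),
    Column('amount', Integer))

totals = select([func.sum(orders.c.amount).label('total')]).cte('totals')
print select([totals.c.total])
# WITH totals AS (SELECT sum(orders.amount) AS total FROM orders)
# SELECT totals.total FROM totals
```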
- - """ - __visit_name__ = 'over' - - order_by = None - partition_by = None - - def __init__(self, func, partition_by=None, order_by=None): - self.func = func - if order_by is not None: - self.order_by = ClauseList(*util.to_list(order_by)) - if partition_by is not None: - self.partition_by = ClauseList(*util.to_list(partition_by)) - - @util.memoized_property - def type(self): - return self.func.type - - def get_children(self, **kwargs): - return [c for c in - (self.func, self.partition_by, self.order_by) - if c is not None] - - def _copy_internals(self, clone=_clone, **kw): - self.func = clone(self.func, **kw) - if self.partition_by is not None: - self.partition_by = clone(self.partition_by, **kw) - if self.order_by is not None: - self.order_by = clone(self.order_by, **kw) - - @property - def _from_objects(self): - return list(itertools.chain( - *[c._from_objects for c in - (self.func, self.partition_by, self.order_by) - if c is not None] - )) - -class _Label(ColumnElement): - """Represents a column label (AS). - - Represent a label, as typically applied to any column-level - element using the ``AS`` sql keyword. - - This object is constructed from the :func:`label()` module level - function as well as the :func:`label()` method available on all - :class:`.ColumnElement` subclasses. - - """ - - __visit_name__ = 'label' - - def __init__(self, name, element, type_=None): - while isinstance(element, _Label): - element = element.element - if name: - self.name = name - else: - self.name = _anonymous_label('%%(%d %s)s' % (id(self), - getattr(element, 'name', 'anon'))) - self.key = self._label = self._key_label = self.name - self._element = element - self._type = type_ - self.quote = element.quote - self.proxies = [element] - - @util.memoized_property - def type(self): - return sqltypes.to_instance( - self._type or getattr(self._element, 'type', None) - ) - - @util.memoized_property - def element(self): - return self._element.self_group(against=operators.as_) - - def self_group(self, against=None): - sub_element = self._element.self_group(against=against) - if sub_element is not self._element: - return _Label(self.name, - sub_element, - type_=self._type) - else: - return self - - @property - def primary_key(self): - return self.element.primary_key - - @property - def foreign_keys(self): - return self.element.foreign_keys - - def get_children(self, **kwargs): - return self.element, - - def _copy_internals(self, clone=_clone, **kw): - self.element = clone(self.element, **kw) - - @property - def _from_objects(self): - return self.element._from_objects - - def _make_proxy(self, selectable, name = None): - e = self.element._make_proxy(selectable, name=name or self.name) - e.proxies.append(self) - return e - -class ColumnClause(_Immutable, ColumnElement): - """Represents a generic column expression from any textual string. - - This includes columns associated with tables, aliases and select - statements, but also any arbitrary text. May or may not be bound - to an underlying :class:`.Selectable`. - - :class:`.ColumnClause` is constructed by itself typically via - the :func:`~.expression.column` function. 
It may be placed directly - into constructs such as :func:`.select` constructs:: - - from sqlalchemy.sql import column, select - - c1, c2 = column("c1"), column("c2") - s = select([c1, c2]).where(c1==5) - - There is also a variant on :func:`~.expression.column` known - as :func:`~.expression.literal_column` - the difference is that - in the latter case, the string value is assumed to be an exact - expression, rather than a column name, so that no quoting rules - or similar are applied:: - - from sqlalchemy.sql import literal_column, select - - s = select([literal_column("5 + 7")]) - - :class:`.ColumnClause` can also be used in a table-like - fashion by combining the :func:`~.expression.column` function - with the :func:`~.expression.table` function, to produce - a "lightweight" form of table metadata:: - - from sqlalchemy.sql import table, column - - user = table("user", - column("id"), - column("name"), - column("description"), - ) - - The above construct can be created in an ad-hoc fashion and is - not associated with any :class:`.schema.MetaData`, unlike it's - more full fledged :class:`.schema.Table` counterpart. - - :param text: the text of the element. - - :param selectable: parent selectable. - - :param type: :class:`.types.TypeEngine` object which can associate - this :class:`.ColumnClause` with a type. - - :param is_literal: if True, the :class:`.ColumnClause` is assumed to - be an exact expression that will be delivered to the output with no - quoting rules applied regardless of case sensitive settings. the - :func:`literal_column()` function is usually used to create such a - :class:`.ColumnClause`. - - """ - __visit_name__ = 'column' - - onupdate = default = server_default = server_onupdate = None - - _memoized_property = util.group_expirable_memoized_property() - - def __init__(self, text, selectable=None, type_=None, is_literal=False): - self.key = self.name = text - self.table = selectable - self.type = sqltypes.to_instance(type_) - self.is_literal = is_literal - - def _compare_name_for_result(self, other): - if self.is_literal or \ - self.table is None or \ - not hasattr(other, 'proxy_set') or ( - isinstance(other, ColumnClause) and other.is_literal - ): - return super(ColumnClause, self).\ - _compare_name_for_result(other) - else: - return other.proxy_set.intersection(self.proxy_set) - - def _get_table(self): - return self.__dict__['table'] - def _set_table(self, table): - self._memoized_property.expire_instance(self) - self.__dict__['table'] = table - table = property(_get_table, _set_table) - - @_memoized_property - def _from_objects(self): - t = self.table - if t is not None: - return [t] - else: - return [] - - @util.memoized_property - def description(self): - # Py3K - #return self.name - # Py2K - return self.name.encode('ascii', 'backslashreplace') - # end Py2K - - @_memoized_property - def _key_label(self): - if self.key != self.name: - return self._gen_label(self.key) - else: - return self._label - - @_memoized_property - def _label(self): - return self._gen_label(self.name) - - def _gen_label(self, name): - t = self.table - if self.is_literal: - return None - - elif t is not None and t.named_with_column: - if getattr(t, 'schema', None): - label = t.schema.replace('.', '_') + "_" + \ - t.name + "_" + name - else: - label = t.name + "_" + name - - # ensure the label name doesn't conflict with that - # of an existing column - if label in t.c: - _label = label - counter = 1 - while _label in t.c: - _label = label + "_" + str(counter) - counter += 1 - label = _label - 
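# (the label is handed to _as_truncated below, so that dialects
# can shorten it to their maximum identifier length at compile
# time)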
- return _as_truncated(label) - - else: - return name - - def label(self, name): - # currently, anonymous labels don't occur for - # ColumnClause. The use at the moment - # is that they do not generate nicely for - # is_literal clauses. We would like to change - # this so that label(None) acts as would be expected. - # See [ticket:2168]. - if name is None: - return self - else: - return super(ColumnClause, self).label(name) - - - def _bind_param(self, operator, obj): - return _BindParamClause(self.name, obj, - _compared_to_operator=operator, - _compared_to_type=self.type, - unique=True) - - def _make_proxy(self, selectable, name=None, attach=True): - # propagate the "is_literal" flag only if we are keeping our name, - # otherwise its considered to be a label - is_literal = self.is_literal and (name is None or name == self.name) - c = self._constructor( - _as_truncated(name or self.name), - selectable=selectable, - type_=self.type, - is_literal=is_literal - ) - c.proxies = [self] - if selectable._is_clone_of is not None: - c._is_clone_of = \ - selectable._is_clone_of.columns.get(c.name) - - if attach: - selectable._columns[c.name] = c - return c - -class TableClause(_Immutable, FromClause): - """Represents a minimal "table" construct. - - The constructor for :class:`.TableClause` is the - :func:`~.expression.table` function. This produces - a lightweight table object that has only a name and a - collection of columns, which are typically produced - by the :func:`~.expression.column` function:: - - from sqlalchemy.sql import table, column - - user = table("user", - column("id"), - column("name"), - column("description"), - ) - - The :class:`.TableClause` construct serves as the base for - the more commonly used :class:`~.schema.Table` object, providing - the usual set of :class:`~.expression.FromClause` services including - the ``.c.`` collection and statement generation methods. - - It does **not** provide all the additional schema-level services - of :class:`~.schema.Table`, including constraints, references to other - tables, or support for :class:`.MetaData`-level services. It's useful - on its own as an ad-hoc construct used to generate quick SQL - statements when a more fully fledged :class:`~.schema.Table` is not on hand. - - """ - - __visit_name__ = 'table' - - named_with_column = True - - def __init__(self, name, *columns): - super(TableClause, self).__init__() - self.name = self.fullname = name - self._columns = ColumnCollection() - self.primary_key = ColumnSet() - self.foreign_keys = set() - for c in columns: - self.append_column(c) - - def _init_collections(self): - pass - - @util.memoized_property - def description(self): - # Py3K - #return self.name - # Py2K - return self.name.encode('ascii', 'backslashreplace') - # end Py2K - - def append_column(self, c): - self._columns[c.name] = c - c.table = self - - def get_children(self, column_collections=True, **kwargs): - if column_collections: - return [c for c in self.c] - else: - return [] - - def count(self, whereclause=None, **params): - """return a SELECT COUNT generated against this - :class:`.TableClause`.""" - - if self.primary_key: - col = list(self.primary_key)[0] - else: - col = list(self.columns)[0] - return select( - [func.count(col).label('tbl_row_count')], - whereclause, - from_obj=[self], - **params) - - def insert(self, values=None, inline=False, **kwargs): - """Generate an :func:`.insert` construct against this - :class:`.TableClause`. 
- - E.g.:: - - table.insert().values(name='foo') - - See :func:`.insert` for argument and usage information. - - """ - - return insert(self, values=values, inline=inline, **kwargs) - - def update(self, whereclause=None, values=None, inline=False, **kwargs): - """Generate an :func:`.update` construct against this - :class:`.TableClause`. - - E.g.:: - - table.update().where(table.c.id==7).values(name='foo') - - See :func:`.update` for argument and usage information. - - """ - - return update(self, whereclause=whereclause, - values=values, inline=inline, **kwargs) - - def delete(self, whereclause=None, **kwargs): - """Generate a :func:`.delete` construct against this - :class:`.TableClause`. - - E.g.:: - - table.delete().where(table.c.id==7) - - See :func:`.delete` for argument and usage information. - - """ - - return delete(self, whereclause, **kwargs) - - @property - def _from_objects(self): - return [self] - -class _SelectBase(Executable, FromClause): - """Base class for :class:`.Select` and ``CompoundSelects``.""" - - _order_by_clause = ClauseList() - _group_by_clause = ClauseList() - _limit = None - _offset = None - - def __init__(self, - use_labels=False, - for_update=False, - limit=None, - offset=None, - order_by=None, - group_by=None, - bind=None, - autocommit=None): - self.use_labels = use_labels - self.for_update = for_update - if autocommit is not None: - util.warn_deprecated('autocommit on select() is ' - 'deprecated. Use .execution_options(a' - 'utocommit=True)') - self._execution_options = \ - self._execution_options.union({'autocommit' - : autocommit}) - if limit is not None: - self._limit = util.asint(limit) - if offset is not None: - self._offset = util.asint(offset) - self._bind = bind - - if order_by is not None: - self._order_by_clause = ClauseList(*util.to_list(order_by)) - if group_by is not None: - self._group_by_clause = ClauseList(*util.to_list(group_by)) - - def as_scalar(self): - """return a 'scalar' representation of this selectable, which can be - used as a column expression. - - Typically, a select statement which has only one column in its columns - clause is eligible to be used as a scalar expression. - - The returned object is an instance of - :class:`_ScalarSelect`. - - """ - return _ScalarSelect(self) - - @_generative - def apply_labels(self): - """return a new selectable with the 'use_labels' flag set to True. - - This will result in column expressions being generated using labels - against their table name, such as "SELECT somecolumn AS - tablename_somecolumn". This allows selectables which contain multiple - FROM clauses to produce a unique set of column names regardless of - name conflicts among the individual FROM clauses. - - """ - self.use_labels = True - - def label(self, name): - """return a 'scalar' representation of this selectable, embedded as a - subquery with a label. - - See also :meth:`~._SelectBase.as_scalar`. - - """ - return self.as_scalar().label(name) - - def cte(self, name=None, recursive=False): - """Return a new :class:`.CTE`, or Common Table Expression instance. - - Common table expressions are a SQL standard whereby SELECT - statements can draw upon secondary statements specified along - with the primary statement, using a clause called "WITH". - Special semantics regarding UNION can also be employed to - allow "recursive" queries, where a SELECT statement can draw - upon the set of rows that have previously been selected. 
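In its minimal form, the method is chained from any select
construct; a sketch against a hypothetical ``orders`` table
(two fuller, runnable examples follow below)::

    from sqlalchemy.sql import table, column, select

    orders = table('orders', column('region'), column('amount'))

    regional = select([orders.c.region]).cte('regional')

    # renders: WITH regional AS
    #              (SELECT orders.region FROM orders)
    #          SELECT regional.region FROM regional
    stmt = select([regional.c.region])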
- - SQLAlchemy detects :class:`.CTE` objects, which are treated - similarly to :class:`.Alias` objects, as special elements - to be delivered to the FROM clause of the statement as well - as to a WITH clause at the top of the statement. - - .. versionadded:: 0.7.6 - - :param name: name given to the common table expression. Like - :meth:`._FromClause.alias`, the name can be left as ``None`` - in which case an anonymous symbol will be used at query - compile time. - :param recursive: if ``True``, will render ``WITH RECURSIVE``. - A recursive common table expression is intended to be used in - conjunction with UNION ALL in order to derive rows - from those already selected. - - The following examples illustrate two examples from - Postgresql's documentation at - http://www.postgresql.org/docs/8.4/static/queries-with.html. - - Example 1, non recursive:: - - from sqlalchemy import Table, Column, String, Integer, MetaData, \\ - select, func - - metadata = MetaData() - - orders = Table('orders', metadata, - Column('region', String), - Column('amount', Integer), - Column('product', String), - Column('quantity', Integer) - ) - - regional_sales = select([ - orders.c.region, - func.sum(orders.c.amount).label('total_sales') - ]).group_by(orders.c.region).cte("regional_sales") - - - top_regions = select([regional_sales.c.region]).\\ - where( - regional_sales.c.total_sales > - select([ - func.sum(regional_sales.c.total_sales)/10 - ]) - ).cte("top_regions") - - statement = select([ - orders.c.region, - orders.c.product, - func.sum(orders.c.quantity).label("product_units"), - func.sum(orders.c.amount).label("product_sales") - ]).where(orders.c.region.in_( - select([top_regions.c.region]) - )).group_by(orders.c.region, orders.c.product) - - result = conn.execute(statement).fetchall() - - Example 2, WITH RECURSIVE:: - - from sqlalchemy import Table, Column, String, Integer, MetaData, \\ - select, func - - metadata = MetaData() - - parts = Table('parts', metadata, - Column('part', String), - Column('sub_part', String), - Column('quantity', Integer), - ) - - included_parts = select([ - parts.c.sub_part, - parts.c.part, - parts.c.quantity]).\\ - where(parts.c.part=='our part').\\ - cte(recursive=True) - - - incl_alias = included_parts.alias() - parts_alias = parts.alias() - included_parts = included_parts.union_all( - select([ - parts_alias.c.part, - parts_alias.c.sub_part, - parts_alias.c.quantity - ]). - where(parts_alias.c.part==incl_alias.c.sub_part) - ) - - statement = select([ - included_parts.c.sub_part, - func.sum(included_parts.c.quantity).label('total_quantity') - ]).\ - select_from(included_parts.join(parts, - included_parts.c.part==parts.c.part)).\\ - group_by(included_parts.c.sub_part) - - result = conn.execute(statement).fetchall() - - - See also: - - :meth:`.orm.query.Query.cte` - ORM version of :meth:`._SelectBase.cte`. - - """ - return CTE(self, name=name, recursive=recursive) - - @_generative - @util.deprecated('0.6', - message=":func:`.autocommit` is deprecated. 
Use " - ":func:`.Executable.execution_options` with the " - "'autocommit' flag.") - def autocommit(self): - """return a new selectable with the 'autocommit' flag set to - True.""" - - self._execution_options = \ - self._execution_options.union({'autocommit': True}) - - def _generate(self): - """Override the default _generate() method to also clear out - exported collections.""" - - s = self.__class__.__new__(self.__class__) - s.__dict__ = self.__dict__.copy() - s._reset_exported() - return s - - @_generative - def limit(self, limit): - """return a new selectable with the given LIMIT criterion - applied.""" - - self._limit = util.asint(limit) - - @_generative - def offset(self, offset): - """return a new selectable with the given OFFSET criterion - applied.""" - - self._offset = util.asint(offset) - - @_generative - def order_by(self, *clauses): - """return a new selectable with the given list of ORDER BY - criterion applied. - - The criterion will be appended to any pre-existing ORDER BY - criterion. - - """ - - self.append_order_by(*clauses) - - @_generative - def group_by(self, *clauses): - """return a new selectable with the given list of GROUP BY - criterion applied. - - The criterion will be appended to any pre-existing GROUP BY - criterion. - - """ - - self.append_group_by(*clauses) - - def append_order_by(self, *clauses): - """Append the given ORDER BY criterion applied to this selectable. - - The criterion will be appended to any pre-existing ORDER BY criterion. - - """ - if len(clauses) == 1 and clauses[0] is None: - self._order_by_clause = ClauseList() - else: - if getattr(self, '_order_by_clause', None) is not None: - clauses = list(self._order_by_clause) + list(clauses) - self._order_by_clause = ClauseList(*clauses) - - def append_group_by(self, *clauses): - """Append the given GROUP BY criterion applied to this selectable. - - The criterion will be appended to any pre-existing GROUP BY criterion. - - """ - if len(clauses) == 1 and clauses[0] is None: - self._group_by_clause = ClauseList() - else: - if getattr(self, '_group_by_clause', None) is not None: - clauses = list(self._group_by_clause) + list(clauses) - self._group_by_clause = ClauseList(*clauses) - - @property - def _from_objects(self): - return [self] - - -class _ScalarSelect(_Grouping): - _from_objects = [] - - def __init__(self, element): - self.element = element - self.type = element._scalar_type() - - @property - def columns(self): - raise exc.InvalidRequestError('Scalar Select expression has no ' - 'columns; use this object directly within a ' - 'column-level expression.') - c = columns - - def self_group(self, **kwargs): - return self - - def _make_proxy(self, selectable, name): - return list(self.inner_columns)[0]._make_proxy(selectable, name) - -class CompoundSelect(_SelectBase): - """Forms the basis of ``UNION``, ``UNION ALL``, and other - SELECT-based set operations.""" - - __visit_name__ = 'compound_select' - - UNION = util.symbol('UNION') - UNION_ALL = util.symbol('UNION ALL') - EXCEPT = util.symbol('EXCEPT') - EXCEPT_ALL = util.symbol('EXCEPT ALL') - INTERSECT = util.symbol('INTERSECT') - INTERSECT_ALL = util.symbol('INTERSECT ALL') - - def __init__(self, keyword, *selects, **kwargs): - self._should_correlate = kwargs.pop('correlate', False) - self.keyword = keyword - self.selects = [] - - numcols = None - - # some DBs do not like ORDER BY in the inner queries of a UNION, etc. 
- for n, s in enumerate(selects): - s = _clause_element_as_expr(s) - - if not numcols: - numcols = len(s.c) - elif len(s.c) != numcols: - raise exc.ArgumentError('All selectables passed to ' - 'CompoundSelect must have identical numbers of ' - 'columns; select #%d has %d columns, select ' - '#%d has %d' % (1, len(self.selects[0].c), n - + 1, len(s.c))) - - self.selects.append(s.self_group(self)) - - _SelectBase.__init__(self, **kwargs) - - def _scalar_type(self): - return self.selects[0]._scalar_type() - - def self_group(self, against=None): - return _FromGrouping(self) - - def is_derived_from(self, fromclause): - for s in self.selects: - if s.is_derived_from(fromclause): - return True - return False - - def _populate_column_collection(self): - for cols in zip(*[s.c for s in self.selects]): - - # this is a slightly hacky thing - the union exports a - # column that resembles just that of the *first* selectable. - # to get at a "composite" column, particularly foreign keys, - # you have to dig through the proxies collection which we - # generate below. We may want to improve upon this, such as - # perhaps _make_proxy can accept a list of other columns - # that are "shared" - schema.column can then copy all the - # ForeignKeys in. this would allow the union() to have all - # those fks too. - - proxy = cols[0]._make_proxy(self, name=self.use_labels - and cols[0]._label or None) - - # hand-construct the "proxies" collection to include all - # derived columns place a 'weight' annotation corresponding - # to how low in the list of select()s the column occurs, so - # that the corresponding_column() operation can resolve - # conflicts - - proxy.proxies = [c._annotate({'weight': i + 1}) for (i, - c) in enumerate(cols)] - - def _copy_internals(self, clone=_clone, **kw): - self._reset_exported() - self.selects = [clone(s, **kw) for s in self.selects] - if hasattr(self, '_col_map'): - del self._col_map - for attr in ('_order_by_clause', '_group_by_clause'): - if getattr(self, attr) is not None: - setattr(self, attr, clone(getattr(self, attr), **kw)) - - def get_children(self, column_collections=True, **kwargs): - return (column_collections and list(self.c) or []) \ - + [self._order_by_clause, self._group_by_clause] \ - + list(self.selects) - - def bind(self): - if self._bind: - return self._bind - for s in self.selects: - e = s.bind - if e: - return e - else: - return None - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - -class Select(_SelectBase): - """Represents a ``SELECT`` statement. - - See also: - - :func:`~.expression.select` - the function which creates a :class:`.Select` object. - - :ref:`coretutorial_selecting` - Core Tutorial description of :func:`.select`. - - """ - - __visit_name__ = 'select' - - _prefixes = () - _hints = util.immutabledict() - _distinct = False - _from_cloned = None - - _memoized_property = _SelectBase._memoized_property - - def __init__(self, - columns, - whereclause=None, - from_obj=None, - distinct=False, - having=None, - correlate=True, - prefixes=None, - **kwargs): - """Construct a Select object. - - The public constructor for Select is the - :func:`select` function; see that function for - argument descriptions. - - Additional generative and mutator methods are available on the - :class:`_SelectBase` superclass. 
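A minimal construction sketch, using the lightweight
:func:`~.expression.table` form for brevity::

    from sqlalchemy.sql import table, column, select

    users = table('users', column('id'), column('name'))

    # renders: SELECT users.name FROM users
    #          WHERE users.id = :id_1
    stmt = select([users.c.name]).where(users.c.id == 5)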
- - """ - self._should_correlate = correlate - if distinct is not False: - if isinstance(distinct, basestring): - util.warn_deprecated( - "A string argument passed to the 'distinct' " - "keyword argument of 'select()' is deprecated " - "- please use 'prefixes' or 'prefix_with()' " - "to specify additional prefixes") - if prefixes: - prefixes = util.to_list(prefixes) + [distinct] - else: - prefixes = [distinct] - elif distinct is True: - self._distinct = True - else: - self._distinct = [ - _literal_as_text(e) - for e in util.to_list(distinct) - ] - - self._correlate = set() - if from_obj is not None: - self._from_obj = util.OrderedSet( - _literal_as_text(f) - for f in util.to_list(from_obj)) - else: - self._from_obj = util.OrderedSet() - - try: - cols_present = bool(columns) - except TypeError: - raise exc.ArgumentError("columns argument to select() must " - "be a Python list or other iterable") - - if cols_present: - self._raw_columns = [] - for c in columns: - c = _literal_as_column(c) - if isinstance(c, _ScalarSelect): - c = c.self_group(against=operators.comma_op) - self._raw_columns.append(c) - else: - self._raw_columns = [] - - if whereclause is not None: - self._whereclause = _literal_as_text(whereclause) - else: - self._whereclause = None - - if having is not None: - self._having = _literal_as_text(having) - else: - self._having = None - - if prefixes: - self._prefixes = tuple([_literal_as_text(p) for p in prefixes]) - - _SelectBase.__init__(self, **kwargs) - - @property - def _froms(self): - # would love to cache this, - # but there's just enough edge cases, particularly now that - # declarative encourages construction of SQL expressions - # without tables present, to just regen this each time. - froms = [] - seen = set() - translate = self._from_cloned - def add(items): - for item in items: - if translate and item in translate: - item = translate[item] - if not seen.intersection(item._cloned_set): - froms.append(item) - seen.update(item._cloned_set) - - add(_from_objects(*self._raw_columns)) - if self._whereclause is not None: - add(_from_objects(self._whereclause)) - add(self._from_obj) - - return froms - - def _get_display_froms(self, existing_froms=None): - """Return the full list of 'from' clauses to be displayed. - - Takes into account a set of existing froms which may be - rendered in the FROM clause of enclosing selects; this Select - may want to leave those absent if it is automatically - correlating. - - """ - froms = self._froms - - toremove = set(itertools.chain(*[f._hide_froms for f in froms])) - if toremove: - # if we're maintaining clones of froms, - # add the copies out to the toremove list. only include - # clones that are lexical equivalents. - if self._from_cloned: - toremove.update( - self._from_cloned[f] for f in - toremove.intersection(self._from_cloned) - if self._from_cloned[f]._is_lexical_equivalent(f) - ) - # filter out to FROM clauses not in the list, - # using a list to maintain ordering - froms = [f for f in froms if f not in toremove] - - if len(froms) > 1 or self._correlate: - if self._correlate: - froms = [f for f in froms if f not in _cloned_intersection(froms, - self._correlate)] - if self._should_correlate and existing_froms: - froms = [f for f in froms if f not in _cloned_intersection(froms, - existing_froms)] - - if not len(froms): - raise exc.InvalidRequestError("Select statement '%s" - "' returned no FROM clauses due to " - "auto-correlation; specify " - "correlate() to control " - "correlation manually." 
% self) - - return froms - - def _scalar_type(self): - elem = self._raw_columns[0] - cols = list(elem._select_iterable) - return cols[0].type - - @property - def froms(self): - """Return the displayed list of FromClause elements.""" - - return self._get_display_froms() - - @_generative - def with_hint(self, selectable, text, dialect_name='*'): - """Add an indexing hint for the given selectable to this - :class:`.Select`. - - The text of the hint is rendered in the appropriate - location for the database backend in use, relative - to the given :class:`.Table` or :class:`.Alias` passed as the - ``selectable`` argument. The dialect implementation - typically uses Python string substitution syntax - with the token ``%(name)s`` to render the name of - the table or alias. E.g. when using Oracle, the - following:: - - select([mytable]).\\ - with_hint(mytable, "+ index(%(name)s ix_mytable)") - - Would render SQL as:: - - select /*+ index(mytable ix_mytable) */ ... from mytable - - The ``dialect_name`` option will limit the rendering of a particular - hint to a particular backend. Such as, to add hints for both Oracle - and Sybase simultaneously:: - - select([mytable]).\\ - with_hint(mytable, "+ index(%(name)s ix_mytable)", 'oracle').\\ - with_hint(mytable, "WITH INDEX ix_mytable", 'sybase') - - """ - self._hints = self._hints.union({(selectable, dialect_name):text}) - - @property - def type(self): - raise exc.InvalidRequestError("Select objects don't have a type. " - "Call as_scalar() on this Select object " - "to return a 'scalar' version of this Select.") - - @_memoized_property.method - def locate_all_froms(self): - """return a Set of all FromClause elements referenced by this Select. - - This set is a superset of that returned by the ``froms`` property, - which is specifically for those FromClause elements that would - actually be rendered. - - """ - froms = self._froms - return froms + list(_from_objects(*froms)) - - @property - def inner_columns(self): - """an iterator of all ColumnElement expressions which would - be rendered into the columns clause of the resulting SELECT statement. - - """ - return _select_iterables(self._raw_columns) - - def is_derived_from(self, fromclause): - if self in fromclause._cloned_set: - return True - - for f in self.locate_all_froms(): - if f.is_derived_from(fromclause): - return True - return False - - def _copy_internals(self, clone=_clone, **kw): - - # Select() object has been cloned and probably adapted by the - # given clone function. Apply the cloning function to internal - # objects - - # 1. keep a dictionary of the froms we've cloned, and what - # they've become. This is consulted later when we derive - # additional froms from "whereclause" and the columns clause, - # which may still reference the uncloned parent table. - # as of 0.7.4 we also put the current version of _froms, which - # gets cleared on each generation. previously we were "baking" - # _froms into self._from_obj. - self._from_cloned = from_cloned = dict((f, clone(f, **kw)) - for f in self._from_obj.union(self._froms)) - - # 3. update persistent _from_obj with the cloned versions. - self._from_obj = util.OrderedSet(from_cloned[f] for f in - self._from_obj) - - # the _correlate collection is done separately, what can happen - # here is the same item is _correlate as in _from_obj but the - # _correlate version has an annotation on it - (specifically - # RelationshipProperty.Comparator._criterion_exists() does - # this). 
Also keep _correlate liberally open with it's previous - # contents, as this set is used for matching, not rendering. - self._correlate = set(clone(f) for f in - self._correlate).union(self._correlate) - - # 4. clone other things. The difficulty here is that Column - # objects are not actually cloned, and refer to their original - # .table, resulting in the wrong "from" parent after a clone - # operation. Hence _from_cloned and _from_obj supercede what is - # present here. - self._raw_columns = [clone(c, **kw) for c in self._raw_columns] - for attr in '_whereclause', '_having', '_order_by_clause', \ - '_group_by_clause': - if getattr(self, attr) is not None: - setattr(self, attr, clone(getattr(self, attr), **kw)) - - # erase exported column list, _froms collection, - # etc. - self._reset_exported() - - def get_children(self, column_collections=True, **kwargs): - """return child elements as per the ClauseElement specification.""" - - return (column_collections and list(self.columns) or []) + \ - self._raw_columns + list(self._froms) + \ - [x for x in - (self._whereclause, self._having, - self._order_by_clause, self._group_by_clause) - if x is not None] - - @_generative - def column(self, column): - """return a new select() construct with the given column expression - added to its columns clause. - - """ - self.append_column(column) - - @_generative - def with_only_columns(self, columns): - """Return a new :func:`.select` construct with its columns - clause replaced with the given columns. - - .. versionchanged:: 0.7.3 - Due to a bug fix, this method has a slight - behavioral change as of version 0.7.3. - Prior to version 0.7.3, the FROM clause of - a :func:`.select` was calculated upfront and as new columns - were added; in 0.7.3 and later it's calculated - at compile time, fixing an issue regarding late binding - of columns to parent tables. This changes the behavior of - :meth:`.Select.with_only_columns` in that FROM clauses no - longer represented in the new list are dropped, - but this behavior is more consistent in - that the FROM clauses are consistently derived from the - current columns clause. The original intent of this method - is to allow trimming of the existing columns list to be fewer - columns than originally present; the use case of replacing - the columns list with an entirely different one hadn't - been anticipated until 0.7.3 was released; the usage - guidelines below illustrate how this should be done. - - This method is exactly equivalent to as if the original - :func:`.select` had been called with the given columns - clause. I.e. a statement:: - - s = select([table1.c.a, table1.c.b]) - s = s.with_only_columns([table1.c.b]) - - should be exactly equivalent to:: - - s = select([table1.c.b]) - - This means that FROM clauses which are only derived - from the column list will be discarded if the new column - list no longer contains that FROM:: - - >>> table1 = table('t1', column('a'), column('b')) - >>> table2 = table('t2', column('a'), column('b')) - >>> s1 = select([table1.c.a, table2.c.b]) - >>> print s1 - SELECT t1.a, t2.b FROM t1, t2 - >>> s2 = s1.with_only_columns([table2.c.b]) - >>> print s2 - SELECT t2.b FROM t1 - - The preferred way to maintain a specific FROM clause - in the construct, assuming it won't be represented anywhere - else (i.e. not in the WHERE clause, etc.) is to set it using - :meth:`.Select.select_from`:: - - >>> s1 = select([table1.c.a, table2.c.b]).\\ - ... 
select_from(table1.join(table2, table1.c.a==table2.c.a)) - >>> s2 = s1.with_only_columns([table2.c.b]) - >>> print s2 - SELECT t2.b FROM t1 JOIN t2 ON t1.a=t2.a - - Care should also be taken to use the correct - set of column objects passed to :meth:`.Select.with_only_columns`. - Since the method is essentially equivalent to calling the - :func:`.select` construct in the first place with the given - columns, the columns passed to :meth:`.Select.with_only_columns` - should usually be a subset of those which were passed - to the :func:`.select` construct, not those which are available - from the ``.c`` collection of that :func:`.select`. That - is:: - - s = select([table1.c.a, table1.c.b]).select_from(table1) - s = s.with_only_columns([table1.c.b]) - - and **not**:: - - # usually incorrect - s = s.with_only_columns([s.c.b]) - - The latter would produce the SQL:: - - SELECT b - FROM (SELECT t1.a AS a, t1.b AS b - FROM t1), t1 - - Since the :func:`.select` construct is essentially being - asked to select both from ``table1`` as well as itself. - - """ - self._reset_exported() - rc = [] - for c in columns: - c = _literal_as_column(c) - if isinstance(c, _ScalarSelect): - c = c.self_group(against=operators.comma_op) - rc.append(c) - self._raw_columns = rc - - @_generative - def where(self, whereclause): - """return a new select() construct with the given expression added to - its WHERE clause, joined to the existing clause via AND, if any. - - """ - - self.append_whereclause(whereclause) - - @_generative - def having(self, having): - """return a new select() construct with the given expression added to - its HAVING clause, joined to the existing clause via AND, if any. - - """ - self.append_having(having) - - @_generative - def distinct(self, *expr): - """Return a new select() construct which will apply DISTINCT to its - columns clause. - - :param \*expr: optional column expressions. When present, - the Postgresql dialect will render a ``DISTINCT ON (>)`` - construct. - - """ - if expr: - expr = [_literal_as_text(e) for e in expr] - if isinstance(self._distinct, list): - self._distinct = self._distinct + expr - else: - self._distinct = expr - else: - self._distinct = True - - @_generative - def prefix_with(self, *expr): - """return a new select() construct which will apply the given - expressions, typically strings, to the start of its columns clause, - not using any commas. In particular is useful for MySQL - keywords. - - e.g.:: - - select(['a', 'b']).prefix_with('HIGH_PRIORITY', - 'SQL_SMALL_RESULT', - 'ALL') - - Would render:: - - SELECT HIGH_PRIORITY SQL_SMALL_RESULT ALL a, b - - """ - expr = tuple(_literal_as_text(e) for e in expr) - self._prefixes = self._prefixes + expr - - @_generative - def select_from(self, fromclause): - """return a new :func:`.select` construct with the given FROM expression - merged into its list of FROM objects. - - E.g.:: - - table1 = table('t1', column('a')) - table2 = table('t2', column('b')) - s = select([table1.c.a]).\\ - select_from( - table1.join(table2, table1.c.a==table2.c.b) - ) - - The "from" list is a unique set on the identity of each element, - so adding an already present :class:`.Table` or other selectable - will have no effect. Passing a :class:`.Join` that refers - to an already present :class:`.Table` or other selectable will have - the effect of concealing the presence of that selectable as - an individual element in the rendered FROM list, instead rendering it into a - JOIN clause. 
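A sketch of that concealment, with the approximate rendered
FROM clauses noted in the comments::

    from sqlalchemy.sql import table, column, select

    t1 = table('t1', column('a'))
    t2 = table('t2', column('a'), column('b'))

    # FROM list derived from the columns clause: "FROM t1, t2"
    s = select([t1.c.a, t2.c.b])

    # the Join conceals the individual tables:
    # "FROM t1 JOIN t2 ON t1.a = t2.a"
    s = s.select_from(t1.join(t2, t1.c.a == t2.c.a))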
- - While the typical purpose of :meth:`.Select.select_from` is to replace - the default, derived FROM clause with a join, it can also be called with - individual table elements, multiple times if desired, in the case that the - FROM clause cannot be fully derived from the columns clause:: - - select([func.count('*')]).select_from(table1) - - """ - self.append_from(fromclause) - - @_generative - def correlate(self, *fromclauses): - """return a new select() construct which will correlate the given FROM - clauses to that of an enclosing select(), if a match is found. - - By "match", the given fromclause must be present in this select's - list of FROM objects and also present in an enclosing select's list of - FROM objects. - - Calling this method turns off the select's default behavior of - "auto-correlation". Normally, select() auto-correlates all of its FROM - clauses to those of an embedded select when compiled. - - If the fromclause is None, correlation is disabled for the returned - select(). - - """ - self._should_correlate = False - if fromclauses and fromclauses[0] is None: - self._correlate = set() - else: - self._correlate = self._correlate.union(fromclauses) - - def append_correlation(self, fromclause): - """append the given correlation expression to this select() - construct.""" - - self._should_correlate = False - self._correlate = self._correlate.union([fromclause]) - - def append_column(self, column): - """append the given column expression to the columns clause of this - select() construct. - - """ - self._reset_exported() - column = _literal_as_column(column) - - if isinstance(column, _ScalarSelect): - column = column.self_group(against=operators.comma_op) - - self._raw_columns = self._raw_columns + [column] - - def append_prefix(self, clause): - """append the given columns clause prefix expression to this select() - construct. - - """ - clause = _literal_as_text(clause) - self._prefixes = self._prefixes + (clause,) - - def append_whereclause(self, whereclause): - """append the given expression to this select() construct's WHERE - criterion. - - The expression will be joined to existing WHERE criterion via AND. - - """ - self._reset_exported() - whereclause = _literal_as_text(whereclause) - - if self._whereclause is not None: - self._whereclause = and_(self._whereclause, whereclause) - else: - self._whereclause = whereclause - - def append_having(self, having): - """append the given expression to this select() construct's HAVING - criterion. - - The expression will be joined to existing HAVING criterion via AND. - - """ - if self._having is not None: - self._having = and_(self._having, _literal_as_text(having)) - else: - self._having = _literal_as_text(having) - - def append_from(self, fromclause): - """append the given FromClause expression to this select() construct's - FROM clause. - - """ - self._reset_exported() - fromclause = _literal_as_text(fromclause) - self._from_obj = self._from_obj.union([fromclause]) - - def _populate_column_collection(self): - for c in self.inner_columns: - if hasattr(c, '_make_proxy'): - c._make_proxy(self, - name=self.use_labels - and c._label or None) - - def self_group(self, against=None): - """return a 'grouping' construct as per the ClauseElement - specification. - - This produces an element that can be embedded in an expression. Note - that this method is called automatically as needed when constructing - expressions and should not require explicit use. 
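One place this occurs implicitly is when a SELECT is embedded
within an enclosing expression, such as an IN comparison (a
sketch; the parenthesization is applied automatically)::

    from sqlalchemy.sql import table, column, select

    t1 = table('t1', column('a'))

    # renders: t1.a IN (SELECT t1.a FROM t1)
    expr = t1.c.a.in_(select([t1.c.a]))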
- - """ - if isinstance(against, CompoundSelect): - return self - return _FromGrouping(self) - - def union(self, other, **kwargs): - """return a SQL UNION of this select() construct against the given - selectable.""" - - return union(self, other, **kwargs) - - def union_all(self, other, **kwargs): - """return a SQL UNION ALL of this select() construct against the given - selectable. - - """ - return union_all(self, other, **kwargs) - - def except_(self, other, **kwargs): - """return a SQL EXCEPT of this select() construct against the given - selectable.""" - - return except_(self, other, **kwargs) - - def except_all(self, other, **kwargs): - """return a SQL EXCEPT ALL of this select() construct against the - given selectable. - - """ - return except_all(self, other, **kwargs) - - def intersect(self, other, **kwargs): - """return a SQL INTERSECT of this select() construct against the given - selectable. - - """ - return intersect(self, other, **kwargs) - - def intersect_all(self, other, **kwargs): - """return a SQL INTERSECT ALL of this select() construct against the - given selectable. - - """ - return intersect_all(self, other, **kwargs) - - def bind(self): - if self._bind: - return self._bind - froms = self._froms - if not froms: - for c in self._raw_columns: - e = c.bind - if e: - self._bind = e - return e - else: - e = list(froms)[0].bind - if e: - self._bind = e - return e - - return None - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - -class UpdateBase(Executable, ClauseElement): - """Form the base for ``INSERT``, ``UPDATE``, and ``DELETE`` statements. - - """ - - __visit_name__ = 'update_base' - - _execution_options = \ - Executable._execution_options.union({'autocommit': True}) - kwargs = util.immutabledict() - _hints = util.immutabledict() - - def _process_colparams(self, parameters): - if isinstance(parameters, (list, tuple)): - pp = {} - for i, c in enumerate(self.table.c): - pp[c.key] = parameters[i] - return pp - else: - return parameters - - def params(self, *arg, **kw): - """Set the parameters for the statement. - - This method raises ``NotImplementedError`` on the base class, - and is overridden by :class:`.ValuesBase` to provide the - SET/VALUES clause of UPDATE and INSERT. - - """ - raise NotImplementedError( - "params() is not supported for INSERT/UPDATE/DELETE statements." - " To set the values for an INSERT or UPDATE statement, use" - " stmt.values(**parameters).") - - def bind(self): - """Return a 'bind' linked to this :class:`.UpdateBase` - or a :class:`.Table` associated with it. - - """ - return self._bind or self.table.bind - - def _set_bind(self, bind): - self._bind = bind - bind = property(bind, _set_bind) - - _returning_re = re.compile(r'(?:firebird|postgres(?:ql)?)_returning') - def _process_deprecated_kw(self, kwargs): - for k in list(kwargs): - m = self._returning_re.match(k) - if m: - self._returning = kwargs.pop(k) - util.warn_deprecated( - "The %r argument is deprecated. Please " - "use statement.returning(col1, col2, ...)" % k - ) - return kwargs - - @_generative - def returning(self, *cols): - """Add a RETURNING or equivalent clause to this statement. - - The given list of columns represent columns within the table that is - the target of the INSERT, UPDATE, or DELETE. Each element can be any - column expression. :class:`~sqlalchemy.schema.Table` objects will be - expanded into their individual columns. - - Upon compilation, a RETURNING clause, or database equivalent, - will be rendered within the statement. 
For INSERT and UPDATE, - the values are the newly inserted/updated values. For DELETE, - the values are those of the rows which were deleted. - - Upon execution, the values of the columns to be returned - are made available via the result set and can be iterated - using ``fetchone()`` and similar. For DBAPIs which do not - natively support returning values (i.e. cx_oracle), - SQLAlchemy will approximate this behavior at the result level - so that a reasonable amount of behavioral neutrality is - provided. - - Note that not all databases/DBAPIs - support RETURNING. For those backends with no support, - an exception is raised upon compilation and/or execution. - For those who do support it, the functionality across backends - varies greatly, including restrictions on executemany() - and other statements which return multiple rows. Please - read the documentation notes for the database in use in - order to determine the availability of RETURNING. - - """ - self._returning = cols - - @_generative - def with_hint(self, text, selectable=None, dialect_name="*"): - """Add a table hint for a single table to this - INSERT/UPDATE/DELETE statement. - - .. note:: - - :meth:`.UpdateBase.with_hint` currently applies only to - Microsoft SQL Server. For MySQL INSERT hints, use - :meth:`.Insert.prefix_with`. UPDATE/DELETE hints for - MySQL will be added in a future release. - - The text of the hint is rendered in the appropriate - location for the database backend in use, relative - to the :class:`.Table` that is the subject of this - statement, or optionally to that of the given - :class:`.Table` passed as the ``selectable`` argument. - - The ``dialect_name`` option will limit the rendering of a particular - hint to a particular backend. Such as, to add a hint - that only takes effect for SQL Server:: - - mytable.insert().with_hint("WITH (PAGLOCK)", dialect_name="mssql") - - .. versionadded:: 0.7.6 - - :param text: Text of the hint. - :param selectable: optional :class:`.Table` that specifies - an element of the FROM clause within an UPDATE or DELETE - to be the subject of the hint - applies only to certain backends. - :param dialect_name: defaults to ``*``, if specified as the name - of a particular dialect, will apply these hints only when - that dialect is in use. - """ - if selectable is None: - selectable = self.table - - self._hints = self._hints.union({(selectable, dialect_name):text}) - -class ValuesBase(UpdateBase): - """Supplies support for :meth:`.ValuesBase.values` to INSERT and UPDATE constructs.""" - - __visit_name__ = 'values_base' - - def __init__(self, table, values): - self.table = table - self.parameters = self._process_colparams(values) - - @_generative - def values(self, *args, **kwargs): - """specify the VALUES clause for an INSERT statement, or the SET - clause for an UPDATE. - - :param \**kwargs: key value pairs representing the string key - of a :class:`.Column` mapped to the value to be rendered into the - VALUES or SET clause:: - - users.insert().values(name="some name") - - users.update().where(users.c.id==5).values(name="some name") - - :param \*args: A single dictionary can be sent as the first positional - argument. 
This allows non-string based keys, such as Column - objects, to be used:: - - users.insert().values({users.c.name : "some name"}) - - users.update().where(users.c.id==5).values({users.c.name : "some name"}) - - See also: - - :ref:`inserts_and_updates` - SQL Expression - Language Tutorial - - :func:`~.expression.insert` - produce an ``INSERT`` statement - - :func:`~.expression.update` - produce an ``UPDATE`` statement - - """ - if args: - v = args[0] - else: - v = {} - - if self.parameters is None: - self.parameters = self._process_colparams(v) - self.parameters.update(kwargs) - else: - self.parameters = self.parameters.copy() - self.parameters.update(self._process_colparams(v)) - self.parameters.update(kwargs) - -class Insert(ValuesBase): - """Represent an INSERT construct. - - The :class:`.Insert` object is created using the :func:`~.expression.insert()` function. - - See also: - - :ref:`coretutorial_insert_expressions` - - """ - __visit_name__ = 'insert' - - _prefixes = () - - def __init__(self, - table, - values=None, - inline=False, - bind=None, - prefixes=None, - returning=None, - **kwargs): - ValuesBase.__init__(self, table, values) - self._bind = bind - self.select = None - self.inline = inline - self._returning = returning - if prefixes: - self._prefixes = tuple([_literal_as_text(p) for p in prefixes]) - - if kwargs: - self.kwargs = self._process_deprecated_kw(kwargs) - - def get_children(self, **kwargs): - if self.select is not None: - return self.select, - else: - return () - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self.parameters = self.parameters.copy() - - @_generative - def prefix_with(self, clause): - """Add a word or expression between INSERT and INTO. Generative. - - If multiple prefixes are supplied, they will be separated with - spaces. - - """ - clause = _literal_as_text(clause) - self._prefixes = self._prefixes + (clause,) - -class Update(ValuesBase): - """Represent an Update construct. - - The :class:`.Update` object is created using the :func:`update()` function. - - """ - __visit_name__ = 'update' - - def __init__(self, - table, - whereclause, - values=None, - inline=False, - bind=None, - returning=None, - **kwargs): - ValuesBase.__init__(self, table, values) - self._bind = bind - self._returning = returning - if whereclause is not None: - self._whereclause = _literal_as_text(whereclause) - else: - self._whereclause = None - self.inline = inline - - if kwargs: - self.kwargs = self._process_deprecated_kw(kwargs) - - def get_children(self, **kwargs): - if self._whereclause is not None: - return self._whereclause, - else: - return () - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self._whereclause = clone(self._whereclause, **kw) - self.parameters = self.parameters.copy() - - @_generative - def where(self, whereclause): - """return a new update() construct with the given expression added to - its WHERE clause, joined to the existing clause via AND, if any. - - """ - if self._whereclause is not None: - self._whereclause = and_(self._whereclause, - _literal_as_text(whereclause)) - else: - self._whereclause = _literal_as_text(whereclause) - - @property - def _extra_froms(self): - # TODO: this could be made memoized - # if the memoization is reset on each generative call. 
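# Gather FROM objects referenced by the WHERE clause, other than
# the target table itself; backends which support UPDATE..FROM
# render these as an additional FROM list.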
- froms = [] - seen = set([self.table]) - - if self._whereclause is not None: - for item in _from_objects(self._whereclause): - if not seen.intersection(item._cloned_set): - froms.append(item) - seen.update(item._cloned_set) - - return froms - -class Delete(UpdateBase): - """Represent a DELETE construct. - - The :class:`.Delete` object is created using the :func:`delete()` function. - - """ - - __visit_name__ = 'delete' - - def __init__(self, - table, - whereclause, - bind=None, - returning =None, - **kwargs): - self._bind = bind - self.table = table - self._returning = returning - - if whereclause is not None: - self._whereclause = _literal_as_text(whereclause) - else: - self._whereclause = None - - if kwargs: - self.kwargs = self._process_deprecated_kw(kwargs) - - def get_children(self, **kwargs): - if self._whereclause is not None: - return self._whereclause, - else: - return () - - @_generative - def where(self, whereclause): - """Add the given WHERE clause to a newly returned delete construct.""" - - if self._whereclause is not None: - self._whereclause = and_(self._whereclause, - _literal_as_text(whereclause)) - else: - self._whereclause = _literal_as_text(whereclause) - - def _copy_internals(self, clone=_clone, **kw): - # TODO: coverage - self._whereclause = clone(self._whereclause, **kw) - -class _IdentifiedClause(Executable, ClauseElement): - - __visit_name__ = 'identified' - _execution_options = \ - Executable._execution_options.union({'autocommit': False}) - quote = None - - def __init__(self, ident): - self.ident = ident - -class SavepointClause(_IdentifiedClause): - __visit_name__ = 'savepoint' - -class RollbackToSavepointClause(_IdentifiedClause): - __visit_name__ = 'rollback_to_savepoint' - -class ReleaseSavepointClause(_IdentifiedClause): - __visit_name__ = 'release_savepoint' - - diff --git a/libs/sqlalchemy/sql/functions.py b/libs/sqlalchemy/sql/functions.py deleted file mode 100644 index 95781d70..00000000 --- a/libs/sqlalchemy/sql/functions.py +++ /dev/null @@ -1,134 +0,0 @@ -# sql/functions.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from sqlalchemy import types as sqltypes, schema -from sqlalchemy.sql.expression import ( - ClauseList, Function, _literal_as_binds, text, _type_from_args - ) -from sqlalchemy.sql import operators -from sqlalchemy.sql.visitors import VisitableType - -class _GenericMeta(VisitableType): - def __call__(self, *args, **kwargs): - args = [_literal_as_binds(c) for c in args] - return type.__call__(self, *args, **kwargs) - -class GenericFunction(Function): - __metaclass__ = _GenericMeta - - def __init__(self, type_=None, args=(), **kwargs): - self.packagenames = [] - self.name = self.__class__.__name__ - self._bind = kwargs.get('bind', None) - self.clause_expr = ClauseList( - operator=operators.comma_op, - group_contents=True, *args).self_group() - self.type = sqltypes.to_instance( - type_ or getattr(self, '__return_type__', None)) - - -class next_value(Function): - """Represent the 'next value', given a :class:`.Sequence` - as it's single argument. - - Compiles into the appropriate function on each backend, - or will raise NotImplementedError if used on a backend - that does not provide support for sequences. 
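Typical usage is via the :meth:`.Sequence.next_value` method,
which produces this construct (a sketch; the sequence name is
hypothetical)::

    from sqlalchemy import Sequence
    from sqlalchemy.sql import select

    seq = Sequence('order_id_seq')

    # renders, e.g. on Postgresql:
    # SELECT nextval('order_id_seq')
    stmt = select([seq.next_value()])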
- - """ - type = sqltypes.Integer() - name = "next_value" - - def __init__(self, seq, **kw): - assert isinstance(seq, schema.Sequence), \ - "next_value() accepts a Sequence object as input." - self._bind = kw.get('bind', None) - self.sequence = seq - - @property - def _from_objects(self): - return [] - -class AnsiFunction(GenericFunction): - def __init__(self, **kwargs): - GenericFunction.__init__(self, **kwargs) - -class ReturnTypeFromArgs(GenericFunction): - """Define a function whose return type is the same as its arguments.""" - - def __init__(self, *args, **kwargs): - kwargs.setdefault('type_', _type_from_args(args)) - GenericFunction.__init__(self, args=args, **kwargs) - -class coalesce(ReturnTypeFromArgs): - pass - -class max(ReturnTypeFromArgs): - pass - -class min(ReturnTypeFromArgs): - pass - -class sum(ReturnTypeFromArgs): - pass - - -class now(GenericFunction): - __return_type__ = sqltypes.DateTime - -class concat(GenericFunction): - __return_type__ = sqltypes.String - def __init__(self, *args, **kwargs): - GenericFunction.__init__(self, args=args, **kwargs) - -class char_length(GenericFunction): - __return_type__ = sqltypes.Integer - - def __init__(self, arg, **kwargs): - GenericFunction.__init__(self, args=[arg], **kwargs) - -class random(GenericFunction): - def __init__(self, *args, **kwargs): - kwargs.setdefault('type_', None) - GenericFunction.__init__(self, args=args, **kwargs) - -class count(GenericFunction): - """The ANSI COUNT aggregate function. With no arguments, emits COUNT \*.""" - - __return_type__ = sqltypes.Integer - - def __init__(self, expression=None, **kwargs): - if expression is None: - expression = text('*') - GenericFunction.__init__(self, args=(expression,), **kwargs) - -class current_date(AnsiFunction): - __return_type__ = sqltypes.Date - -class current_time(AnsiFunction): - __return_type__ = sqltypes.Time - -class current_timestamp(AnsiFunction): - __return_type__ = sqltypes.DateTime - -class current_user(AnsiFunction): - __return_type__ = sqltypes.String - -class localtime(AnsiFunction): - __return_type__ = sqltypes.DateTime - -class localtimestamp(AnsiFunction): - __return_type__ = sqltypes.DateTime - -class session_user(AnsiFunction): - __return_type__ = sqltypes.String - -class sysdate(AnsiFunction): - __return_type__ = sqltypes.DateTime - -class user(AnsiFunction): - __return_type__ = sqltypes.String - diff --git a/libs/sqlalchemy/sql/operators.py b/libs/sqlalchemy/sql/operators.py deleted file mode 100644 index 9e796506..00000000 --- a/libs/sqlalchemy/sql/operators.py +++ /dev/null @@ -1,612 +0,0 @@ -# sql/operators.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Defines operators used in SQL expressions.""" - -from operator import ( - and_, or_, inv, add, mul, sub, mod, truediv, lt, le, ne, gt, ge, eq, neg - ) - -# Py2K -from operator import (div,) -# end Py2K - -from sqlalchemy.util import symbol - -class Operators(object): - """Base of comparison and logical operators. - - Implements base methods :meth:`operate` and :meth:`reverse_operate`, - as well as :meth:`__and__`, :meth:`__or__`, :meth:`__invert__`. - - Usually is used via its most common subclass - :class:`.ColumnOperators`. - - """ - def __and__(self, other): - """Implement the ``&`` operator. 
-
-        When used with SQL expressions, results in an
-        AND operation, equivalent to
-        :func:`~.expression.and_`, that is::
-
-            a & b
-
-        is equivalent to::
-
-            from sqlalchemy import and_
-            and_(a, b)
-
-        Care should be taken when using ``&`` regarding
-        operator precedence; the ``&`` operator has the highest precedence.
-        The operands should be enclosed in parentheses if they contain
-        further sub expressions::
-
-            (a == 2) & (b == 4)
-
-        """
-        return self.operate(and_, other)
-
-    def __or__(self, other):
-        """Implement the ``|`` operator.
-
-        When used with SQL expressions, results in an
-        OR operation, equivalent to
-        :func:`~.expression.or_`, that is::
-
-            a | b
-
-        is equivalent to::
-
-            from sqlalchemy import or_
-            or_(a, b)
-
-        Care should be taken when using ``|`` regarding
-        operator precedence; the ``|`` operator has the highest precedence.
-        The operands should be enclosed in parentheses if they contain
-        further sub expressions::
-
-            (a == 2) | (b == 4)
-
-        """
-        return self.operate(or_, other)
-
-    def __invert__(self):
-        """Implement the ``~`` operator.
-
-        When used with SQL expressions, results in a
-        NOT operation, equivalent to
-        :func:`~.expression.not_`, that is::
-
-            ~a
-
-        is equivalent to::
-
-            from sqlalchemy import not_
-            not_(a)
-
-        """
-        return self.operate(inv)
-
-    def op(self, opstring):
-        """produce a generic operator function.
-
-        e.g.::
-
-            somecolumn.op("*")(5)
-
-        produces::
-
-            somecolumn * 5
-
-        :param opstring: a string which will be output as the infix operator
-          between this :class:`.ClauseElement` and the expression passed to the
-          generated function.
-
-        This function can also be used to make bitwise operators explicit. For
-        example::
-
-            somecolumn.op('&')(0xff)
-
-        is a bitwise AND of the value in somecolumn.
-
-        """
-        def _op(b):
-            return self.operate(op, opstring, b)
-        return _op
-
-    def operate(self, op, *other, **kwargs):
-        """Operate on an argument.
-
-        This is the lowest level of operation, raises
-        :class:`NotImplementedError` by default.
-
-        Overriding this on a subclass can allow common
-        behavior to be applied to all operations.
-        For example, overriding :class:`.ColumnOperators`
-        to apply ``func.lower()`` to the left and right
-        side::
-
-            class MyComparator(ColumnOperators):
-                def operate(self, op, other):
-                    return op(func.lower(self), func.lower(other))
-
-        :param op: Operator callable.
-        :param \*other: the 'other' side of the operation. Will
-          be a single scalar for most operations.
-        :param \**kwargs: modifiers.  These may be passed by special
-          operators such as :meth:`ColumnOperators.contains`.
-
-
-        """
-        raise NotImplementedError(str(op))
-
-    def reverse_operate(self, op, other, **kwargs):
-        """Reverse operate on an argument.
-
-        Usage is the same as :meth:`operate`.
-
-        """
-        raise NotImplementedError(str(op))
-
-class ColumnOperators(Operators):
-    """Defines comparison and math operations.
-
-    By default all methods call down to
-    :meth:`Operators.operate` or :meth:`Operators.reverse_operate`
-    passing in the appropriate operator function from the
-    Python builtin ``operator`` module or
-    a SQLAlchemy-specific operator function from
-    :mod:`sqlalchemy.expression.operators`.
For example - the ``__eq__`` function:: - - def __eq__(self, other): - return self.operate(operators.eq, other) - - Where ``operators.eq`` is essentially:: - - def eq(a, b): - return a == b - - A SQLAlchemy construct like :class:`.ColumnElement` ultimately - overrides :meth:`.Operators.operate` and others - to return further :class:`.ClauseElement` constructs, - so that the ``==`` operation above is replaced by a clause - construct. - - The docstrings here will describe column-oriented - behavior of each operator. For ORM-based operators - on related objects and collections, see :class:`.RelationshipProperty.Comparator`. - - """ - - timetuple = None - """Hack, allows datetime objects to be compared on the LHS.""" - - def __lt__(self, other): - """Implement the ``<`` operator. - - In a column context, produces the clause ``a < b``. - - """ - return self.operate(lt, other) - - def __le__(self, other): - """Implement the ``<=`` operator. - - In a column context, produces the clause ``a <= b``. - - """ - return self.operate(le, other) - - __hash__ = Operators.__hash__ - - def __eq__(self, other): - """Implement the ``==`` operator. - - In a column context, produces the clause ``a = b``. - If the target is ``None``, produces ``a IS NULL``. - - """ - return self.operate(eq, other) - - def __ne__(self, other): - """Implement the ``!=`` operator. - - In a column context, produces the clause ``a != b``. - If the target is ``None``, produces ``a IS NOT NULL``. - - """ - return self.operate(ne, other) - - def __gt__(self, other): - """Implement the ``>`` operator. - - In a column context, produces the clause ``a > b``. - - """ - return self.operate(gt, other) - - def __ge__(self, other): - """Implement the ``>=`` operator. - - In a column context, produces the clause ``a >= b``. - - """ - return self.operate(ge, other) - - def __neg__(self): - """Implement the ``-`` operator. - - In a column context, produces the clause ``-a``. - - """ - return self.operate(neg) - - def concat(self, other): - """Implement the 'concat' operator. - - In a column context, produces the clause ``a || b``, - or uses the ``concat()`` operator on MySQL. - - """ - return self.operate(concat_op, other) - - def like(self, other, escape=None): - """Implement the ``like`` operator. - - In a column context, produces the clause ``a LIKE other``. - - """ - return self.operate(like_op, other, escape=escape) - - def ilike(self, other, escape=None): - """Implement the ``ilike`` operator. - - In a column context, produces the clause ``a ILIKE other``. - - """ - return self.operate(ilike_op, other, escape=escape) - - def in_(self, other): - """Implement the ``in`` operator. - - In a column context, produces the clause ``a IN other``. - "other" may be a tuple/list of column expressions, - or a :func:`~.expression.select` construct. - - """ - return self.operate(in_op, other) - - def is_(self, other): - """Implement the ``IS`` operator. - - Normally, ``IS`` is generated automatically when comparing to a - value of ``None``, which resolves to ``NULL``. However, explicit - usage of ``IS`` may be desirable if comparing to boolean values - on certain platforms. - - .. versionadded:: 0.7.9 - - .. seealso:: :meth:`.ColumnOperators.isnot` - - """ - return self.operate(is_, other) - - def isnot(self, other): - """Implement the ``IS NOT`` operator. - - Normally, ``IS NOT`` is generated automatically when comparing to a - value of ``None``, which resolves to ``NULL``. 
However, explicit
-        usage of ``IS NOT`` may be desirable if comparing to boolean values
-        on certain platforms.
-
-        .. versionadded:: 0.7.9
-
-        .. seealso:: :meth:`.ColumnOperators.is_`
-
-        """
-        return self.operate(isnot, other)
-
-    def startswith(self, other, **kwargs):
-        """Implement the ``startswith`` operator.
-
-        In a column context, produces the clause ``LIKE '<other>%'``
-
-        """
-        return self.operate(startswith_op, other, **kwargs)
-
-    def endswith(self, other, **kwargs):
-        """Implement the 'endswith' operator.
-
-        In a column context, produces the clause ``LIKE '%<other>'``
-
-        """
-        return self.operate(endswith_op, other, **kwargs)
-
-    def contains(self, other, **kwargs):
-        """Implement the 'contains' operator.
-
-        In a column context, produces the clause ``LIKE '%<other>%'``
-
-        """
-        return self.operate(contains_op, other, **kwargs)
-
-    def match(self, other, **kwargs):
-        """Implements the 'match' operator.
-
-        In a column context, this produces a MATCH clause, i.e.
-        ``MATCH '<other>'``.  The allowed contents of ``other``
-        are database backend specific.
-
-        """
-        return self.operate(match_op, other, **kwargs)
-
-    def desc(self):
-        """Produce a :func:`~.expression.desc` clause against the
-        parent object."""
-        return self.operate(desc_op)
-
-    def asc(self):
-        """Produce a :func:`~.expression.asc` clause against the
-        parent object."""
-        return self.operate(asc_op)
-
-    def nullsfirst(self):
-        """Produce a :func:`~.expression.nullsfirst` clause against the
-        parent object."""
-        return self.operate(nullsfirst_op)
-
-    def nullslast(self):
-        """Produce a :func:`~.expression.nullslast` clause against the
-        parent object."""
-        return self.operate(nullslast_op)
-
-    def collate(self, collation):
-        """Produce a :func:`~.expression.collate` clause against
-        the parent object, given the collation string."""
-        return self.operate(collate, collation)
-
-    def __radd__(self, other):
-        """Implement the ``+`` operator in reverse.
-
-        See :meth:`__add__`.
-
-        """
-        return self.reverse_operate(add, other)
-
-    def __rsub__(self, other):
-        """Implement the ``-`` operator in reverse.
-
-        See :meth:`__sub__`.
-
-        """
-        return self.reverse_operate(sub, other)
-
-    def __rmul__(self, other):
-        """Implement the ``*`` operator in reverse.
-
-        See :meth:`__mul__`.
-
-        """
-        return self.reverse_operate(mul, other)
-
-    def __rdiv__(self, other):
-        """Implement the ``/`` operator in reverse.
-
-        See :meth:`__div__`.
-
-        """
-        return self.reverse_operate(div, other)
-
-    def between(self, cleft, cright):
-        """Produce a :func:`~.expression.between` clause against
-        the parent object, given the lower and upper range."""
-        return self.operate(between_op, cleft, cright)
-
-    def distinct(self):
-        """Produce a :func:`~.expression.distinct` clause against the parent object."""
-        return self.operate(distinct_op)
-
-    def __add__(self, other):
-        """Implement the ``+`` operator.
-
-        In a column context, produces the clause ``a + b``
-        if the parent object has non-string affinity.
-        If the parent object has a string affinity,
-        produces the concatenation operator, ``a || b`` -
-        see :meth:`concat`.
-
-        """
-        return self.operate(add, other)
-
-    def __sub__(self, other):
-        """Implement the ``-`` operator.
-
-        In a column context, produces the clause ``a - b``.
-
-        """
-        return self.operate(sub, other)
-
-    def __mul__(self, other):
-        """Implement the ``*`` operator.
-
-        In a column context, produces the clause ``a * b``.
-
-        """
-        return self.operate(mul, other)
-
-    def __div__(self, other):
-        """Implement the ``/`` operator.
-
-        In a column context, produces the clause ``a / b``.
-
-        """
-        return self.operate(div, other)
-
-    def __mod__(self, other):
-        """Implement the ``%`` operator.
-
-        In a column context, produces the clause ``a % b``.
-
-        """
-        return self.operate(mod, other)
-
-    def __truediv__(self, other):
-        """Implement the ``/`` operator (true division).
-
-        In a column context, produces the clause ``a / b``.
-
-        """
-        return self.operate(truediv, other)
-
-    def __rtruediv__(self, other):
-        """Implement the ``/`` operator (true division) in reverse.
-
-        See :meth:`__truediv__`.
-
-        """
-        return self.reverse_operate(truediv, other)
-
-def from_():
-    raise NotImplementedError()
-
-def as_():
-    raise NotImplementedError()
-
-def exists():
-    raise NotImplementedError()
-
-def is_(a, b):
-    return a.is_(b)
-
-def isnot(a, b):
-    return a.isnot(b)
-
-def collate(a, b):
-    return a.collate(b)
-
-def op(a, opstring, b):
-    return a.op(opstring)(b)
-
-def like_op(a, b, escape=None):
-    return a.like(b, escape=escape)
-
-def notlike_op(a, b, escape=None):
-    return ~a.like(b, escape=escape)
-
-def ilike_op(a, b, escape=None):
-    return a.ilike(b, escape=escape)
-
-def notilike_op(a, b, escape=None):
-    return ~a.ilike(b, escape=escape)
-
-def between_op(a, b, c):
-    return a.between(b, c)
-
-def in_op(a, b):
-    return a.in_(b)
-
-def notin_op(a, b):
-    return ~a.in_(b)
-
-def distinct_op(a):
-    return a.distinct()
-
-def startswith_op(a, b, escape=None):
-    return a.startswith(b, escape=escape)
-
-def endswith_op(a, b, escape=None):
-    return a.endswith(b, escape=escape)
-
-def contains_op(a, b, escape=None):
-    return a.contains(b, escape=escape)
-
-def match_op(a, b):
-    return a.match(b)
-
-def comma_op(a, b):
-    raise NotImplementedError()
-
-def concat_op(a, b):
-    return a.concat(b)
-
-def desc_op(a):
-    return a.desc()
-
-def asc_op(a):
-    return a.asc()
-
-def nullsfirst_op(a):
-    return a.nullsfirst()
-
-def nullslast_op(a):
-    return a.nullslast()
-
-_commutative = set([eq, ne, add, mul])
-
-def is_commutative(op):
-    return op in _commutative
-
-def is_ordering_modifier(op):
-    return op in (asc_op, desc_op,
-                    nullsfirst_op, nullslast_op)
-
-_associative = _commutative.union([concat_op, and_, or_])
-
-
-_smallest = symbol('_smallest')
-_largest = symbol('_largest')
-
-_PRECEDENCE = {
-    from_: 15,
-    mul: 7,
-    truediv: 7,
-    # Py2K
-    div: 7,
-    # end Py2K
-    mod: 7,
-    neg: 7,
-    add: 6,
-    sub: 6,
-    concat_op: 6,
-    match_op: 6,
-    ilike_op: 5,
-    notilike_op: 5,
-    like_op: 5,
-    notlike_op: 5,
-    in_op: 5,
-    notin_op: 5,
-    is_: 5,
-    isnot: 5,
-    eq: 5,
-    ne: 5,
-    gt: 5,
-    lt: 5,
-    ge: 5,
-    le: 5,
-    between_op: 5,
-    distinct_op: 5,
-    inv: 5,
-    and_: 3,
-    or_: 2,
-    comma_op: -1,
-    collate: 7,
-    as_: -1,
-    exists: 0,
-    _smallest: -1000,
-    _largest: 1000
-}
-
-def is_precedent(operator, against):
-    if operator is against and operator in _associative:
-        return False
-    else:
-        return (_PRECEDENCE.get(operator, _PRECEDENCE[_smallest]) <=
-                _PRECEDENCE.get(against, _PRECEDENCE[_largest]))
diff --git a/libs/sqlalchemy/sql/util.py b/libs/sqlalchemy/sql/util.py
deleted file mode 100644
index 0a00674c..00000000
--- a/libs/sqlalchemy/sql/util.py
+++ /dev/null
@@ -1,788 +0,0 @@
-# sql/util.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-from sqlalchemy import exc, schema, util, sql, types as sqltypes
-from sqlalchemy.util import topological
-from sqlalchemy.sql import expression, operators, visitors
-from itertools import
chain -from collections import deque - -"""Utility functions that build upon SQL and Schema constructs.""" - -def sort_tables(tables): - """sort a collection of Table objects in order of their foreign-key dependency.""" - - tables = list(tables) - tuples = [] - def visit_foreign_key(fkey): - if fkey.use_alter: - return - parent_table = fkey.column.table - if parent_table in tables: - child_table = fkey.parent.table - if parent_table is not child_table: - tuples.append((parent_table, child_table)) - - for table in tables: - visitors.traverse(table, - {'schema_visitor':True}, - {'foreign_key':visit_foreign_key}) - - tuples.extend( - [parent, table] for parent in table._extra_dependencies - ) - - return list(topological.sort(tuples, tables)) - -def find_join_source(clauses, join_to): - """Given a list of FROM clauses and a selectable, - return the first index and element from the list of - clauses which can be joined against the selectable. returns - None, None if no match is found. - - e.g.:: - - clause1 = table1.join(table2) - clause2 = table4.join(table5) - - join_to = table2.join(table3) - - find_join_source([clause1, clause2], join_to) == clause1 - - """ - - selectables = list(expression._from_objects(join_to)) - for i, f in enumerate(clauses): - for s in selectables: - if f.is_derived_from(s): - return i, f - else: - return None, None - -def find_tables(clause, check_columns=False, - include_aliases=False, include_joins=False, - include_selects=False, include_crud=False): - """locate Table objects within the given expression.""" - - tables = [] - _visitors = {} - - if include_selects: - _visitors['select'] = _visitors['compound_select'] = tables.append - - if include_joins: - _visitors['join'] = tables.append - - if include_aliases: - _visitors['alias'] = tables.append - - if include_crud: - _visitors['insert'] = _visitors['update'] = \ - _visitors['delete'] = lambda ent: tables.append(ent.table) - - if check_columns: - def visit_column(column): - tables.append(column.table) - _visitors['column'] = visit_column - - _visitors['table'] = tables.append - - visitors.traverse(clause, {'column_collections':False}, _visitors) - return tables - -def find_columns(clause): - """locate Column objects within the given expression.""" - - cols = util.column_set() - visitors.traverse(clause, {}, {'column':cols.add}) - return cols - -def unwrap_order_by(clause): - """Break up an 'order by' expression into individual column-expressions, - without DESC/ASC/NULLS FIRST/NULLS LAST""" - - cols = util.column_set() - stack = deque([clause]) - while stack: - t = stack.popleft() - if isinstance(t, expression.ColumnElement) and \ - ( - not isinstance(t, expression._UnaryExpression) or \ - not operators.is_ordering_modifier(t.modifier) - ): - cols.add(t) - else: - for c in t.get_children(): - stack.append(c) - return cols - -def clause_is_present(clause, search): - """Given a target clause and a second to search within, return True - if the target is plainly present in the search without any - subqueries or aliases involved. - - Basically descends through Joins. - - """ - - stack = [search] - while stack: - elem = stack.pop() - if clause is elem: - return True - elif isinstance(elem, expression.Join): - stack.extend((elem.left, elem.right)) - return False - - -def bind_values(clause): - """Return an ordered list of "bound" values in the given clause. - - E.g.:: - - >>> expr = and_( - ... table.c.foo==5, table.c.foo==7 - ... 
) - >>> bind_values(expr) - [5, 7] - """ - - v = [] - def visit_bindparam(bind): - v.append(bind.effective_value) - - visitors.traverse(clause, {}, {'bindparam':visit_bindparam}) - return v - -def _quote_ddl_expr(element): - if isinstance(element, basestring): - element = element.replace("'", "''") - return "'%s'" % element - else: - return repr(element) - -class _repr_params(object): - """A string view of bound parameters, truncating - display to the given number of 'multi' parameter sets. - - """ - def __init__(self, params, batches): - self.params = params - self.batches = batches - - def __repr__(self): - if isinstance(self.params, (list, tuple)) and \ - len(self.params) > self.batches and \ - isinstance(self.params[0], (list, dict, tuple)): - return ' '.join(( - repr(self.params[:self.batches - 2])[0:-1], - " ... displaying %i of %i total bound parameter sets ... " % (self.batches, len(self.params)), - repr(self.params[-2:])[1:] - )) - else: - return repr(self.params) - - -def expression_as_ddl(clause): - """Given a SQL expression, convert for usage in DDL, such as - CREATE INDEX and CHECK CONSTRAINT. - - Converts bind params into quoted literals, column identifiers - into detached column constructs so that the parent table - identifier is not included. - - """ - def repl(element): - if isinstance(element, expression._BindParamClause): - return expression.literal_column(_quote_ddl_expr(element.value)) - elif isinstance(element, expression.ColumnClause) and \ - element.table is not None: - return expression.column(element.name) - else: - return None - - return visitors.replacement_traverse(clause, {}, repl) - -def adapt_criterion_to_null(crit, nulls): - """given criterion containing bind params, convert selected elements to IS NULL.""" - - def visit_binary(binary): - if isinstance(binary.left, expression._BindParamClause) \ - and binary.left._identifying_key in nulls: - # reverse order if the NULL is on the left side - binary.left = binary.right - binary.right = expression.null() - binary.operator = operators.is_ - binary.negate = operators.isnot - elif isinstance(binary.right, expression._BindParamClause) \ - and binary.right._identifying_key in nulls: - binary.right = expression.null() - binary.operator = operators.is_ - binary.negate = operators.isnot - - return visitors.cloned_traverse(crit, {}, {'binary':visit_binary}) - -def join_condition(a, b, ignore_nonexistent_tables=False, a_subset=None): - """create a join condition between two tables or selectables. - - e.g.:: - - join_condition(tablea, tableb) - - would produce an expression along the lines of:: - - tablea.c.id==tableb.c.tablea_id - - The join is determined based on the foreign key relationships - between the two selectables. If there are multiple ways - to join, or no way to join, an error is raised. - - :param ignore_nonexistent_tables: Deprecated - this - flag is no longer used. Only resolution errors regarding - the two given tables are propagated. - - :param a_subset: An optional expression that is a sub-component - of ``a``. An attempt will be made to join to just this sub-component - first before looking at the full ``a`` construct, and if found - will be successful even if there are other ways to join to ``a``. - This allows the "right side" of a join to be passed thereby - providing a "natural join". 
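-
-    A minimal sketch with hypothetical tables, where ``child.parent_id``
-    references ``parent.id``::
-
-        from sqlalchemy import MetaData, Table, Column, Integer, ForeignKey
-
-        m = MetaData()
-        parent = Table('parent', m,
-                    Column('id', Integer, primary_key=True))
-        child = Table('child', m,
-                    Column('id', Integer, primary_key=True),
-                    Column('parent_id', Integer, ForeignKey('parent.id')))
-
-        # produces an expression equivalent to parent.c.id == child.c.parent_id
-        onclause = join_condition(parent, child)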
- - """ - crit = [] - constraints = set() - - for left in (a_subset, a): - if left is None: - continue - for fk in sorted( - b.foreign_keys, - key=lambda fk:fk.parent._creation_order): - try: - col = fk.get_referent(left) - except exc.NoReferenceError, nrte: - if nrte.table_name == left.name: - raise - else: - continue - - if col is not None: - crit.append(col == fk.parent) - constraints.add(fk.constraint) - if left is not b: - for fk in sorted( - left.foreign_keys, - key=lambda fk:fk.parent._creation_order): - try: - col = fk.get_referent(b) - except exc.NoReferenceError, nrte: - if nrte.table_name == b.name: - raise - else: - # this is totally covered. can't get - # coverage to mark it. - continue - - if col is not None: - crit.append(col == fk.parent) - constraints.add(fk.constraint) - if crit: - break - - if len(crit) == 0: - if isinstance(b, expression._FromGrouping): - hint = " Perhaps you meant to convert the right side to a "\ - "subquery using alias()?" - else: - hint = "" - raise exc.ArgumentError( - "Can't find any foreign key relationships " - "between '%s' and '%s'.%s" % (a.description, b.description, hint)) - elif len(constraints) > 1: - raise exc.ArgumentError( - "Can't determine join between '%s' and '%s'; " - "tables have more than one foreign key " - "constraint relationship between them. " - "Please specify the 'onclause' of this " - "join explicitly." % (a.description, b.description)) - elif len(crit) == 1: - return (crit[0]) - else: - return sql.and_(*crit) - - -class Annotated(object): - """clones a ClauseElement and applies an 'annotations' dictionary. - - Unlike regular clones, this clone also mimics __hash__() and - __cmp__() of the original element so that it takes its place - in hashed collections. - - A reference to the original element is maintained, for the important - reason of keeping its hash value current. When GC'ed, the - hash value may be reused, causing conflicts. - - """ - - def __new__(cls, *args): - if not args: - # clone constructor - return object.__new__(cls) - else: - element, values = args - # pull appropriate subclass from registry of annotated - # classes - try: - cls = annotated_classes[element.__class__] - except KeyError: - cls = annotated_classes[element.__class__] = type.__new__(type, - "Annotated%s" % element.__class__.__name__, - (Annotated, element.__class__), {}) - return object.__new__(cls) - - def __init__(self, element, values): - # force FromClause to generate their internal - # collections into __dict__ - if isinstance(element, expression.FromClause): - element.c - - self.__dict__ = element.__dict__.copy() - self.__element = element - self._annotations = values - - def _annotate(self, values): - _values = self._annotations.copy() - _values.update(values) - clone = self.__class__.__new__(self.__class__) - clone.__dict__ = self.__dict__.copy() - clone._annotations = _values - return clone - - def _deannotate(self): - return self.__element - - def _compiler_dispatch(self, visitor, **kw): - return self.__element.__class__._compiler_dispatch(self, visitor, **kw) - - @property - def _constructor(self): - return self.__element._constructor - - def _clone(self): - clone = self.__element._clone() - if clone is self.__element: - # detect immutable, don't change anything - return self - else: - # update the clone with any changes that have occurred - # to this object's __dict__. 
- clone.__dict__.update(self.__dict__) - return Annotated(clone, self._annotations) - - def __hash__(self): - return hash(self.__element) - - def __eq__(self, other): - if isinstance(self.__element, expression.ColumnOperators): - return self.__element.__class__.__eq__(self, other) - else: - return hash(other) == hash(self) - - -# hard-generate Annotated subclasses. this technique -# is used instead of on-the-fly types (i.e. type.__new__()) -# so that the resulting objects are pickleable. -annotated_classes = {} - -for cls in expression.__dict__.values() + [schema.Column, schema.Table]: - if isinstance(cls, type) and issubclass(cls, expression.ClauseElement): - exec "class Annotated%s(Annotated, cls):\n" \ - " pass" % (cls.__name__, ) in locals() - exec "annotated_classes[cls] = Annotated%s" % (cls.__name__) - -def _deep_annotate(element, annotations, exclude=None): - """Deep copy the given ClauseElement, annotating each element - with the given annotations dictionary. - - Elements within the exclude collection will be cloned but not annotated. - - """ - cloned = util.column_dict() - - def clone(elem): - # check if element is present in the exclude list. - # take into account proxying relationships. - if elem in cloned: - return cloned[elem] - elif exclude and \ - hasattr(elem, 'proxy_set') and \ - elem.proxy_set.intersection(exclude): - newelem = elem._clone() - elif annotations != elem._annotations: - newelem = elem._annotate(annotations) - else: - newelem = elem - newelem._copy_internals(clone=clone) - cloned[elem] = newelem - return newelem - - if element is not None: - element = clone(element) - return element - -def _deep_deannotate(element): - """Deep copy the given element, removing all annotations.""" - - cloned = util.column_dict() - - def clone(elem): - if elem not in cloned: - newelem = elem._deannotate() - newelem._copy_internals(clone=clone) - cloned[elem] = newelem - return cloned[elem] - - if element is not None: - element = clone(element) - return element - -def _shallow_annotate(element, annotations): - """Annotate the given ClauseElement and copy its internals so that - internal objects refer to the new annotated object. - - Basically used to apply a "dont traverse" annotation to a - selectable, without digging throughout the whole - structure wasting time. - """ - element = element._annotate(annotations) - element._copy_internals() - return element - -def splice_joins(left, right, stop_on=None): - if left is None: - return right - - stack = [(right, None)] - - adapter = ClauseAdapter(left) - ret = None - while stack: - (right, prevright) = stack.pop() - if isinstance(right, expression.Join) and right is not stop_on: - right = right._clone() - right._reset_exported() - right.onclause = adapter.traverse(right.onclause) - stack.append((right.left, right)) - else: - right = adapter.traverse(right) - if prevright is not None: - prevright.left = right - if ret is None: - ret = right - - return ret - -def reduce_columns(columns, *clauses, **kw): - """given a list of columns, return a 'reduced' set based on natural equivalents. - - the set is reduced to the smallest list of columns which have no natural - equivalent present in the list. A "natural equivalent" means that two columns - will ultimately represent the same value because they are related by a foreign key. - - \*clauses is an optional list of join clauses which will be traversed - to further identify columns that are "equivalent". 
-
-    \**kw may specify 'ignore_nonexistent_tables' to ignore foreign keys
-    whose tables are not yet configured.
-
-    This function is primarily used to determine the most minimal "primary key"
-    from a selectable, by reducing the set of primary key columns present
-    in the selectable to just those that are not repeated.
-
-    """
-    ignore_nonexistent_tables = kw.pop('ignore_nonexistent_tables', False)
-
-    columns = util.ordered_column_set(columns)
-
-    omit = util.column_set()
-    for col in columns:
-        for fk in chain(*[c.foreign_keys for c in col.proxy_set]):
-            for c in columns:
-                if c is col:
-                    continue
-                try:
-                    fk_col = fk.column
-                except exc.NoReferencedTableError:
-                    if ignore_nonexistent_tables:
-                        continue
-                    else:
-                        raise
-                if fk_col.shares_lineage(c):
-                    omit.add(col)
-                    break
-
-    if clauses:
-        def visit_binary(binary):
-            if binary.operator == operators.eq:
-                cols = util.column_set(chain(*[c.proxy_set for c in columns.difference(omit)]))
-                if binary.left in cols and binary.right in cols:
-                    for c in columns:
-                        if c.shares_lineage(binary.right):
-                            omit.add(c)
-                            break
-        for clause in clauses:
-            visitors.traverse(clause, {}, {'binary':visit_binary})
-
-    return expression.ColumnSet(columns.difference(omit))
-
-def criterion_as_pairs(expression, consider_as_foreign_keys=None,
-                        consider_as_referenced_keys=None, any_operator=False):
-    """traverse an expression and locate binary criterion pairs."""
-
-    if consider_as_foreign_keys and consider_as_referenced_keys:
-        raise exc.ArgumentError("Can only specify one of "
-                                "'consider_as_foreign_keys' or "
-                                "'consider_as_referenced_keys'")
-
-    def visit_binary(binary):
-        if not any_operator and binary.operator is not operators.eq:
-            return
-        if not isinstance(binary.left, sql.ColumnElement) or \
-                    not isinstance(binary.right, sql.ColumnElement):
-            return
-
-        if consider_as_foreign_keys:
-            if binary.left in consider_as_foreign_keys and \
-                        (binary.right is binary.left or
-                        binary.right not in consider_as_foreign_keys):
-                pairs.append((binary.right, binary.left))
-            elif binary.right in consider_as_foreign_keys and \
-                        (binary.left is binary.right or
-                        binary.left not in consider_as_foreign_keys):
-                pairs.append((binary.left, binary.right))
-        elif consider_as_referenced_keys:
-            if binary.left in consider_as_referenced_keys and \
-                        (binary.right is binary.left or
-                        binary.right not in consider_as_referenced_keys):
-                pairs.append((binary.left, binary.right))
-            elif binary.right in consider_as_referenced_keys and \
-                        (binary.left is binary.right or
-                        binary.left not in consider_as_referenced_keys):
-                pairs.append((binary.right, binary.left))
-        else:
-            if isinstance(binary.left, schema.Column) and \
-                        isinstance(binary.right, schema.Column):
-                if binary.left.references(binary.right):
-                    pairs.append((binary.right, binary.left))
-                elif binary.right.references(binary.left):
-                    pairs.append((binary.left, binary.right))
-    pairs = []
-    visitors.traverse(expression, {}, {'binary':visit_binary})
-    return pairs
-
-def folded_equivalents(join, equivs=None):
-    """Return a list of uniquely named columns.
-
-    The column list of the given Join will be narrowed
-    down to a list of all equivalently-named,
-    equated columns folded into one column, where 'equated' means they are
-    equated to each other in the ON clause of this join.
-
-    This function is used by Join.select(fold_equivalents=True).
-
-    Deprecated.
This function is used for a certain kind of - "polymorphic_union" which is designed to achieve joined - table inheritance where the base table has no "discriminator" - column; [ticket:1131] will provide a better way to - achieve this. - - """ - if equivs is None: - equivs = set() - def visit_binary(binary): - if binary.operator == operators.eq and binary.left.name == binary.right.name: - equivs.add(binary.right) - equivs.add(binary.left) - visitors.traverse(join.onclause, {}, {'binary':visit_binary}) - collist = [] - if isinstance(join.left, expression.Join): - left = folded_equivalents(join.left, equivs) - else: - left = list(join.left.columns) - if isinstance(join.right, expression.Join): - right = folded_equivalents(join.right, equivs) - else: - right = list(join.right.columns) - used = set() - for c in left + right: - if c in equivs: - if c.name not in used: - collist.append(c) - used.add(c.name) - else: - collist.append(c) - return collist - -class AliasedRow(object): - """Wrap a RowProxy with a translation map. - - This object allows a set of keys to be translated - to those present in a RowProxy. - - """ - def __init__(self, row, map): - # AliasedRow objects don't nest, so un-nest - # if another AliasedRow was passed - if isinstance(row, AliasedRow): - self.row = row.row - else: - self.row = row - self.map = map - - def __contains__(self, key): - return self.map[key] in self.row - - def has_key(self, key): - return key in self - - def __getitem__(self, key): - return self.row[self.map[key]] - - def keys(self): - return self.row.keys() - - -class ClauseAdapter(visitors.ReplacingCloningVisitor): - """Clones and modifies clauses based on column correspondence. - - E.g.:: - - table1 = Table('sometable', metadata, - Column('col1', Integer), - Column('col2', Integer) - ) - table2 = Table('someothertable', metadata, - Column('col1', Integer), - Column('col2', Integer) - ) - - condition = table1.c.col1 == table2.c.col1 - - make an alias of table1:: - - s = table1.alias('foo') - - calling ``ClauseAdapter(s).traverse(condition)`` converts - condition to read:: - - s.c.col1 == table2.c.col1 - - """ - def __init__(self, selectable, equivalents=None, include=None, exclude=None, adapt_on_names=False): - self.__traverse_options__ = {'stop_on':[selectable]} - self.selectable = selectable - self.include = include - self.exclude = exclude - self.equivalents = util.column_dict(equivalents or {}) - self.adapt_on_names = adapt_on_names - - def _corresponding_column(self, col, require_embedded, _seen=util.EMPTY_SET): - newcol = self.selectable.corresponding_column( - col, - require_embedded=require_embedded) - if newcol is None and col in self.equivalents and col not in _seen: - for equiv in self.equivalents[col]: - newcol = self._corresponding_column(equiv, - require_embedded=require_embedded, - _seen=_seen.union([col])) - if newcol is not None: - return newcol - if self.adapt_on_names and newcol is None: - newcol = self.selectable.c.get(col.name) - return newcol - - def replace(self, col): - if isinstance(col, expression.FromClause): - if self.selectable.is_derived_from(col): - return self.selectable - - if not isinstance(col, expression.ColumnElement): - return None - - if self.include and col not in self.include: - return None - elif self.exclude and col in self.exclude: - return None - - return self._corresponding_column(col, True) - -class ColumnAdapter(ClauseAdapter): - """Extends ClauseAdapter with extra utility functions. 
- - Provides the ability to "wrap" this ClauseAdapter - around another, a columns dictionary which returns - adapted elements given an original, and an - adapted_row() factory. - - """ - def __init__(self, selectable, equivalents=None, - chain_to=None, include=None, - exclude=None, adapt_required=False): - ClauseAdapter.__init__(self, selectable, equivalents, include, exclude) - if chain_to: - self.chain(chain_to) - self.columns = util.populate_column_dict(self._locate_col) - self.adapt_required = adapt_required - - def wrap(self, adapter): - ac = self.__class__.__new__(self.__class__) - ac.__dict__ = self.__dict__.copy() - ac._locate_col = ac._wrap(ac._locate_col, adapter._locate_col) - ac.adapt_clause = ac._wrap(ac.adapt_clause, adapter.adapt_clause) - ac.adapt_list = ac._wrap(ac.adapt_list, adapter.adapt_list) - ac.columns = util.populate_column_dict(ac._locate_col) - return ac - - adapt_clause = ClauseAdapter.traverse - adapt_list = ClauseAdapter.copy_and_process - - def _wrap(self, local, wrapped): - def locate(col): - col = local(col) - return wrapped(col) - return locate - - def _locate_col(self, col): - c = self._corresponding_column(col, True) - if c is None: - c = self.adapt_clause(col) - - # anonymize labels in case they have a hardcoded name - if isinstance(c, expression._Label): - c = c.label(None) - - # adapt_required indicates that if we got the same column - # back which we put in (i.e. it passed through), - # it's not correct. this is used by eagerloading which - # knows that all columns and expressions need to be adapted - # to a result row, and a "passthrough" is definitely targeting - # the wrong column. - if self.adapt_required and c is col: - return None - - return c - - def adapted_row(self, row): - return AliasedRow(row, self.columns) - - def __getstate__(self): - d = self.__dict__.copy() - del d['columns'] - return d - - def __setstate__(self, state): - self.__dict__.update(state) - self.columns = util.PopulateDict(self._locate_col) diff --git a/libs/sqlalchemy/sql/visitors.py b/libs/sqlalchemy/sql/visitors.py deleted file mode 100644 index d236063d..00000000 --- a/libs/sqlalchemy/sql/visitors.py +++ /dev/null @@ -1,282 +0,0 @@ -# sql/visitors.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Visitor/traversal interface and library functions. - -SQLAlchemy schema and expression constructs rely on a Python-centric -version of the classic "visitor" pattern as the primary way in which -they apply functionality. The most common use of this pattern -is statement compilation, where individual expression classes match -up to rendering methods that produce a string result. Beyond this, -the visitor system is also used to inspect expressions for various -information and patterns, as well as for usage in -some kinds of expression transformation. Other kinds of transformation -use a non-visitor traversal system. - -For many examples of how the visit system is used, see the -sqlalchemy.sql.util and the sqlalchemy.sql.compiler modules. 
-For an introduction to clause adaptation, see
-http://techspot.zzzeek.org/2008/01/23/expression-transformations/
-
-"""
-
-from collections import deque
-import re
-from sqlalchemy import util
-import operator
-
-__all__ = ['VisitableType', 'Visitable', 'ClauseVisitor',
-    'CloningVisitor', 'ReplacingCloningVisitor', 'iterate',
-    'iterate_depthfirst', 'traverse_using', 'traverse',
-    'cloned_traverse', 'replacement_traverse']
-
-class VisitableType(type):
-    """Metaclass which assigns a `_compiler_dispatch` method to classes
-    having a `__visit_name__` attribute.
-
-    The _compiler_dispatch attribute becomes an instance method which
-    looks approximately like the following::
-
-        def _compiler_dispatch(self, visitor, **kw):
-            '''Look for an attribute named "visit_" + self.__visit_name__
-            on the visitor, and call it with the same kw params.'''
-            return getattr(visitor, 'visit_%s' % self.__visit_name__)(self, **kw)
-
-    Classes having no __visit_name__ attribute will remain unaffected.
-    """
-    def __init__(cls, clsname, bases, clsdict):
-        if cls.__name__ == 'Visitable' or not hasattr(cls, '__visit_name__'):
-            super(VisitableType, cls).__init__(clsname, bases, clsdict)
-            return
-
-        _generate_dispatch(cls)
-
-        super(VisitableType, cls).__init__(clsname, bases, clsdict)
-
-
-def _generate_dispatch(cls):
-    """Return an optimized visit dispatch function for the cls
-    for use by the compiler.
-    """
-    if '__visit_name__' in cls.__dict__:
-        visit_name = cls.__visit_name__
-        if isinstance(visit_name, str):
-            # There is an optimization opportunity here because the
-            # string name of the class's __visit_name__ is known at
-            # this early stage (import time) so it can be pre-constructed.
-            getter = operator.attrgetter("visit_%s" % visit_name)
-            def _compiler_dispatch(self, visitor, **kw):
-                return getter(visitor)(self, **kw)
-        else:
-            # The optimization opportunity is lost for this case because the
-            # __visit_name__ is not yet a string. As a result, the visit
-            # string has to be recalculated with each compilation.
-            def _compiler_dispatch(self, visitor, **kw):
-                return getattr(visitor, 'visit_%s' % self.__visit_name__)(self, **kw)
-
-        _compiler_dispatch.__doc__ = \
-            """Look for an attribute named "visit_" + self.__visit_name__
-            on the visitor, and call it with the same kw params.
-            """
-        cls._compiler_dispatch = _compiler_dispatch
-
-class Visitable(object):
-    """Base class for visitable objects, applies the
-    ``VisitableType`` metaclass.
-
-    """
-
-    __metaclass__ = VisitableType
-
-class ClauseVisitor(object):
-    """Base class for visitor objects which can traverse using
-    the traverse() function.
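-
-    A minimal sketch of a subclass (a hypothetical visitor which collects the
-    values of the bind parameters it encounters; ``expr`` stands in for any
-    expression construct)::
-
-        class BindCollector(ClauseVisitor):
-            def __init__(self):
-                self.values = []
-
-            def visit_bindparam(self, bind):
-                # called once per bind parameter encountered in expr
-                self.values.append(bind.value)
-
-        collector = BindCollector()
-        collector.traverse(expr)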
- - """ - - __traverse_options__ = {} - - def traverse_single(self, obj, **kw): - for v in self._visitor_iterator: - meth = getattr(v, "visit_%s" % obj.__visit_name__, None) - if meth: - return meth(obj, **kw) - - def iterate(self, obj): - """traverse the given expression structure, returning an iterator of all elements.""" - - return iterate(obj, self.__traverse_options__) - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - return traverse(obj, self.__traverse_options__, self._visitor_dict) - - @util.memoized_property - def _visitor_dict(self): - visitors = {} - - for name in dir(self): - if name.startswith('visit_'): - visitors[name[6:]] = getattr(self, name) - return visitors - - @property - def _visitor_iterator(self): - """iterate through this visitor and each 'chained' visitor.""" - - v = self - while v: - yield v - v = getattr(v, '_next', None) - - def chain(self, visitor): - """'chain' an additional ClauseVisitor onto this ClauseVisitor. - - the chained visitor will receive all visit events after this one. - - """ - tail = list(self._visitor_iterator)[-1] - tail._next = visitor - return self - -class CloningVisitor(ClauseVisitor): - """Base class for visitor objects which can traverse using - the cloned_traverse() function. - - """ - - def copy_and_process(self, list_): - """Apply cloned traversal to the given list of elements, and return the new list.""" - - return [self.traverse(x) for x in list_] - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - return cloned_traverse(obj, self.__traverse_options__, self._visitor_dict) - -class ReplacingCloningVisitor(CloningVisitor): - """Base class for visitor objects which can traverse using - the replacement_traverse() function. - - """ - - def replace(self, elem): - """receive pre-copied elements during a cloning traversal. - - If the method returns a new element, the element is used - instead of creating a simple copy of the element. Traversal - will halt on the newly returned element if it is re-encountered. - """ - return None - - def traverse(self, obj): - """traverse and visit the given expression structure.""" - - def replace(elem): - for v in self._visitor_iterator: - e = v.replace(elem) - if e is not None: - return e - return replacement_traverse(obj, self.__traverse_options__, replace) - -def iterate(obj, opts): - """traverse the given expression structure, returning an iterator. - - traversal is configured to be breadth-first. - - """ - stack = deque([obj]) - while stack: - t = stack.popleft() - yield t - for c in t.get_children(**opts): - stack.append(c) - -def iterate_depthfirst(obj, opts): - """traverse the given expression structure, returning an iterator. - - traversal is configured to be depth-first. 
- - """ - stack = deque([obj]) - traversal = deque() - while stack: - t = stack.pop() - traversal.appendleft(t) - for c in t.get_children(**opts): - stack.append(c) - return iter(traversal) - -def traverse_using(iterator, obj, visitors): - """visit the given expression structure using the given iterator of objects.""" - - for target in iterator: - meth = visitors.get(target.__visit_name__, None) - if meth: - meth(target) - return obj - -def traverse(obj, opts, visitors): - """traverse and visit the given expression structure using the default iterator.""" - - return traverse_using(iterate(obj, opts), obj, visitors) - -def traverse_depthfirst(obj, opts, visitors): - """traverse and visit the given expression structure using the depth-first iterator.""" - - return traverse_using(iterate_depthfirst(obj, opts), obj, visitors) - -def cloned_traverse(obj, opts, visitors): - """clone the given expression structure, allowing - modifications by visitors.""" - - cloned = util.column_dict() - stop_on = util.column_set(opts.get('stop_on', [])) - - def clone(elem): - if elem in stop_on: - return elem - else: - if elem not in cloned: - cloned[elem] = newelem = elem._clone() - newelem._copy_internals(clone=clone) - meth = visitors.get(newelem.__visit_name__, None) - if meth: - meth(newelem) - return cloned[elem] - - if obj is not None: - obj = clone(obj) - return obj - - -def replacement_traverse(obj, opts, replace): - """clone the given expression structure, allowing element - replacement by a given replacement function.""" - - cloned = util.column_dict() - stop_on = util.column_set(opts.get('stop_on', [])) - - def clone(elem, **kw): - if elem in stop_on or \ - 'no_replacement_traverse' in elem._annotations: - return elem - else: - newelem = replace(elem) - if newelem is not None: - stop_on.add(newelem) - return newelem - else: - if elem not in cloned: - cloned[elem] = newelem = elem._clone() - newelem._copy_internals(clone=clone, **kw) - return cloned[elem] - - if obj is not None: - obj = clone(obj, **opts) - return obj diff --git a/libs/sqlalchemy/types.py b/libs/sqlalchemy/types.py deleted file mode 100644 index 5fe2ba20..00000000 --- a/libs/sqlalchemy/types.py +++ /dev/null @@ -1,2406 +0,0 @@ -# sqlalchemy/types.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""defines genericized SQL types, each represented by a subclass of -:class:`~sqlalchemy.types.AbstractType`. Dialects define further subclasses of these -types. - -For more information see the SQLAlchemy documentation on types. 
- -""" -__all__ = [ 'TypeEngine', 'TypeDecorator', 'AbstractType', 'UserDefinedType', - 'INT', 'CHAR', 'VARCHAR', 'NCHAR', 'NVARCHAR','TEXT', 'Text', - 'FLOAT', 'NUMERIC', 'REAL', 'DECIMAL', 'TIMESTAMP', 'DATETIME', - 'CLOB', 'BLOB', 'BINARY', 'VARBINARY', 'BOOLEAN', 'BIGINT', 'SMALLINT', - 'INTEGER', 'DATE', 'TIME', 'String', 'Integer', 'SmallInteger', - 'BigInteger', 'Numeric', 'Float', 'DateTime', 'Date', 'Time', - 'LargeBinary', 'Binary', 'Boolean', 'Unicode', 'Concatenable', - 'UnicodeText','PickleType', 'Interval', 'Enum', 'MutableType' ] - -import inspect -import datetime as dt -import codecs - -from sqlalchemy import exc, schema -from sqlalchemy.sql import expression, operators -from sqlalchemy.util import pickle -from sqlalchemy.util.compat import decimal -from sqlalchemy.sql.visitors import Visitable -from sqlalchemy import util -from sqlalchemy import processors, events, event -import collections -default = util.importlater("sqlalchemy.engine", "default") - -NoneType = type(None) -if util.jython: - import array - -class AbstractType(Visitable): - """Base for all types - not needed except for backwards - compatibility.""" - -class TypeEngine(AbstractType): - """Base for built-in types.""" - - def copy_value(self, value): - return value - - def bind_processor(self, dialect): - """Return a conversion function for processing bind values. - - Returns a callable which will receive a bind parameter value - as the sole positional argument and will return a value to - send to the DB-API. - - If processing is not necessary, the method should return ``None``. - - :param dialect: Dialect instance in use. - - """ - return None - - def result_processor(self, dialect, coltype): - """Return a conversion function for processing result row values. - - Returns a callable which will receive a result row column - value as the sole positional argument and will return a value - to return to the user. - - If processing is not necessary, the method should return ``None``. - - :param dialect: Dialect instance in use. - - :param coltype: DBAPI coltype argument received in cursor.description. - - """ - return None - - def compare_values(self, x, y): - """Compare two values for equality.""" - - return x == y - - def is_mutable(self): - """Return True if the target Python type is 'mutable'. - - This allows systems like the ORM to know if a column value can - be considered 'not changed' by comparing the identity of - objects alone. Values such as dicts, lists which - are serialized into strings are examples of "mutable" - column structures. - - .. note:: - - This functionality is now superseded by the - ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel`. - - When this method is overridden, :meth:`copy_value` should - also be supplied. The :class:`.MutableType` mixin - is recommended as a helper. - - """ - return False - - def get_dbapi_type(self, dbapi): - """Return the corresponding type object from the underlying DB-API, if - any. - - This can be useful for calling ``setinputsizes()``, for example. - - """ - return None - - @property - def python_type(self): - """Return the Python type object expected to be returned - by instances of this type, if known. - - Basically, for those types which enforce a return type, - or are known across the board to do such for all common - DBAPIs (like ``int`` for example), will return that type. - - If a return type is not defined, raises - ``NotImplementedError``. 
-
-        Note that any type also accommodates NULL in SQL which
-        means you can also get back ``None`` from any type
-        in practice.
-
-        """
-        raise NotImplementedError()
-
-    def with_variant(self, type_, dialect_name):
-        """Produce a new type object that will utilize the given
-        type when applied to the dialect of the given name.
-
-        e.g.::
-
-            from sqlalchemy.types import String
-            from sqlalchemy.dialects import mysql
-
-            s = String()
-
-            s = s.with_variant(mysql.VARCHAR(collation='foo'), 'mysql')
-
-        The construction of :meth:`.TypeEngine.with_variant` is always
-        from the "fallback" type to that which is dialect specific.
-        The returned type is an instance of :class:`.Variant`, which
-        itself provides a :meth:`~sqlalchemy.types.Variant.with_variant` that can
-        be called repeatedly.
-
-        :param type_: a :class:`.TypeEngine` that will be selected
-         as a variant from the originating type, when a dialect
-         of the given name is in use.
-        :param dialect_name: base name of the dialect which uses
-         this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.)
-
-        .. versionadded:: 0.7.2
-
-        """
-        return Variant(self, {dialect_name:type_})
-
-    def _adapt_expression(self, op, othertype):
-        """evaluate the return type of <self> <op> <othertype>,
-        and apply any adaptations to the given operator.
-
-        """
-        return op, self
-
-    @util.memoized_property
-    def _type_affinity(self):
-        """Return a rudimental 'affinity' value expressing the general class
-        of type."""
-
-        typ = None
-        for t in self.__class__.__mro__:
-            if t is TypeEngine or t is UserDefinedType:
-                return typ
-            elif issubclass(t, TypeEngine):
-                typ = t
-        else:
-            return self.__class__
-
-    def dialect_impl(self, dialect):
-        """Return a dialect-specific implementation for this :class:`.TypeEngine`."""
-
-        try:
-            return dialect._type_memos[self]['impl']
-        except KeyError:
-            return self._dialect_info(dialect)['impl']
-
-    def _cached_bind_processor(self, dialect):
-        """Return a dialect-specific bind processor for this type."""
-
-        try:
-            return dialect._type_memos[self]['bind']
-        except KeyError:
-            d = self._dialect_info(dialect)
-            d['bind'] = bp = d['impl'].bind_processor(dialect)
-            return bp
-
-    def _cached_result_processor(self, dialect, coltype):
-        """Return a dialect-specific result processor for this type."""
-
-        try:
-            return dialect._type_memos[self][coltype]
-        except KeyError:
-            d = self._dialect_info(dialect)
-            # key assumption: DBAPI type codes are
-            # constants.  Else this dictionary would
-            # grow unbounded.
-            d[coltype] = rp = d['impl'].result_processor(dialect, coltype)
-            return rp
-
-    def _dialect_info(self, dialect):
-        """Return a dialect-specific registry which
-        caches a dialect-specific implementation, bind processing
-        function, and one or more result processing functions."""
-
-        if self in dialect._type_memos:
-            return dialect._type_memos[self]
-        else:
-            impl = self._gen_dialect_impl(dialect)
-            if impl is self:
-                impl = self.adapt(type(self))
-            # this can't be self, else we create a cycle
-            assert impl is not self
-            dialect._type_memos[self] = d = {'impl':impl}
-            return d
-
-    def _gen_dialect_impl(self, dialect):
-        return dialect.type_descriptor(self)
-
-    def adapt(self, cls, **kw):
-        """Produce an "adapted" form of this type, given an "impl" class
-        to work with.
-
-        This method is used internally to associate generic
-        types with "implementation" types that are specific to a particular
-        dialect.
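-
-        e.g., a minimal sketch (the ``length`` constructor argument of the
-        ``String`` is carried over to the adapted class)::
-
-            String(30).adapt(VARCHAR)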
- """ - return util.constructor_copy(self, cls, **kw) - - def _coerce_compared_value(self, op, value): - """Suggest a type for a 'coerced' Python value in an expression. - - Given an operator and value, gives the type a chance - to return a type which the value should be coerced into. - - The default behavior here is conservative; if the right-hand - side is already coerced into a SQL type based on its - Python type, it is usually left alone. - - End-user functionality extension here should generally be via - :class:`.TypeDecorator`, which provides more liberal behavior in that - it defaults to coercing the other side of the expression into this - type, thus applying special Python conversions above and beyond those - needed by the DBAPI to both ides. It also provides the public method - :meth:`.TypeDecorator.coerce_compared_value` which is intended for - end-user customization of this behavior. - - """ - _coerced_type = _type_map.get(type(value), NULLTYPE) - if _coerced_type is NULLTYPE or _coerced_type._type_affinity \ - is self._type_affinity: - return self - else: - return _coerced_type - - def _compare_type_affinity(self, other): - return self._type_affinity is other._type_affinity - - def compile(self, dialect=None): - """Produce a string-compiled form of this :class:`.TypeEngine`. - - When called with no arguments, uses a "default" dialect - to produce a string result. - - :param dialect: a :class:`.Dialect` instance. - - """ - # arg, return value is inconsistent with - # ClauseElement.compile()....this is a mistake. - - if not dialect: - dialect = self._default_dialect - - return dialect.type_compiler.process(self) - - @property - def _default_dialect(self): - if self.__class__.__module__.startswith("sqlalchemy.dialects"): - tokens = self.__class__.__module__.split(".")[0:3] - mod = ".".join(tokens) - return getattr(__import__(mod).dialects, tokens[-1]).dialect() - else: - return default.DefaultDialect() - - def __str__(self): - # Py3K - #return unicode(self.compile()) - # Py2K - return unicode(self.compile()).\ - encode('ascii', 'backslashreplace') - # end Py2K - - def __init__(self, *args, **kwargs): - """Support implementations that were passing arguments""" - if args or kwargs: - util.warn_deprecated("Passing arguments to type object " - "constructor %s is deprecated" % self.__class__) - - def __repr__(self): - return util.generic_repr(self) - -class UserDefinedType(TypeEngine): - """Base for user defined types. - - This should be the base of new types. Note that - for most cases, :class:`.TypeDecorator` is probably - more appropriate:: - - import sqlalchemy.types as types - - class MyType(types.UserDefinedType): - def __init__(self, precision = 8): - self.precision = precision - - def get_col_spec(self): - return "MYTYPE(%s)" % self.precision - - def bind_processor(self, dialect): - def process(value): - return value - return process - - def result_processor(self, dialect, coltype): - def process(value): - return value - return process - - Once the type is made, it's immediately usable:: - - table = Table('foo', meta, - Column('id', Integer, primary_key=True), - Column('data', MyType(16)) - ) - - """ - __visit_name__ = "user_defined" - - def _adapt_expression(self, op, othertype): - """evaluate the return type of , - and apply any adaptations to the given operator. - - """ - return self.adapt_operator(op), self - - def adapt_operator(self, op): - """A hook which allows the given operator to be adapted - to something new. 
- - See also UserDefinedType._adapt_expression(), an as-yet- - semi-public method with greater capability in this regard. - - """ - return op - -class TypeDecorator(TypeEngine): - """Allows the creation of types which add additional functionality - to an existing type. - - This method is preferred to direct subclassing of SQLAlchemy's - built-in types as it ensures that all required functionality of - the underlying type is kept in place. - - Typical usage:: - - import sqlalchemy.types as types - - class MyType(types.TypeDecorator): - '''Prefixes Unicode values with "PREFIX:" on the way in and - strips it off on the way out. - ''' - - impl = types.Unicode - - def process_bind_param(self, value, dialect): - return "PREFIX:" + value - - def process_result_value(self, value, dialect): - return value[7:] - - def copy(self): - return MyType(self.impl.length) - - The class-level "impl" attribute is required, and can reference any - TypeEngine class. Alternatively, the load_dialect_impl() method - can be used to provide different type classes based on the dialect - given; in this case, the "impl" variable can reference - ``TypeEngine`` as a placeholder. - - Types that receive a Python type that isn't similar to the ultimate type - used may want to define the :meth:`TypeDecorator.coerce_compared_value` - method. This is used to give the expression system a hint when coercing - Python objects into bind parameters within expressions. Consider this - expression:: - - mytable.c.somecol + datetime.date(2009, 5, 15) - - Above, if "somecol" is an ``Integer`` variant, it makes sense that - we're doing date arithmetic, where above is usually interpreted - by databases as adding a number of days to the given date. - The expression system does the right thing by not attempting to - coerce the "date()" value into an integer-oriented bind parameter. - - However, in the case of ``TypeDecorator``, we are usually changing an - incoming Python type to something new - ``TypeDecorator`` by default will - "coerce" the non-typed side to be the same type as itself. Such as below, - we define an "epoch" type that stores a date value as an integer:: - - class MyEpochType(types.TypeDecorator): - impl = types.Integer - - epoch = datetime.date(1970, 1, 1) - - def process_bind_param(self, value, dialect): - return (value - self.epoch).days - - def process_result_value(self, value, dialect): - return self.epoch + timedelta(days=value) - - Our expression of ``somecol + date`` with the above type will coerce the - "date" on the right side to also be treated as ``MyEpochType``. - - This behavior can be overridden via the - :meth:`~TypeDecorator.coerce_compared_value` method, which returns a type - that should be used for the value of the expression. Below we set it such - that an integer value will be treated as an ``Integer``, and any other - value is assumed to be a date and will be treated as a ``MyEpochType``:: - - def coerce_compared_value(self, op, value): - if isinstance(value, int): - return Integer() - else: - return self - - """ - - __visit_name__ = "type_decorator" - - def __init__(self, *args, **kwargs): - """Construct a :class:`.TypeDecorator`. - - Arguments sent here are passed to the constructor - of the class assigned to the ``impl`` class level attribute, - assuming the ``impl`` is a callable, and the resulting - object is assigned to the ``self.impl`` instance attribute - (thus overriding the class attribute of the same name). 
- - If the class level ``impl`` is not a callable (the unusual case), - it will be assigned to the same instance attribute 'as-is', - ignoring those arguments passed to the constructor. - - Subclasses can override this to customize the generation - of ``self.impl`` entirely. - - """ - - if not hasattr(self.__class__, 'impl'): - raise AssertionError("TypeDecorator implementations " - "require a class-level variable " - "'impl' which refers to the class of " - "type being decorated") - self.impl = to_instance(self.__class__.impl, *args, **kwargs) - - - def _gen_dialect_impl(self, dialect): - """ - #todo - """ - adapted = dialect.type_descriptor(self) - if adapted is not self: - return adapted - - # otherwise adapt the impl type, link - # to a copy of this TypeDecorator and return - # that. - typedesc = self.load_dialect_impl(dialect).dialect_impl(dialect) - tt = self.copy() - if not isinstance(tt, self.__class__): - raise AssertionError('Type object %s does not properly ' - 'implement the copy() method, it must ' - 'return an object of type %s' % (self, - self.__class__)) - tt.impl = typedesc - return tt - - @property - def _type_affinity(self): - """ - #todo - """ - return self.impl._type_affinity - - def type_engine(self, dialect): - """Return a dialect-specific :class:`.TypeEngine` instance for this :class:`.TypeDecorator`. - - In most cases this returns a dialect-adapted form of - the :class:`.TypeEngine` type represented by ``self.impl``. - Makes usage of :meth:`dialect_impl` but also traverses - into wrapped :class:`.TypeDecorator` instances. - Behavior can be customized here by overriding :meth:`load_dialect_impl`. - - """ - adapted = dialect.type_descriptor(self) - if type(adapted) is not type(self): - return adapted - elif isinstance(self.impl, TypeDecorator): - return self.impl.type_engine(dialect) - else: - return self.load_dialect_impl(dialect) - - def load_dialect_impl(self, dialect): - """Return a :class:`.TypeEngine` object corresponding to a dialect. - - This is an end-user override hook that can be used to provide - differing types depending on the given dialect. It is used - by the :class:`.TypeDecorator` implementation of :meth:`type_engine` - to help determine what type should ultimately be returned - for a given :class:`.TypeDecorator`. - - By default returns ``self.impl``. - - """ - return self.impl - - def __getattr__(self, key): - """Proxy all other undefined accessors to the underlying - implementation.""" - return getattr(self.impl, key) - - def process_bind_param(self, value, dialect): - """Receive a bound parameter value to be converted. - - Subclasses override this method to return the - value that should be passed along to the underlying - :class:`.TypeEngine` object, and from there to the - DBAPI ``execute()`` method. - - The operation could be anything desired to perform custom - behavior, such as transforming or serializing data. - This could also be used as a hook for validating logic. - - This operation should be designed with the reverse operation - in mind, which would be the process_result_value method of - this class. - - :param value: Data to operate upon, of any type expected by - this method in the subclass. Can be ``None``. - :param dialect: the :class:`.Dialect` in use. - - """ - - raise NotImplementedError() - - def process_result_value(self, value, dialect): - """Receive a result-row column value to be converted. - - Subclasses should implement this method to operate on data - fetched from the database. 
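`load_dialect_impl()` as described above is the hook behind the well-known backend-agnostic GUID recipe; a sketch of just the dialect-selection part (value conversion omitted, and `GUID` is an illustrative name):

```python
import sqlalchemy.types as types
from sqlalchemy.dialects import postgresql

class GUID(types.TypeDecorator):
    # Placeholder impl; the real type is chosen per dialect below.
    impl = types.CHAR

    def load_dialect_impl(self, dialect):
        if dialect.name == 'postgresql':
            # Native UUID type on PostgreSQL...
            return dialect.type_descriptor(postgresql.UUID())
        # ...plain 32-character hex storage elsewhere.
        return dialect.type_descriptor(types.CHAR(32))
```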
- - Subclasses override this method to return the - value that should be passed back to the application, - given a value that is already processed by - the underlying :class:`.TypeEngine` object, originally - from the DBAPI cursor method ``fetchone()`` or similar. - - The operation could be anything desired to perform custom - behavior, such as transforming or serializing data. - This could also be used as a hook for validating logic. - - :param value: Data to operate upon, of any type expected by - this method in the subclass. Can be ``None``. - :param dialect: the :class:`.Dialect` in use. - - This operation should be designed to be reversible by - the "process_bind_param" method of this class. - - """ - - raise NotImplementedError() - - def bind_processor(self, dialect): - """Provide a bound value processing function for the - given :class:`.Dialect`. - - This is the method that fulfills the :class:`.TypeEngine` - contract for bound value conversion. :class:`.TypeDecorator` - will wrap a user-defined implementation of - :meth:`process_bind_param` here. - - User-defined code can override this method directly, - though its likely best to use :meth:`process_bind_param` so that - the processing provided by ``self.impl`` is maintained. - - :param dialect: Dialect instance in use. - - This method is the reverse counterpart to the - :meth:`result_processor` method of this class. - - """ - if self.__class__.process_bind_param.func_code \ - is not TypeDecorator.process_bind_param.func_code: - process_param = self.process_bind_param - impl_processor = self.impl.bind_processor(dialect) - if impl_processor: - def process(value): - return impl_processor(process_param(value, dialect)) - - else: - def process(value): - return process_param(value, dialect) - - return process - else: - return self.impl.bind_processor(dialect) - - def result_processor(self, dialect, coltype): - """Provide a result value processing function for the given :class:`.Dialect`. - - This is the method that fulfills the :class:`.TypeEngine` - contract for result value conversion. :class:`.TypeDecorator` - will wrap a user-defined implementation of - :meth:`process_result_value` here. - - User-defined code can override this method directly, - though its likely best to use :meth:`process_result_value` so that - the processing provided by ``self.impl`` is maintained. - - :param dialect: Dialect instance in use. - :param coltype: An SQLAlchemy data type - - This method is the reverse counterpart to the - :meth:`bind_processor` method of this class. - - """ - if self.__class__.process_result_value.func_code \ - is not TypeDecorator.process_result_value.func_code: - process_value = self.process_result_value - impl_processor = self.impl.result_processor(dialect, - coltype) - if impl_processor: - def process(value): - return process_value(impl_processor(value), dialect) - - else: - def process(value): - return process_value(value, dialect) - - return process - else: - return self.impl.result_processor(dialect, coltype) - - def coerce_compared_value(self, op, value): - """Suggest a type for a 'coerced' Python value in an expression. - - By default, returns self. This method is called by - the expression system when an object using this type is - on the left or right side of an expression against a plain Python - object which does not yet have a SQLAlchemy type assigned:: - - expr = table.c.somecolumn + 35 - - Where above, if ``somecolumn`` uses this type, this method will - be called with the value ``operator.add`` - and ``35``. 
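To make the wrapping order implemented by `bind_processor()` / `result_processor()` above concrete: `process_bind_param()` runs before the impl's own processor on the way in, and `process_result_value()` runs after it on the way out. A sketch using a hypothetical prefixing type:

```python
import sqlalchemy.types as types

class Prefixed(types.TypeDecorator):
    impl = types.String

    def process_bind_param(self, value, dialect):
        # Runs first; the String impl's processor (if any) sees the result.
        return "PREFIX:" + value

    def process_result_value(self, value, dialect):
        # Runs last; the String impl has already processed the row value.
        return value[7:]
```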
The return value is whatever SQLAlchemy type should - be used for ``35`` for this particular operation. - - """ - return self - - def _coerce_compared_value(self, op, value): - """See :meth:`.TypeEngine._coerce_compared_value` for a description.""" - - return self.coerce_compared_value(op, value) - - def copy(self): - """Produce a copy of this :class:`.TypeDecorator` instance. - - This is a shallow copy and is provided to fulfill part of - the :class:`.TypeEngine` contract. It usually does not - need to be overridden unless the user-defined :class:`.TypeDecorator` - has local state that should be deep-copied. - - """ - - instance = self.__class__.__new__(self.__class__) - instance.__dict__.update(self.__dict__) - return instance - - def get_dbapi_type(self, dbapi): - """Return the DBAPI type object represented by this :class:`.TypeDecorator`. - - By default this calls upon :meth:`.TypeEngine.get_dbapi_type` of the - underlying "impl". - """ - return self.impl.get_dbapi_type(dbapi) - - def copy_value(self, value): - """Given a value, produce a copy of it. - - By default this calls upon :meth:`.TypeEngine.copy_value` - of the underlying "impl". - - :meth:`.copy_value` will return the object - itself, assuming "mutability" is not enabled. - Only the :class:`.MutableType` mixin provides a copy - function that actually produces a new object. - The copying function is used by the ORM when - "mutable" types are used, to memoize the original - version of an object as loaded from the database, - which is then compared to the possibly mutated - version to check for changes. - - Modern implementations should use the - ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel` for intercepting in-place - changes to values. - - """ - return self.impl.copy_value(value) - - def compare_values(self, x, y): - """Given two values, compare them for equality. - - By default this calls upon :meth:`.TypeEngine.compare_values` - of the underlying "impl", which in turn usually - uses the Python equals operator ``==``. - - This function is used by the ORM to compare - an original-loaded value with an intercepted - "changed" value, to determine if a net change - has occurred. - - """ - return self.impl.compare_values(x, y) - - def is_mutable(self): - """Return True if the target Python type is 'mutable'. - - This allows systems like the ORM to know if a column value can - be considered 'not changed' by comparing the identity of - objects alone. Values such as dicts, lists which - are serialized into strings are examples of "mutable" - column structures. - - .. note:: - - This functionality is now superseded by the - ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel`. - - """ - return self.impl.is_mutable() - - def _adapt_expression(self, op, othertype): - """ - #todo - """ - op, typ =self.impl._adapt_expression(op, othertype) - if typ is self.impl: - return op, self - else: - return op, typ - - def __repr__(self): - return util.generic_repr(self, to_inspect=self.impl) - -class Variant(TypeDecorator): - """A wrapping type that selects among a variety of - implementations based on dialect in use. - - The :class:`.Variant` type is typically constructed - using the :meth:`.TypeEngine.with_variant` method. - - .. versionadded:: 0.7.2 - - """ - - def __init__(self, base, mapping): - """Construct a new :class:`.Variant`. - - :param base: the base 'fallback' type - :param mapping: dictionary of string dialect names to :class:`.TypeEngine` - instances. 
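A usage sketch for the `Variant` type, which is normally reached via `TypeEngine.with_variant()` as noted above (table and column names are illustrative):

```python
from sqlalchemy import MetaData, Table, Column, String
from sqlalchemy.dialects.mysql import VARCHAR

meta = MetaData()
example = Table('example', meta,
    # MySQL gets the dialect-specific VARCHAR; every other backend
    # falls back to the base String(50).
    Column('data', String(50).with_variant(
        VARCHAR(50, charset='utf8'), 'mysql')),
)
```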
- - """ - self.impl = base - self.mapping = mapping - - def load_dialect_impl(self, dialect): - if dialect.name in self.mapping: - return self.mapping[dialect.name] - else: - return self.impl - - def with_variant(self, type_, dialect_name): - """Return a new :class:`.Variant` which adds the given - type + dialect name to the mapping, in addition to the - mapping present in this :class:`.Variant`. - - :param type_: a :class:`.TypeEngine` that will be selected - as a variant from the originating type, when a dialect - of the given name is in use. - :param dialect_name: base name of the dialect which uses - this type. (i.e. ``'postgresql'``, ``'mysql'``, etc.) - - """ - - if dialect_name in self.mapping: - raise exc.ArgumentError( - "Dialect '%s' is already present in " - "the mapping for this Variant" % dialect_name) - mapping = self.mapping.copy() - mapping[dialect_name] = type_ - return Variant(self.impl, mapping) - -class MutableType(object): - """A mixin that marks a :class:`.TypeEngine` as representing - a mutable Python object type. This functionality is used - only by the ORM. - - .. versionchanged:: 0.7 - :class:`.MutableType` is superseded - by the ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel`. This extension provides an event - driven approach to in-place mutation detection that does not - incur the severe performance penalty of the :class:`.MutableType` - approach. - - "mutable" means that changes can occur in place to a value - of this type. Examples includes Python lists, dictionaries, - and sets, as well as user-defined objects. The primary - need for identification of "mutable" types is by the ORM, - which applies special rules to such values in order to guarantee - that changes are detected. These rules may have a significant - performance impact, described below. - - A :class:`.MutableType` usually allows a flag called - ``mutable=False`` to enable/disable the "mutability" flag, - represented on this class by :meth:`is_mutable`. Examples - include :class:`.PickleType` and - :class:`~sqlalchemy.dialects.postgresql.base.ARRAY`. Setting - this flag to ``True`` enables mutability-specific behavior - by the ORM. - - The :meth:`copy_value` and :meth:`compare_values` functions - represent a copy and compare function for values of this - type - implementing subclasses should override these - appropriately. - - .. warning:: - - The usage of mutable types has significant performance - implications when using the ORM. In order to detect changes, the - ORM must create a copy of the value when it is first - accessed, so that changes to the current value can be compared - against the "clean" database-loaded value. Additionally, when the - ORM checks to see if any data requires flushing, it must scan - through all instances in the session which are known to have - "mutable" attributes and compare the current value of each - one to its "clean" - value. So for example, if the Session contains 6000 objects (a - fairly large amount) and autoflush is enabled, every individual - execution of :class:`.Query` will require a full scan of that subset of - the 6000 objects that have mutable attributes, possibly resulting - in tens of thousands of additional method calls for every query. - - .. versionchanged:: 0.7 - As of SQLAlchemy 0.7, the ``sqlalchemy.ext.mutable`` is provided - which allows an event driven approach to in-place - mutation detection. This approach should now be favored over - the usage of :class:`.MutableType` with ``mutable=True``. 
- ``sqlalchemy.ext.mutable`` is described in :ref:`mutable_toplevel`. - - """ - - def is_mutable(self): - """Return True if the target Python type is 'mutable'. - - For :class:`.MutableType`, this method is set to - return ``True``. - - """ - return True - - def copy_value(self, value): - """Unimplemented.""" - raise NotImplementedError() - - def compare_values(self, x, y): - """Compare *x* == *y*.""" - return x == y - -def to_instance(typeobj, *arg, **kw): - if typeobj is None: - return NULLTYPE - - if util.callable(typeobj): - return typeobj(*arg, **kw) - else: - return typeobj - -def adapt_type(typeobj, colspecs): - if isinstance(typeobj, type): - typeobj = typeobj() - for t in typeobj.__class__.__mro__[0:-1]: - try: - impltype = colspecs[t] - break - except KeyError: - pass - else: - # couldnt adapt - so just return the type itself - # (it may be a user-defined type) - return typeobj - # if we adapted the given generic type to a database-specific type, - # but it turns out the originally given "generic" type - # is actually a subclass of our resulting type, then we were already - # given a more specific type than that required; so use that. - if (issubclass(typeobj.__class__, impltype)): - return typeobj - return typeobj.adapt(impltype) - - - - - -class NullType(TypeEngine): - """An unknown type. - - NullTypes will stand in if :class:`~sqlalchemy.Table` reflection - encounters a column data type unknown to SQLAlchemy. The - resulting columns are nearly fully usable: the DB-API adapter will - handle all translation to and from the database data type. - - NullType does not have sufficient information to particpate in a - ``CREATE TABLE`` statement and will raise an exception if - encountered during a :meth:`~sqlalchemy.Table.create` operation. - - """ - __visit_name__ = 'null' - - def _adapt_expression(self, op, othertype): - if isinstance(othertype, NullType) or not operators.is_commutative(op): - return op, self - else: - return othertype._adapt_expression(op, self) - -NullTypeEngine = NullType - -class Concatenable(object): - """A mixin that marks a type as supporting 'concatenation', - typically strings.""" - - def _adapt_expression(self, op, othertype): - if op is operators.add and issubclass(othertype._type_affinity, - (Concatenable, NullType)): - return operators.concat_op, self - else: - return op, self - -class _DateAffinity(object): - """Mixin date/time specific expression adaptations. - - Rules are implemented within Date,Time,Interval,DateTime, Numeric, - Integer. Based on http://www.postgresql.org/docs/current/static - /functions-datetime.html. - - """ - - @property - def _expression_adaptations(self): - raise NotImplementedError() - - _blank_dict = util.immutabledict() - def _adapt_expression(self, op, othertype): - othertype = othertype._type_affinity - return op, \ - self._expression_adaptations.get(op, self._blank_dict).\ - get(othertype, NULLTYPE) - -class String(Concatenable, TypeEngine): - """The base for all string and character types. - - In SQL, corresponds to VARCHAR. Can also take Python unicode objects - and encode to the database's encoding in bind params (and the reverse for - result sets.) - - The `length` field is usually required when the `String` type is - used within a CREATE TABLE statement, as VARCHAR requires a length - on most databases. - - """ - - __visit_name__ = 'string' - - def __init__(self, length=None, convert_unicode=False, - assert_unicode=None, unicode_error=None, - _warn_on_bytestring=False - ): - """ - Create a string-holding type. 
- - :param length: optional, a length for the column for use in - DDL statements. May be safely omitted if no ``CREATE - TABLE`` will be issued. Certain databases may require a - ``length`` for use in DDL, and will raise an exception when - the ``CREATE TABLE`` DDL is issued if a ``VARCHAR`` - with no length is included. Whether the value is - interpreted as bytes or characters is database specific. - - :param convert_unicode: When set to ``True``, the - :class:`.String` type will assume that - input is to be passed as Python ``unicode`` objects, - and results returned as Python ``unicode`` objects. - If the DBAPI in use does not support Python unicode - (which is fewer and fewer these days), SQLAlchemy - will encode/decode the value, using the - value of the ``encoding`` parameter passed to - :func:`.create_engine` as the encoding. - - When using a DBAPI that natively supports Python - unicode objects, this flag generally does not - need to be set. For columns that are explicitly - intended to store non-ASCII data, the :class:`.Unicode` - or :class:`UnicodeText` - types should be used regardless, which feature - the same behavior of ``convert_unicode`` but - also indicate an underlying column type that - directly supports unicode, such as ``NVARCHAR``. - - For the extremely rare case that Python ``unicode`` - is to be encoded/decoded by SQLAlchemy on a backend - that does natively support Python ``unicode``, - the value ``force`` can be passed here which will - cause SQLAlchemy's encode/decode services to be - used unconditionally. - - :param assert_unicode: Deprecated. A warning is emitted - when a non-``unicode`` object is passed to the - :class:`.Unicode` subtype of :class:`.String`, - or the :class:`.UnicodeText` subtype of :class:`.Text`. - See :class:`.Unicode` for information on how to - control this warning. - - :param unicode_error: Optional, a method to use to handle Unicode - conversion errors. Behaves like the ``errors`` keyword argument to - the standard library's ``string.decode()`` functions. This flag - requires that ``convert_unicode`` is set to ``force`` - otherwise, - SQLAlchemy is not guaranteed to handle the task of unicode - conversion. Note that this flag adds significant performance - overhead to row-fetching operations for backends that already - return unicode objects natively (which most DBAPIs do). This - flag should only be used as a last resort for reading - strings from a column with varied or corrupted encodings. - - """ - if unicode_error is not None and convert_unicode != 'force': - raise exc.ArgumentError("convert_unicode must be 'force' " - "when unicode_error is set.") - - if assert_unicode: - util.warn_deprecated('assert_unicode is deprecated. ' - 'SQLAlchemy emits a warning in all ' - 'cases where it would otherwise like ' - 'to encode a Python unicode object ' - 'into a specific encoding but a plain ' - 'bytestring is received. 
This does ' - '*not* apply to DBAPIs that coerce ' - 'Unicode natively.') - self.length = length - self.convert_unicode = convert_unicode - self.unicode_error = unicode_error - self._warn_on_bytestring = _warn_on_bytestring - - def bind_processor(self, dialect): - if self.convert_unicode or dialect.convert_unicode: - if dialect.supports_unicode_binds and \ - self.convert_unicode != 'force': - if self._warn_on_bytestring: - def process(value): - # Py3K - #if isinstance(value, bytes): - # Py2K - if isinstance(value, str): - # end Py2K - util.warn("Unicode type received non-unicode bind " - "param value.") - return value - return process - else: - return None - else: - encoder = codecs.getencoder(dialect.encoding) - warn_on_bytestring = self._warn_on_bytestring - def process(value): - if isinstance(value, unicode): - return encoder(value, self.unicode_error)[0] - elif warn_on_bytestring and value is not None: - util.warn("Unicode type received non-unicode bind " - "param value") - return value - return process - else: - return None - - def result_processor(self, dialect, coltype): - wants_unicode = self.convert_unicode or dialect.convert_unicode - needs_convert = wants_unicode and \ - (dialect.returns_unicode_strings is not True or - self.convert_unicode == 'force') - - if needs_convert: - to_unicode = processors.to_unicode_processor_factory( - dialect.encoding, self.unicode_error) - - if dialect.returns_unicode_strings: - # we wouldn't be here unless convert_unicode='force' - # was specified, or the driver has erratic unicode-returning - # habits. since we will be getting back unicode - # in most cases, we check for it (decode will fail). - def process(value): - if isinstance(value, unicode): - return value - else: - return to_unicode(value) - return process - else: - # here, we assume that the object is not unicode, - # avoiding expensive isinstance() check. - return to_unicode - else: - return None - - @property - def python_type(self): - if self.convert_unicode: - return unicode - else: - return str - - def get_dbapi_type(self, dbapi): - return dbapi.STRING - -class Text(String): - """A variably sized string type. - - In SQL, usually corresponds to CLOB or TEXT. Can also take Python - unicode objects and encode to the database's encoding in bind - params (and the reverse for result sets.) - - """ - __visit_name__ = 'text' - -class Unicode(String): - """A variable length Unicode string type. - - The :class:`.Unicode` type is a :class:`.String` subclass - that assumes input and output as Python ``unicode`` data, - and in that regard is equivalent to the usage of the - ``convert_unicode`` flag with the :class:`.String` type. - However, unlike plain :class:`.String`, it also implies an - underlying column type that is explicitly supporting of non-ASCII - data, such as ``NVARCHAR`` on Oracle and SQL Server. - This can impact the output of ``CREATE TABLE`` statements - and ``CAST`` functions at the dialect level, and can - also affect the handling of bound parameters in some - specific DBAPI scenarios. - - The encoding used by the :class:`.Unicode` type is usually - determined by the DBAPI itself; most modern DBAPIs - feature support for Python ``unicode`` objects as bound - values and result set values, and the encoding should - be configured as detailed in the notes for the target - DBAPI in the :ref:`dialect_toplevel` section. 
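Pulling the string types above together, a quick column-definition sketch (table and column names are illustrative):

```python
from sqlalchemy import MetaData, Table, Column, Integer, String, Unicode, UnicodeText

meta = MetaData()
users = Table('users', meta,
    Column('id', Integer, primary_key=True),
    Column('login', String(50)),           # plain VARCHAR(50)
    Column('display_name', Unicode(100)),  # unicode-aware, e.g. NVARCHAR
    Column('bio', UnicodeText),            # unbounded unicode text, e.g. NCLOB
)
```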
- - For those DBAPIs which do not support, or are not configured - to accommodate Python ``unicode`` objects - directly, SQLAlchemy does the encoding and decoding - outside of the DBAPI. The encoding in this scenario - is determined by the ``encoding`` flag passed to - :func:`.create_engine`. - - When using the :class:`.Unicode` type, it is only appropriate - to pass Python ``unicode`` objects, and not plain ``str``. - If a plain ``str`` is passed under Python 2, a warning - is emitted. If you notice your application emitting these warnings but - you're not sure of the source of them, the Python - ``warnings`` filter, documented at - http://docs.python.org/library/warnings.html, - can be used to turn these warnings into exceptions - which will illustrate a stack trace:: - - import warnings - warnings.simplefilter('error') - - For an application that wishes to pass plain bytestrings - and Python ``unicode`` objects to the ``Unicode`` type - equally, the bytestrings must first be decoded into - unicode. The recipe at :ref:`coerce_to_unicode` illustrates - how this is done. - - See also: - - :class:`.UnicodeText` - unlengthed textual counterpart - to :class:`.Unicode`. - - """ - - __visit_name__ = 'unicode' - - def __init__(self, length=None, **kwargs): - """ - Create a :class:`.Unicode` object. - - Parameters are the same as that of :class:`.String`, - with the exception that ``convert_unicode`` - defaults to ``True``. - - """ - kwargs.setdefault('convert_unicode', True) - kwargs.setdefault('_warn_on_bytestring', True) - super(Unicode, self).__init__(length=length, **kwargs) - -class UnicodeText(Text): - """An unbounded-length Unicode string type. - - See :class:`.Unicode` for details on the unicode - behavior of this object. - - Like :class:`.Unicode`, usage the :class:`.UnicodeText` type implies a - unicode-capable type being used on the backend, such as - ``NCLOB``, ``NTEXT``. - - """ - - __visit_name__ = 'unicode_text' - - def __init__(self, length=None, **kwargs): - """ - Create a Unicode-converting Text type. - - Parameters are the same as that of :class:`.Text`, - with the exception that ``convert_unicode`` - defaults to ``True``. - - """ - kwargs.setdefault('convert_unicode', True) - kwargs.setdefault('_warn_on_bytestring', True) - super(UnicodeText, self).__init__(length=length, **kwargs) - - -class Integer(_DateAffinity, TypeEngine): - """A type for ``int`` integers.""" - - __visit_name__ = 'integer' - - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - - @property - def python_type(self): - return int - - @util.memoized_property - def _expression_adaptations(self): - # TODO: need a dictionary object that will - # handle operators generically here, this is incomplete - return { - operators.add:{ - Date:Date, - Integer:Integer, - Numeric:Numeric, - }, - operators.mul:{ - Interval:Interval, - Integer:Integer, - Numeric:Numeric, - }, - # Py2K - operators.div:{ - Integer:Integer, - Numeric:Numeric, - }, - # end Py2K - operators.truediv:{ - Integer:Integer, - Numeric:Numeric, - }, - operators.sub:{ - Integer:Integer, - Numeric:Numeric, - }, - } - -class SmallInteger(Integer): - """A type for smaller ``int`` integers. - - Typically generates a ``SMALLINT`` in DDL, and otherwise acts like - a normal :class:`.Integer` on the Python side. - - """ - - __visit_name__ = 'small_integer' - - -class BigInteger(Integer): - """A type for bigger ``int`` integers. - - Typically generates a ``BIGINT`` in DDL, and otherwise acts like - a normal :class:`.Integer` on the Python side. 
- - """ - - __visit_name__ = 'big_integer' - - -class Numeric(_DateAffinity, TypeEngine): - """A type for fixed precision numbers. - - Typically generates DECIMAL or NUMERIC. Returns - ``decimal.Decimal`` objects by default, applying - conversion as needed. - - .. note:: - - The `cdecimal `_ library - is a high performing alternative to Python's built-in - ``decimal.Decimal`` type, which performs very poorly in high volume - situations. SQLAlchemy 0.7 is tested against ``cdecimal`` and supports - it fully. The type is not necessarily supported by DBAPI - implementations however, most of which contain an import for plain - ``decimal`` in their source code, even though some such as psycopg2 - provide hooks for alternate adapters. SQLAlchemy imports ``decimal`` - globally as well. While the alternate ``Decimal`` class can be patched - into SQLA's ``decimal`` module, overall the most straightforward and - foolproof way to use "cdecimal" given current DBAPI and Python support - is to patch it directly into sys.modules before anything else is - imported:: - - import sys - import cdecimal - sys.modules["decimal"] = cdecimal - - While the global patch is a little ugly, it's particularly - important to use just one decimal library at a time since - Python Decimal and cdecimal Decimal objects - are not currently compatible *with each other*:: - - >>> import cdecimal - >>> import decimal - >>> decimal.Decimal("10") == cdecimal.Decimal("10") - False - - SQLAlchemy will provide more natural support of - cdecimal if and when it becomes a standard part of Python - installations and is supported by all DBAPIs. - - """ - - __visit_name__ = 'numeric' - - def __init__(self, precision=None, scale=None, asdecimal=True): - """ - Construct a Numeric. - - :param precision: the numeric precision for use in DDL ``CREATE - TABLE``. - - :param scale: the numeric scale for use in DDL ``CREATE TABLE``. - - :param asdecimal: default True. Return whether or not - values should be sent as Python Decimal objects, or - as floats. Different DBAPIs send one or the other based on - datatypes - the Numeric type will ensure that return values - are one or the other across DBAPIs consistently. - - When using the ``Numeric`` type, care should be taken to ensure - that the asdecimal setting is apppropriate for the DBAPI in use - - when Numeric applies a conversion from Decimal->float or float-> - Decimal, this conversion incurs an additional performance overhead - for all result columns received. - - DBAPIs that return Decimal natively (e.g. psycopg2) will have - better accuracy and higher performance with a setting of ``True``, - as the native translation to Decimal reduces the amount of floating- - point issues at play, and the Numeric type itself doesn't need - to apply any further conversions. However, another DBAPI which - returns floats natively *will* incur an additional conversion - overhead, and is still subject to floating point data loss - in - which case ``asdecimal=False`` will at least remove the extra - conversion overhead. 
- - """ - self.precision = precision - self.scale = scale - self.asdecimal = asdecimal - - def get_dbapi_type(self, dbapi): - return dbapi.NUMBER - - @property - def python_type(self): - if self.asdecimal: - return decimal.Decimal - else: - return float - - def bind_processor(self, dialect): - if dialect.supports_native_decimal: - return None - else: - return processors.to_float - - def result_processor(self, dialect, coltype): - if self.asdecimal: - if dialect.supports_native_decimal: - # we're a "numeric", DBAPI will give us Decimal directly - return None - else: - util.warn('Dialect %s+%s does *not* support Decimal ' - 'objects natively, and SQLAlchemy must ' - 'convert from floating point - rounding ' - 'errors and other issues may occur. Please ' - 'consider storing Decimal numbers as strings ' - 'or integers on this platform for lossless ' - 'storage.' % (dialect.name, dialect.driver)) - - # we're a "numeric", DBAPI returns floats, convert. - if self.scale is not None: - return processors.to_decimal_processor_factory( - decimal.Decimal, self.scale) - else: - return processors.to_decimal_processor_factory( - decimal.Decimal) - else: - if dialect.supports_native_decimal: - return processors.to_float - else: - return None - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.mul:{ - Interval:Interval, - Numeric:Numeric, - Integer:Numeric, - }, - # Py2K - operators.div:{ - Numeric:Numeric, - Integer:Numeric, - }, - # end Py2K - operators.truediv:{ - Numeric:Numeric, - Integer:Numeric, - }, - operators.add:{ - Numeric:Numeric, - Integer:Numeric, - }, - operators.sub:{ - Numeric:Numeric, - Integer:Numeric, - } - } - -class Float(Numeric): - """A type for ``float`` numbers. - - Returns Python ``float`` objects by default, applying - conversion as needed. - - """ - - __visit_name__ = 'float' - - scale = None - - def __init__(self, precision=None, asdecimal=False, **kwargs): - """ - Construct a Float. - - :param precision: the numeric precision for use in DDL ``CREATE - TABLE``. - - :param asdecimal: the same flag as that of :class:`.Numeric`, but - defaults to ``False``. Note that setting this flag to ``True`` - results in floating point conversion. - - :param \**kwargs: deprecated. Additional arguments here are ignored - by the default :class:`.Float` type. For database specific - floats that support additional arguments, see that dialect's - documentation for details, such as :class:`sqlalchemy.dialects.mysql.FLOAT`. - - """ - self.precision = precision - self.asdecimal = asdecimal - if kwargs: - util.warn_deprecated("Additional keyword arguments " - "passed to Float ignored.") - - def result_processor(self, dialect, coltype): - if self.asdecimal: - return processors.to_decimal_processor_factory(decimal.Decimal) - else: - return None - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.mul:{ - Interval:Interval, - Numeric:Float, - }, - # Py2K - operators.div:{ - Numeric:Float, - }, - # end Py2K - operators.truediv:{ - Numeric:Float, - }, - operators.add:{ - Numeric:Float, - }, - operators.sub:{ - Numeric:Float, - } - } - - -class DateTime(_DateAffinity, TypeEngine): - """A type for ``datetime.datetime()`` objects. - - Date and time types return objects from the Python ``datetime`` - module. Most DBAPIs have built in support for the datetime - module, with the noted exception of SQLite. 
In the case of - SQLite, date and time types are stored as strings which are then - converted back to datetime objects when rows are returned. - - """ - - __visit_name__ = 'datetime' - - def __init__(self, timezone=False): - """Construct a new :class:`.DateTime`. - - :param timezone: boolean. If True, and supported by the - backend, will produce 'TIMESTAMP WITH TIMEZONE'. For backends - that don't support timezone aware timestamps, has no - effect. - - """ - self.timezone = timezone - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.datetime - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add:{ - Interval:DateTime, - }, - operators.sub:{ - Interval:DateTime, - DateTime:Interval, - }, - } - - -class Date(_DateAffinity,TypeEngine): - """A type for ``datetime.date()`` objects.""" - - __visit_name__ = 'date' - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.date - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add:{ - Integer:Date, - Interval:DateTime, - Time:DateTime, - }, - operators.sub:{ - # date - integer = date - Integer:Date, - - # date - date = integer. - Date:Integer, - - Interval:DateTime, - - # date - datetime = interval, - # this one is not in the PG docs - # but works - DateTime:Interval, - }, - } - - -class Time(_DateAffinity,TypeEngine): - """A type for ``datetime.time()`` objects.""" - - __visit_name__ = 'time' - - def __init__(self, timezone=False): - self.timezone = timezone - - def get_dbapi_type(self, dbapi): - return dbapi.DATETIME - - @property - def python_type(self): - return dt.time - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add:{ - Date:DateTime, - Interval:Time - }, - operators.sub:{ - Time:Interval, - Interval:Time, - }, - } - - -class _Binary(TypeEngine): - """Define base behavior for binary types.""" - - def __init__(self, length=None): - self.length = length - - @property - def python_type(self): - # Py3K - #return bytes - # Py2K - return str - # end Py2K - - # Python 3 - sqlite3 doesn't need the `Binary` conversion - # here, though pg8000 does to indicate "bytea" - def bind_processor(self, dialect): - DBAPIBinary = dialect.dbapi.Binary - def process(value): - x = self - if value is not None: - return DBAPIBinary(value) - else: - return None - return process - - # Python 3 has native bytes() type - # both sqlite3 and pg8000 seem to return it - # (i.e. and not 'memoryview') - # Py2K - def result_processor(self, dialect, coltype): - if util.jython: - def process(value): - if value is not None: - if isinstance(value, array.array): - return value.tostring() - return str(value) - else: - return None - else: - process = processors.to_str - return process - # end Py2K - - def _coerce_compared_value(self, op, value): - """See :meth:`.TypeEngine._coerce_compared_value` for a description.""" - - if isinstance(value, basestring): - return self - else: - return super(_Binary, self)._coerce_compared_value(op, value) - - def get_dbapi_type(self, dbapi): - return dbapi.BINARY - -class LargeBinary(_Binary): - """A type for large binary byte data. - - The Binary type generates BLOB or BYTEA when tables are created, - and also converts incoming values using the ``Binary`` callable - provided by each DB-API. - - """ - - __visit_name__ = 'large_binary' - - def __init__(self, length=None): - """ - Construct a LargeBinary type. 
- - :param length: optional, a length for the column for use in - DDL statements, for those BLOB types that accept a length - (i.e. MySQL). It does *not* produce a small BINARY/VARBINARY - type - use the BINARY/VARBINARY types specifically for those. - May be safely omitted if no ``CREATE - TABLE`` will be issued. Certain databases may require a - *length* for use in DDL, and will raise an exception when - the ``CREATE TABLE`` DDL is issued. - - """ - _Binary.__init__(self, length=length) - -class Binary(LargeBinary): - """Deprecated. Renamed to LargeBinary.""" - - def __init__(self, *arg, **kw): - util.warn_deprecated('The Binary type has been renamed to ' - 'LargeBinary.') - LargeBinary.__init__(self, *arg, **kw) - -class SchemaType(events.SchemaEventTarget): - """Mark a type as possibly requiring schema-level DDL for usage. - - Supports types that must be explicitly created/dropped (i.e. PG ENUM type) - as well as types that are complimented by table or schema level - constraints, triggers, and other rules. - - :class:`.SchemaType` classes can also be targets for the - :meth:`.DDLEvents.before_parent_attach` and :meth:`.DDLEvents.after_parent_attach` - events, where the events fire off surrounding the association of - the type object with a parent :class:`.Column`. - - """ - - def __init__(self, **kw): - self.name = kw.pop('name', None) - self.quote = kw.pop('quote', None) - self.schema = kw.pop('schema', None) - self.metadata = kw.pop('metadata', None) - if self.metadata: - event.listen( - self.metadata, - "before_create", - util.portable_instancemethod(self._on_metadata_create) - ) - event.listen( - self.metadata, - "after_drop", - util.portable_instancemethod(self._on_metadata_drop) - ) - - def _set_parent(self, column): - column._on_table_attach(util.portable_instancemethod(self._set_table)) - - def _set_table(self, column, table): - event.listen( - table, - "before_create", - util.portable_instancemethod( - self._on_table_create) - ) - event.listen( - table, - "after_drop", - util.portable_instancemethod(self._on_table_drop) - ) - if self.metadata is None: - # TODO: what's the difference between self.metadata - # and table.metadata here ? 
- event.listen( - table.metadata, - "before_create", - util.portable_instancemethod(self._on_metadata_create) - ) - event.listen( - table.metadata, - "after_drop", - util.portable_instancemethod(self._on_metadata_drop) - ) - - @property - def bind(self): - return self.metadata and self.metadata.bind or None - - def create(self, bind=None, checkfirst=False): - """Issue CREATE ddl for this type, if applicable.""" - - if bind is None: - bind = schema._bind_or_error(self) - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t.create(bind=bind, checkfirst=checkfirst) - - def drop(self, bind=None, checkfirst=False): - """Issue DROP ddl for this type, if applicable.""" - - if bind is None: - bind = schema._bind_or_error(self) - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t.drop(bind=bind, checkfirst=checkfirst) - - def _on_table_create(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_table_create(target, bind, **kw) - - def _on_table_drop(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_table_drop(target, bind, **kw) - - def _on_metadata_create(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_metadata_create(target, bind, **kw) - - def _on_metadata_drop(self, target, bind, **kw): - t = self.dialect_impl(bind.dialect) - if t.__class__ is not self.__class__ and isinstance(t, SchemaType): - t._on_metadata_drop(target, bind, **kw) - -class Enum(String, SchemaType): - """Generic Enum Type. - - The Enum type provides a set of possible string values which the - column is constrained towards. - - By default, uses the backend's native ENUM type if available, - else uses VARCHAR + a CHECK constraint. - - See also: - - :class:`~.postgresql.ENUM` - PostgreSQL-specific type, - which has additional functionality. - - """ - - __visit_name__ = 'enum' - - def __init__(self, *enums, **kw): - """Construct an enum. - - Keyword arguments which don't apply to a specific backend are ignored - by that backend. - - :param \*enums: string or unicode enumeration labels. If unicode - labels are present, the `convert_unicode` flag is auto-enabled. - - :param convert_unicode: Enable unicode-aware bind parameter and - result-set processing for this Enum's data. This is set - automatically based on the presence of unicode label strings. - - :param metadata: Associate this type directly with a ``MetaData`` - object. For types that exist on the target database as an - independent schema construct (Postgresql), this type will be - created and dropped within ``create_all()`` and ``drop_all()`` - operations. If the type is not associated with any ``MetaData`` - object, it will associate itself with each ``Table`` in which it is - used, and will be created when any of those individual tables are - created, after a check is performed for it's existence. The type is - only dropped when ``drop_all()`` is called for that ``Table`` - object's metadata, however. - - :param name: The name of this type. This is required for Postgresql - and any future supported database which requires an explicitly - named type, or an explicitly named constraint in order to generate - the type and/or a table that uses it. 
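An `Enum` usage sketch matching the parameters documented above; `name` is required on PostgreSQL, where the type is emitted as a schema-level construct (table and label names are illustrative):

```python
from sqlalchemy import MetaData, Table, Column, Enum

meta = MetaData()
moods = Table('moods', meta,
    Column('mood', Enum('happy', 'sad', name='mood_enum')),
)
# PostgreSQL: CREATE TYPE mood_enum AS ENUM ('happy', 'sad')
# other backends: VARCHAR plus a CHECK constraint, per the fallback above
```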
- - :param native_enum: Use the database's native ENUM type when - available. Defaults to True. When False, uses VARCHAR + check - constraint for all backends. - - :param schema: Schemaname of this type. For types that exist on the - target database as an independent schema construct (Postgresql), - this parameter specifies the named schema in which the type is - present. - - :param quote: Force quoting to be on or off on the type's name. If - left as the default of `None`, the usual schema-level "case - sensitive"/"reserved name" rules are used to determine if this - type's name should be quoted. - - """ - self.enums = enums - self.native_enum = kw.pop('native_enum', True) - convert_unicode = kw.pop('convert_unicode', None) - if convert_unicode is None: - for e in enums: - if isinstance(e, unicode): - convert_unicode = True - break - else: - convert_unicode = False - - if self.enums: - length = max(len(x) for x in self.enums) - else: - length = 0 - String.__init__(self, - length=length, - convert_unicode=convert_unicode, - ) - SchemaType.__init__(self, **kw) - - def __repr__(self): - return util.generic_repr(self, [ - ("native_enum", True), - ("name", None) - ]) - - def _should_create_constraint(self, compiler): - return not self.native_enum or \ - not compiler.dialect.supports_native_enum - - def _set_table(self, column, table): - if self.native_enum: - SchemaType._set_table(self, column, table) - - - e = schema.CheckConstraint( - column.in_(self.enums), - name=self.name, - _create_rule=util.portable_instancemethod( - self._should_create_constraint) - ) - table.append_constraint(e) - - def adapt(self, impltype, **kw): - if issubclass(impltype, Enum): - return impltype(name=self.name, - quote=self.quote, - schema=self.schema, - metadata=self.metadata, - convert_unicode=self.convert_unicode, - native_enum=self.native_enum, - *self.enums, - **kw - ) - else: - return super(Enum, self).adapt(impltype, **kw) - -class PickleType(MutableType, TypeDecorator): - """Holds Python objects, which are serialized using pickle. - - PickleType builds upon the Binary type to apply Python's - ``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on - the way out, allowing any pickleable Python object to be stored as - a serialized binary field. - - """ - - impl = LargeBinary - - def __init__(self, protocol=pickle.HIGHEST_PROTOCOL, - pickler=None, mutable=False, comparator=None): - """ - Construct a PickleType. - - :param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``. - - :param pickler: defaults to cPickle.pickle or pickle.pickle if - cPickle is not available. May be any object with - pickle-compatible ``dumps` and ``loads`` methods. - - :param mutable: defaults to False; implements - :meth:`AbstractType.is_mutable`. When ``True``, incoming - objects will be compared against copies of themselves - using the Python "equals" operator, unless the - ``comparator`` argument is present. See - :class:`.MutableType` for details on "mutable" type - behavior. - - .. versionchanged:: 0.7.0 - Default changed from ``True``. - - .. note:: - - This functionality is now superseded by the - ``sqlalchemy.ext.mutable`` extension described in - :ref:`mutable_toplevel`. - - :param comparator: a 2-arg callable predicate used - to compare values of this type. If left as ``None``, - the Python "equals" operator is used to compare values. 
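`PickleType` as documented above, in column form; note the 0.7 change of `mutable` defaulting to ``False`` (table and column names are illustrative):

```python
from sqlalchemy import MetaData, Table, Column, Integer, PickleType

meta = MetaData()
jobs = Table('jobs', meta,
    Column('id', Integer, primary_key=True),
    # Any picklable object, serialized into the underlying LargeBinary
    # column; mutable defaults to False as of 0.7.
    Column('payload', PickleType()),
)
```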
- - """ - self.protocol = protocol - self.pickler = pickler or pickle - self.mutable = mutable - self.comparator = comparator - super(PickleType, self).__init__() - - def __reduce__(self): - return PickleType, (self.protocol, - None, - self.mutable, - self.comparator) - - def bind_processor(self, dialect): - impl_processor = self.impl.bind_processor(dialect) - dumps = self.pickler.dumps - protocol = self.protocol - if impl_processor: - def process(value): - if value is not None: - value = dumps(value, protocol) - return impl_processor(value) - else: - def process(value): - if value is not None: - value = dumps(value, protocol) - return value - return process - - def result_processor(self, dialect, coltype): - impl_processor = self.impl.result_processor(dialect, coltype) - loads = self.pickler.loads - if impl_processor: - def process(value): - value = impl_processor(value) - if value is None: - return None - return loads(value) - else: - def process(value): - if value is None: - return None - return loads(value) - return process - - def copy_value(self, value): - if self.mutable: - return self.pickler.loads( - self.pickler.dumps(value, self.protocol)) - else: - return value - - def compare_values(self, x, y): - if self.comparator: - return self.comparator(x, y) - else: - return x == y - - def is_mutable(self): - """Return True if the target Python type is 'mutable'. - - When this method is overridden, :meth:`copy_value` should - also be supplied. The :class:`.MutableType` mixin - is recommended as a helper. - - """ - return self.mutable - - -class Boolean(TypeEngine, SchemaType): - """A bool datatype. - - Boolean typically uses BOOLEAN or SMALLINT on the DDL side, and on - the Python side deals in ``True`` or ``False``. - - """ - - __visit_name__ = 'boolean' - - def __init__(self, create_constraint=True, name=None): - """Construct a Boolean. - - :param create_constraint: defaults to True. If the boolean - is generated as an int/smallint, also create a CHECK constraint - on the table that ensures 1 or 0 as a value. - - :param name: if a CHECK constraint is generated, specify - the name of the constraint. - - """ - self.create_constraint = create_constraint - self.name = name - - def _should_create_constraint(self, compiler): - return not compiler.dialect.supports_native_boolean - - def _set_table(self, column, table): - if not self.create_constraint: - return - - e = schema.CheckConstraint( - column.in_([0, 1]), - name=self.name, - _create_rule=util.portable_instancemethod( - self._should_create_constraint) - ) - table.append_constraint(e) - - @property - def python_type(self): - return bool - - def bind_processor(self, dialect): - if dialect.supports_native_boolean: - return None - else: - return processors.boolean_to_int - - def result_processor(self, dialect, coltype): - if dialect.supports_native_boolean: - return None - else: - return processors.int_to_boolean - -class Interval(_DateAffinity, TypeDecorator): - """A type for ``datetime.timedelta()`` objects. - - The Interval type deals with ``datetime.timedelta`` objects. In - PostgreSQL, the native ``INTERVAL`` type is used; for others, the - value is stored as a date which is relative to the "epoch" - (Jan. 1, 1970). - - Note that the ``Interval`` type does not currently provide date arithmetic - operations on platforms which do not support interval types natively. 
Such - operations usually require transformation of both sides of the expression - (such as, conversion of both sides into integer epoch values first) which - currently is a manual procedure (such as via - :attr:`~sqlalchemy.sql.expression.func`). - - """ - - impl = DateTime - epoch = dt.datetime.utcfromtimestamp(0) - - def __init__(self, native=True, - second_precision=None, - day_precision=None): - """Construct an Interval object. - - :param native: when True, use the actual - INTERVAL type provided by the database, if - supported (currently Postgresql, Oracle). - Otherwise, represent the interval data as - an epoch value regardless. - - :param second_precision: For native interval types - which support a "fractional seconds precision" parameter, - i.e. Oracle and Postgresql - - :param day_precision: for native interval types which - support a "day precision" parameter, i.e. Oracle. - - """ - super(Interval, self).__init__() - self.native = native - self.second_precision = second_precision - self.day_precision = day_precision - - def adapt(self, cls, **kw): - if self.native and hasattr(cls, '_adapt_from_generic_interval'): - return cls._adapt_from_generic_interval(self, **kw) - else: - return self.__class__( - native=self.native, - second_precision=self.second_precision, - day_precision=self.day_precision, - **kw) - - @property - def python_type(self): - return dt.timedelta - - def bind_processor(self, dialect): - impl_processor = self.impl.bind_processor(dialect) - epoch = self.epoch - if impl_processor: - def process(value): - if value is not None: - value = epoch + value - return impl_processor(value) - else: - def process(value): - if value is not None: - value = epoch + value - return value - return process - - def result_processor(self, dialect, coltype): - impl_processor = self.impl.result_processor(dialect, coltype) - epoch = self.epoch - if impl_processor: - def process(value): - value = impl_processor(value) - if value is None: - return None - return value - epoch - else: - def process(value): - if value is None: - return None - return value - epoch - return process - - @util.memoized_property - def _expression_adaptations(self): - return { - operators.add:{ - Date:DateTime, - Interval:Interval, - DateTime:DateTime, - Time:Time, - }, - operators.sub:{ - Interval:Interval - }, - operators.mul:{ - Numeric:Interval - }, - operators.truediv: { - Numeric:Interval - }, - # Py2K - operators.div: { - Numeric:Interval - } - # end Py2K - } - - @property - def _type_affinity(self): - return Interval - - def _coerce_compared_value(self, op, value): - """See :meth:`.TypeEngine._coerce_compared_value` for a description.""" - - return self.impl._coerce_compared_value(op, value) - - -class REAL(Float): - """The SQL REAL type.""" - - __visit_name__ = 'REAL' - -class FLOAT(Float): - """The SQL FLOAT type.""" - - __visit_name__ = 'FLOAT' - -class NUMERIC(Numeric): - """The SQL NUMERIC type.""" - - __visit_name__ = 'NUMERIC' - - -class DECIMAL(Numeric): - """The SQL DECIMAL type.""" - - __visit_name__ = 'DECIMAL' - - -class INTEGER(Integer): - """The SQL INT or INTEGER type.""" - - __visit_name__ = 'INTEGER' -INT = INTEGER - - -class SMALLINT(SmallInteger): - """The SQL SMALLINT type.""" - - __visit_name__ = 'SMALLINT' - - -class BIGINT(BigInteger): - """The SQL BIGINT type.""" - - __visit_name__ = 'BIGINT' - -class TIMESTAMP(DateTime): - """The SQL TIMESTAMP type.""" - - __visit_name__ = 'TIMESTAMP' - - def get_dbapi_type(self, dbapi): - return dbapi.TIMESTAMP - -class DATETIME(DateTime): - 
"""The SQL DATETIME type.""" - - __visit_name__ = 'DATETIME' - - -class DATE(Date): - """The SQL DATE type.""" - - __visit_name__ = 'DATE' - - -class TIME(Time): - """The SQL TIME type.""" - - __visit_name__ = 'TIME' - -class TEXT(Text): - """The SQL TEXT type.""" - - __visit_name__ = 'TEXT' - -class CLOB(Text): - """The CLOB type. - - This type is found in Oracle and Informix. - """ - - __visit_name__ = 'CLOB' - -class VARCHAR(String): - """The SQL VARCHAR type.""" - - __visit_name__ = 'VARCHAR' - -class NVARCHAR(Unicode): - """The SQL NVARCHAR type.""" - - __visit_name__ = 'NVARCHAR' - -class CHAR(String): - """The SQL CHAR type.""" - - __visit_name__ = 'CHAR' - - -class NCHAR(Unicode): - """The SQL NCHAR type.""" - - __visit_name__ = 'NCHAR' - - -class BLOB(LargeBinary): - """The SQL BLOB type.""" - - __visit_name__ = 'BLOB' - -class BINARY(_Binary): - """The SQL BINARY type.""" - - __visit_name__ = 'BINARY' - -class VARBINARY(_Binary): - """The SQL VARBINARY type.""" - - __visit_name__ = 'VARBINARY' - - -class BOOLEAN(Boolean): - """The SQL BOOLEAN type.""" - - __visit_name__ = 'BOOLEAN' - -NULLTYPE = NullType() -BOOLEANTYPE = Boolean() -STRINGTYPE = String() - -_type_map = { - str: String(), - # Py3K - #bytes : LargeBinary(), - # Py2K - unicode : Unicode(), - # end Py2K - int : Integer(), - float : Numeric(), - bool: BOOLEANTYPE, - decimal.Decimal : Numeric(), - dt.date : Date(), - dt.datetime : DateTime(), - dt.time : Time(), - dt.timedelta : Interval(), - NoneType: NULLTYPE -} - diff --git a/libs/sqlalchemy/util/__init__.py b/libs/sqlalchemy/util/__init__.py deleted file mode 100644 index 8cb4c65b..00000000 --- a/libs/sqlalchemy/util/__init__.py +++ /dev/null @@ -1,34 +0,0 @@ -# util/__init__.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -from compat import callable, cmp, reduce, defaultdict, py25_dict, \ - threading, py3k_warning, jython, pypy, cpython, win32, set_types, buffer, \ - pickle, update_wrapper, partial, md5_hex, decode_slice, dottedgetter,\ - parse_qsl, any, contextmanager, next - -from _collections import NamedTuple, ImmutableContainer, immutabledict, \ - Properties, OrderedProperties, ImmutableProperties, OrderedDict, \ - OrderedSet, IdentitySet, OrderedIdentitySet, column_set, \ - column_dict, ordered_column_set, populate_column_dict, unique_list, \ - UniqueAppender, PopulateDict, EMPTY_SET, to_list, to_set, \ - to_column_set, update_copy, flatten_iterator, WeakIdentityMapping, \ - LRUCache, ScopedRegistry, ThreadLocalRegistry - -from langhelpers import iterate_attributes, class_hierarchy, \ - portable_instancemethod, unbound_method_to_callable, \ - getargspec_init, format_argspec_init, format_argspec_plus, \ - get_func_kwargs, get_cls_kwargs, decorator, as_interface, \ - memoized_property, memoized_instancemethod, \ - reset_memoized, group_expirable_memoized_property, importlater, \ - monkeypatch_proxied_specials, asbool, bool_or_str, coerce_kw_type,\ - duck_type_collection, assert_arg_type, symbol, dictlike_iteritems,\ - classproperty, set_creation_order, warn_exception, warn, NoneType,\ - constructor_copy, methods_equivalent, chop_traceback, asint,\ - generic_repr, counter - -from deprecations import warn_deprecated, warn_pending_deprecation, \ - deprecated, pending_deprecation - diff --git a/libs/sqlalchemy/util/_collections.py b/libs/sqlalchemy/util/_collections.py deleted file mode 100644 index 
5a09dca6..00000000 --- a/libs/sqlalchemy/util/_collections.py +++ /dev/null @@ -1,905 +0,0 @@ -# util/_collections.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Collection classes and helpers.""" - -import sys -import itertools -import weakref -import operator -from langhelpers import symbol -from compat import time_func, threading - -EMPTY_SET = frozenset() - - -class NamedTuple(tuple): - """tuple() subclass that adds labeled names. - - Is also pickleable. - - """ - - def __new__(cls, vals, labels=None): - t = tuple.__new__(cls, vals) - if labels: - t.__dict__.update(zip(labels, vals)) - t._labels = labels - return t - - def keys(self): - return [l for l in self._labels if l is not None] - -class ImmutableContainer(object): - def _immutable(self, *arg, **kw): - raise TypeError("%s object is immutable" % self.__class__.__name__) - - __delitem__ = __setitem__ = __setattr__ = _immutable - -class immutabledict(ImmutableContainer, dict): - - clear = pop = popitem = setdefault = \ - update = ImmutableContainer._immutable - - def __new__(cls, *args): - new = dict.__new__(cls) - dict.__init__(new, *args) - return new - - def __init__(self, *args): - pass - - def __reduce__(self): - return immutabledict, (dict(self), ) - - def union(self, d): - if not self: - return immutabledict(d) - else: - d2 = immutabledict(self) - dict.update(d2, d) - return d2 - - def __repr__(self): - return "immutabledict(%s)" % dict.__repr__(self) - -class Properties(object): - """Provide a __getattr__/__setattr__ interface over a dict.""" - - def __init__(self, data): - self.__dict__['_data'] = data - - def __len__(self): - return len(self._data) - - def __iter__(self): - return self._data.itervalues() - - def __add__(self, other): - return list(self) + list(other) - - def __setitem__(self, key, object): - self._data[key] = object - - def __getitem__(self, key): - return self._data[key] - - def __delitem__(self, key): - del self._data[key] - - def __setattr__(self, key, object): - self._data[key] = object - - def __getstate__(self): - return {'_data': self.__dict__['_data']} - - def __setstate__(self, state): - self.__dict__['_data'] = state['_data'] - - def __getattr__(self, key): - try: - return self._data[key] - except KeyError: - raise AttributeError(key) - - def __contains__(self, key): - return key in self._data - - def as_immutable(self): - """Return an immutable proxy for this :class:`.Properties`.""" - - return ImmutableProperties(self._data) - - def update(self, value): - self._data.update(value) - - def get(self, key, default=None): - if key in self: - return self[key] - else: - return default - - def keys(self): - return self._data.keys() - - def has_key(self, key): - return key in self._data - - def clear(self): - self._data.clear() - -class OrderedProperties(Properties): - """Provide a __getattr__/__setattr__ interface with an OrderedDict - as backing store.""" - def __init__(self): - Properties.__init__(self, OrderedDict()) - - -class ImmutableProperties(ImmutableContainer, Properties): - """Provide immutable dict/object attribute to an underlying dictionary.""" - - -class OrderedDict(dict): - """A dict that returns keys/values/items in the order they were added.""" - - def __init__(self, ____sequence=None, **kwargs): - self._list = [] - if ____sequence is None: - if kwargs: - self.update(**kwargs) - else: - self.update(____sequence, **kwargs) - 
- def clear(self): - self._list = [] - dict.clear(self) - - def copy(self): - return self.__copy__() - - def __copy__(self): - return OrderedDict(self) - - def sort(self, *arg, **kw): - self._list.sort(*arg, **kw) - - def update(self, ____sequence=None, **kwargs): - if ____sequence is not None: - if hasattr(____sequence, 'keys'): - for key in ____sequence.keys(): - self.__setitem__(key, ____sequence[key]) - else: - for key, value in ____sequence: - self[key] = value - if kwargs: - self.update(kwargs) - - def setdefault(self, key, value): - if key not in self: - self.__setitem__(key, value) - return value - else: - return self.__getitem__(key) - - def __iter__(self): - return iter(self._list) - - def values(self): - return [self[key] for key in self._list] - - def itervalues(self): - return iter([self[key] for key in self._list]) - - def keys(self): - return list(self._list) - - def iterkeys(self): - return iter(self.keys()) - - def items(self): - return [(key, self[key]) for key in self.keys()] - - def iteritems(self): - return iter(self.items()) - - def __setitem__(self, key, object): - if key not in self: - try: - self._list.append(key) - except AttributeError: - # work around Python pickle loads() with - # dict subclass (seems to ignore __setstate__?) - self._list = [key] - dict.__setitem__(self, key, object) - - def __delitem__(self, key): - dict.__delitem__(self, key) - self._list.remove(key) - - def pop(self, key, *default): - present = key in self - value = dict.pop(self, key, *default) - if present: - self._list.remove(key) - return value - - def popitem(self): - item = dict.popitem(self) - self._list.remove(item[0]) - return item - -class OrderedSet(set): - def __init__(self, d=None): - set.__init__(self) - self._list = [] - if d is not None: - self.update(d) - - def add(self, element): - if element not in self: - self._list.append(element) - set.add(self, element) - - def remove(self, element): - set.remove(self, element) - self._list.remove(element) - - def insert(self, pos, element): - if element not in self: - self._list.insert(pos, element) - set.add(self, element) - - def discard(self, element): - if element in self: - self._list.remove(element) - set.remove(self, element) - - def clear(self): - set.clear(self) - self._list = [] - - def __getitem__(self, key): - return self._list[key] - - def __iter__(self): - return iter(self._list) - - def __add__(self, other): - return self.union(other) - - def __repr__(self): - return '%s(%r)' % (self.__class__.__name__, self._list) - - __str__ = __repr__ - - def update(self, iterable): - for e in iterable: - if e not in self: - self._list.append(e) - set.add(self, e) - return self - - __ior__ = update - - def union(self, other): - result = self.__class__(self) - result.update(other) - return result - - __or__ = union - - def intersection(self, other): - other = set(other) - return self.__class__(a for a in self if a in other) - - __and__ = intersection - - def symmetric_difference(self, other): - other = set(other) - result = self.__class__(a for a in self if a not in other) - result.update(a for a in other if a not in self) - return result - - __xor__ = symmetric_difference - - def difference(self, other): - other = set(other) - return self.__class__(a for a in self if a not in other) - - __sub__ = difference - - def intersection_update(self, other): - other = set(other) - set.intersection_update(self, other) - self._list = [ a for a in self._list if a in other] - return self - - __iand__ = intersection_update - - def 
symmetric_difference_update(self, other): - set.symmetric_difference_update(self, other) - self._list = [ a for a in self._list if a in self] - self._list += [ a for a in other._list if a in self] - return self - - __ixor__ = symmetric_difference_update - - def difference_update(self, other): - set.difference_update(self, other) - self._list = [ a for a in self._list if a in self] - return self - - __isub__ = difference_update - - -class IdentitySet(object): - """A set that considers only object id() for uniqueness. - - This strategy has edge cases for builtin types- it's possible to have - two 'foo' strings in one of these sets, for example. Use sparingly. - - """ - - _working_set = set - - def __init__(self, iterable=None): - self._members = dict() - if iterable: - for o in iterable: - self.add(o) - - def add(self, value): - self._members[id(value)] = value - - def __contains__(self, value): - return id(value) in self._members - - def remove(self, value): - del self._members[id(value)] - - def discard(self, value): - try: - self.remove(value) - except KeyError: - pass - - def pop(self): - try: - pair = self._members.popitem() - return pair[1] - except KeyError: - raise KeyError('pop from an empty set') - - def clear(self): - self._members.clear() - - def __sub__(self, other): - return self.difference(other) - - def __cmp__(self, other): - raise TypeError('cannot compare sets using cmp()') - - def __eq__(self, other): - if isinstance(other, IdentitySet): - return self._members == other._members - else: - return False - - def __ne__(self, other): - if isinstance(other, IdentitySet): - return self._members != other._members - else: - return True - - def issubset(self, iterable): - other = type(self)(iterable) - - if len(self) > len(other): - return False - for m in itertools.ifilterfalse(other._members.__contains__, - self._members.iterkeys()): - return False - return True - - def __le__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.issubset(other) - - def __lt__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return len(self) < len(other) and self.issubset(other) - - def issuperset(self, iterable): - other = type(self)(iterable) - - if len(self) < len(other): - return False - - for m in itertools.ifilterfalse(self._members.__contains__, - other._members.iterkeys()): - return False - return True - - def __ge__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.issuperset(other) - - def __gt__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return len(self) > len(other) and self.issuperset(other) - - def union(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - result._members.update( - self._working_set(self._member_id_tuples()).union(_iter_id(iterable))) - return result - - def __or__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.union(other) - - def update(self, iterable): - self._members = self.union(iterable)._members - - def __ior__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.update(other) - return self - - def difference(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - result._members.update( - self._working_set(self._member_id_tuples()).difference(_iter_id(iterable))) - return result - - def __sub__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return 
self.difference(other) - - def difference_update(self, iterable): - self._members = self.difference(iterable)._members - - def __isub__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.difference_update(other) - return self - - def intersection(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - result._members.update( - self._working_set(self._member_id_tuples()).intersection(_iter_id(iterable))) - return result - - def __and__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.intersection(other) - - def intersection_update(self, iterable): - self._members = self.intersection(iterable)._members - - def __iand__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.intersection_update(other) - return self - - def symmetric_difference(self, iterable): - result = type(self)() - # testlib.pragma exempt:__hash__ - result._members.update( - self._working_set(self._member_id_tuples()).symmetric_difference(_iter_id(iterable))) - return result - - def _member_id_tuples(self): - return ((id(v), v) for v in self._members.itervalues()) - - def __xor__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - return self.symmetric_difference(other) - - def symmetric_difference_update(self, iterable): - self._members = self.symmetric_difference(iterable)._members - - def __ixor__(self, other): - if not isinstance(other, IdentitySet): - return NotImplemented - self.symmetric_difference(other) - return self - - def copy(self): - return type(self)(self._members.itervalues()) - - __copy__ = copy - - def __len__(self): - return len(self._members) - - def __iter__(self): - return self._members.itervalues() - - def __hash__(self): - raise TypeError('set objects are unhashable') - - def __repr__(self): - return '%s(%r)' % (type(self).__name__, self._members.values()) - - -class OrderedIdentitySet(IdentitySet): - class _working_set(OrderedSet): - # a testing pragma: exempt the OIDS working set from the test suite's - # "never call the user's __hash__" assertions. this is a big hammer, - # but it's safe here: IDS operates on (id, instance) tuples in the - # working set. - __sa_hash_exempt__ = True - - def __init__(self, iterable=None): - IdentitySet.__init__(self) - self._members = OrderedDict() - if iterable: - for o in iterable: - self.add(o) - - -if sys.version_info >= (2, 5): - class PopulateDict(dict): - """A dict which populates missing values via a creation function. - - Note the creation function takes a key, unlike - collections.defaultdict. - - """ - - def __init__(self, creator): - self.creator = creator - - def __missing__(self, key): - self[key] = val = self.creator(key) - return val -else: - class PopulateDict(dict): - """A dict which populates missing values via a creation function.""" - - def __init__(self, creator): - self.creator = creator - - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - self[key] = value = self.creator(key) - return value - -# define collections that are capable of storing -# ColumnElement objects as hashable keys/elements. 
-column_set = set -column_dict = dict -ordered_column_set = OrderedSet -populate_column_dict = PopulateDict - -def unique_list(seq, hashfunc=None): - seen = {} - if not hashfunc: - return [x for x in seq - if x not in seen - and not seen.__setitem__(x, True)] - else: - return [x for x in seq - if hashfunc(x) not in seen - and not seen.__setitem__(hashfunc(x), True)] - -class UniqueAppender(object): - """Appends items to a collection ensuring uniqueness. - - Additional appends() of the same object are ignored. Membership is - determined by identity (``is a``) not equality (``==``). - """ - - def __init__(self, data, via=None): - self.data = data - self._unique = {} - if via: - self._data_appender = getattr(data, via) - elif hasattr(data, 'append'): - self._data_appender = data.append - elif hasattr(data, 'add'): - self._data_appender = data.add - - def append(self, item): - id_ = id(item) - if id_ not in self._unique: - self._data_appender(item) - self._unique[id_] = True - - def __iter__(self): - return iter(self.data) - -def to_list(x, default=None): - if x is None: - return default - if not isinstance(x, (list, tuple)): - return [x] - else: - return x - -def to_set(x): - if x is None: - return set() - if not isinstance(x, set): - return set(to_list(x)) - else: - return x - -def to_column_set(x): - if x is None: - return column_set() - if not isinstance(x, column_set): - return column_set(to_list(x)) - else: - return x - -def update_copy(d, _new=None, **kw): - """Copy the given dict and update with the given values.""" - - d = d.copy() - if _new: - d.update(_new) - d.update(**kw) - return d - -def flatten_iterator(x): - """Given an iterator of which further sub-elements may also be - iterators, flatten the sub-elements into a single iterator. - - """ - for elem in x: - if not isinstance(elem, basestring) and hasattr(elem, '__iter__'): - for y in flatten_iterator(elem): - yield y - else: - yield elem - -class WeakIdentityMapping(weakref.WeakKeyDictionary): - """A WeakKeyDictionary with an object identity index. - - Adds a .by_id dictionary to a regular WeakKeyDictionary. Trades - performance during mutation operations for accelerated lookups by id(). - - The usual cautions about weak dictionaries and iteration also apply to - this subclass. 
- - """ - _none = symbol('none') - - def __init__(self): - weakref.WeakKeyDictionary.__init__(self) - self.by_id = {} - self._weakrefs = {} - - def __setitem__(self, object, value): - oid = id(object) - self.by_id[oid] = value - if oid not in self._weakrefs: - self._weakrefs[oid] = self._ref(object) - weakref.WeakKeyDictionary.__setitem__(self, object, value) - - def __delitem__(self, object): - del self._weakrefs[id(object)] - del self.by_id[id(object)] - weakref.WeakKeyDictionary.__delitem__(self, object) - - def setdefault(self, object, default=None): - value = weakref.WeakKeyDictionary.setdefault(self, object, default) - oid = id(object) - if value is default: - self.by_id[oid] = default - if oid not in self._weakrefs: - self._weakrefs[oid] = self._ref(object) - return value - - def pop(self, object, default=_none): - if default is self._none: - value = weakref.WeakKeyDictionary.pop(self, object) - else: - value = weakref.WeakKeyDictionary.pop(self, object, default) - if id(object) in self.by_id: - del self._weakrefs[id(object)] - del self.by_id[id(object)] - return value - - def popitem(self): - item = weakref.WeakKeyDictionary.popitem(self) - oid = id(item[0]) - del self._weakrefs[oid] - del self.by_id[oid] - return item - - def clear(self): - # Py2K - # in 3k, MutableMapping calls popitem() - self._weakrefs.clear() - self.by_id.clear() - # end Py2K - weakref.WeakKeyDictionary.clear(self) - - def update(self, *a, **kw): - raise NotImplementedError - - def _cleanup(self, wr, key=None): - if key is None: - key = wr.key - try: - del self._weakrefs[key] - except (KeyError, AttributeError): # pragma: no cover - pass # pragma: no cover - try: - del self.by_id[key] - except (KeyError, AttributeError): # pragma: no cover - pass # pragma: no cover - - class _keyed_weakref(weakref.ref): - def __init__(self, object, callback): - weakref.ref.__init__(self, object, callback) - self.key = id(object) - - def _ref(self, object): - return self._keyed_weakref(object, self._cleanup) - - -class LRUCache(dict): - """Dictionary with 'squishy' removal of least - recently used items. - - """ - def __init__(self, capacity=100, threshold=.5): - self.capacity = capacity - self.threshold = threshold - self._counter = 0 - - def _inc_counter(self): - self._counter += 1 - return self._counter - - def __getitem__(self, key): - item = dict.__getitem__(self, key) - item[2] = self._inc_counter() - return item[1] - - def values(self): - return [i[1] for i in dict.values(self)] - - def setdefault(self, key, value): - if key in self: - return self[key] - else: - self[key] = value - return value - - def __setitem__(self, key, value): - item = dict.get(self, key) - if item is None: - item = [key, value, self._inc_counter()] - dict.__setitem__(self, key, item) - else: - item[1] = value - self._manage_size() - - def _manage_size(self): - while len(self) > self.capacity + self.capacity * self.threshold: - by_counter = sorted(dict.values(self), - key=operator.itemgetter(2), - reverse=True) - for item in by_counter[self.capacity:]: - try: - del self[item[0]] - except KeyError: - # if we couldnt find a key, most - # likely some other thread broke in - # on us. loop around and try again - break - - -class ScopedRegistry(object): - """A Registry that can store one or multiple instances of a single - class on the basis of a "scope" function. - - The object implements ``__call__`` as the "getter", so by - calling ``myregistry()`` the contained object is returned - for the current scope. 
-
-    :param createfunc:
-      a callable that returns a new object to be placed in the registry
-
-    :param scopefunc:
-      a callable that will return a key to store/retrieve an object.
-    """
-
-    def __init__(self, createfunc, scopefunc):
-        """Construct a new :class:`.ScopedRegistry`.
-
-        :param createfunc: A creation function that will generate
-          a new value for the current scope, if none is present.
-
-        :param scopefunc: A function that returns a hashable
-          token representing the current scope (such as, current
-          thread identifier).
-
-        """
-        self.createfunc = createfunc
-        self.scopefunc = scopefunc
-        self.registry = {}
-
-    def __call__(self):
-        key = self.scopefunc()
-        try:
-            return self.registry[key]
-        except KeyError:
-            return self.registry.setdefault(key, self.createfunc())
-
-    def has(self):
-        """Return True if an object is present in the current scope."""
-
-        return self.scopefunc() in self.registry
-
-    def set(self, obj):
-        """Set the value for the current scope."""
-
-        self.registry[self.scopefunc()] = obj
-
-    def clear(self):
-        """Clear the current scope, if any."""
-
-        try:
-            del self.registry[self.scopefunc()]
-        except KeyError:
-            pass
-
-class ThreadLocalRegistry(ScopedRegistry):
-    """A :class:`.ScopedRegistry` that uses a ``threading.local()``
-    variable for storage.
-
-    """
-    def __init__(self, createfunc):
-        self.createfunc = createfunc
-        self.registry = threading.local()
-
-    def __call__(self):
-        try:
-            return self.registry.value
-        except AttributeError:
-            val = self.registry.value = self.createfunc()
-            return val
-
-    def has(self):
-        return hasattr(self.registry, "value")
-
-    def set(self, obj):
-        self.registry.value = obj
-
-    def clear(self):
-        try:
-            del self.registry.value
-        except AttributeError:
-            pass
-
-def _iter_id(iterable):
-    """Generator: ((id(o), o) for o in iterable)."""
-
-    for item in iterable:
-        yield id(item), item
-
diff --git a/libs/sqlalchemy/util/compat.py b/libs/sqlalchemy/util/compat.py
deleted file mode 100644
index 18ea2815..00000000
--- a/libs/sqlalchemy/util/compat.py
+++ /dev/null
@@ -1,243 +0,0 @@
-# util/compat.py
-# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors
-#
-# This module is part of SQLAlchemy and is released under
-# the MIT License: http://www.opensource.org/licenses/mit-license.php
-
-"""Handle Python version/platform incompatibilities."""
-
-import sys
-
-
-try:
-    import threading
-except ImportError:
-    import dummy_threading as threading
-
-py32 = sys.version_info >= (3, 2)
-py3k_warning = getattr(sys, 'py3kwarning', False) or sys.version_info >= (3, 0)
-jython = sys.platform.startswith('java')
-pypy = hasattr(sys, 'pypy_version_info')
-win32 = sys.platform.startswith('win')
-cpython = not pypy and not jython # TODO: something better for this ?
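
Editorial aside: the `ScopedRegistry` removed above is the machinery behind SQLAlchemy's `scoped_session`: one lazily created object per scope key. A minimal usage sketch (the class body is condensed from the deleted `_collections.py`; the thread-name `scopefunc` is an illustrative choice, not the library default):

```python
import threading

class ScopedRegistry(object):
    # Condensed from the removed util/_collections.py.
    def __init__(self, createfunc, scopefunc):
        self.createfunc = createfunc
        self.scopefunc = scopefunc
        self.registry = {}

    def __call__(self):
        key = self.scopefunc()
        try:
            return self.registry[key]
        except KeyError:
            # First access in this scope: create and remember the object.
            return self.registry.setdefault(key, self.createfunc())

# One list per thread, created on first access.
registry = ScopedRegistry(createfunc=list,
                          scopefunc=lambda: threading.current_thread().name)

registry().append('main')
assert registry() is registry()   # repeated calls in one scope reuse the object
```
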
- -if py3k_warning: - set_types = set -elif sys.version_info < (2, 6): - import sets - set_types = set, sets.Set -else: - # 2.6 deprecates sets.Set, but we still need to be able to detect them - # in user code and as return values from DB-APIs - ignore = ('ignore', None, DeprecationWarning, None, 0) - import warnings - try: - warnings.filters.insert(0, ignore) - except Exception: - import sets - else: - import sets - warnings.filters.remove(ignore) - - set_types = set, sets.Set - -if sys.version_info < (2, 6): - def next(iter): - return iter.next() -else: - next = next -if py3k_warning: - import pickle -else: - try: - import cPickle as pickle - except ImportError: - import pickle - -# a controversial feature, required by MySQLdb currently -def buffer(x): - return x - -# Py2K -buffer = buffer -# end Py2K - -try: - from contextlib import contextmanager -except ImportError: - def contextmanager(fn): - return fn - -try: - from functools import update_wrapper -except ImportError: - def update_wrapper(wrapper, wrapped, - assigned=('__doc__', '__module__', '__name__'), - updated=('__dict__',)): - for attr in assigned: - setattr(wrapper, attr, getattr(wrapped, attr)) - for attr in updated: - getattr(wrapper, attr).update(getattr(wrapped, attr, ())) - return wrapper - -try: - from functools import partial -except ImportError: - def partial(func, *args, **keywords): - def newfunc(*fargs, **fkeywords): - newkeywords = keywords.copy() - newkeywords.update(fkeywords) - return func(*(args + fargs), **newkeywords) - return newfunc - - -if sys.version_info < (2, 6): - # emits a nasty deprecation warning - # in newer pythons - from cgi import parse_qsl -else: - from urlparse import parse_qsl - -# Py3K -#from inspect import getfullargspec as inspect_getfullargspec -# Py2K -from inspect import getargspec as inspect_getfullargspec -# end Py2K - -if py3k_warning: - # they're bringing it back in 3.2. brilliant ! 
- def callable(fn): - return hasattr(fn, '__call__') - def cmp(a, b): - return (a > b) - (a < b) - - from functools import reduce -else: - callable = callable - cmp = cmp - reduce = reduce - -try: - from collections import defaultdict -except ImportError: - class defaultdict(dict): - def __init__(self, default_factory=None, *a, **kw): - if (default_factory is not None and - not hasattr(default_factory, '__call__')): - raise TypeError('first argument must be callable') - dict.__init__(self, *a, **kw) - self.default_factory = default_factory - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - return self.__missing__(key) - def __missing__(self, key): - if self.default_factory is None: - raise KeyError(key) - self[key] = value = self.default_factory() - return value - def __reduce__(self): - if self.default_factory is None: - args = tuple() - else: - args = self.default_factory, - return type(self), args, None, None, self.iteritems() - def copy(self): - return self.__copy__() - def __copy__(self): - return type(self)(self.default_factory, self) - def __deepcopy__(self, memo): - import copy - return type(self)(self.default_factory, - copy.deepcopy(self.items())) - def __repr__(self): - return 'defaultdict(%s, %s)' % (self.default_factory, - dict.__repr__(self)) - - -# find or create a dict implementation that supports __missing__ -class _probe(dict): - def __missing__(self, key): - return 1 - -try: - try: - _probe()['missing'] - py25_dict = dict - except KeyError: - class py25_dict(dict): - def __getitem__(self, key): - try: - return dict.__getitem__(self, key) - except KeyError: - try: - missing = self.__missing__ - except AttributeError: - raise KeyError(key) - else: - return missing(key) -finally: - del _probe - - -try: - import hashlib - _md5 = hashlib.md5 -except ImportError: - import md5 - _md5 = md5.new - -def md5_hex(x): - # Py3K - #x = x.encode('utf-8') - m = _md5() - m.update(x) - return m.hexdigest() - -import time -if win32 or jython: - time_func = time.clock -else: - time_func = time.time - -if sys.version_info >= (2, 5): - any = any -else: - def any(iterator): - for item in iterator: - if bool(item): - return True - else: - return False - -if sys.version_info >= (2, 5): - def decode_slice(slc): - """decode a slice object as sent to __getitem__. - - takes into account the 2.5 __index__() method, basically. 
- - """ - ret = [] - for x in slc.start, slc.stop, slc.step: - if hasattr(x, '__index__'): - x = x.__index__() - ret.append(x) - return tuple(ret) -else: - def decode_slice(slc): - return (slc.start, slc.stop, slc.step) - -if sys.version_info >= (2, 6): - from operator import attrgetter as dottedgetter -else: - def dottedgetter(attr): - def g(obj): - for name in attr.split("."): - obj = getattr(obj, name) - return obj - return g - - -import decimal - diff --git a/libs/sqlalchemy/util/deprecations.py b/libs/sqlalchemy/util/deprecations.py deleted file mode 100644 index 330d35db..00000000 --- a/libs/sqlalchemy/util/deprecations.py +++ /dev/null @@ -1,118 +0,0 @@ -# util/deprecations.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Helpers related to deprecation of functions, methods, classes, other -functionality.""" - -from sqlalchemy import exc -import warnings -import re -from langhelpers import decorator - -def warn_deprecated(msg, stacklevel=3): - warnings.warn(msg, exc.SADeprecationWarning, stacklevel=stacklevel) - -def warn_pending_deprecation(msg, stacklevel=3): - warnings.warn(msg, exc.SAPendingDeprecationWarning, stacklevel=stacklevel) - -def deprecated(version, message=None, add_deprecation_to_docstring=True): - """Decorates a function and issues a deprecation warning on use. - - :param message: - If provided, issue message in the warning. A sensible default - is used if not provided. - - :param add_deprecation_to_docstring: - Default True. If False, the wrapped function's __doc__ is left - as-is. If True, the 'message' is prepended to the docs if - provided, or sensible default if message is omitted. - - """ - - if add_deprecation_to_docstring: - header = ".. deprecated:: %s %s" % \ - (version, (message or '')) - else: - header = None - - if message is None: - message = "Call to deprecated function %(func)s" - - def decorate(fn): - return _decorate_with_warning( - fn, exc.SADeprecationWarning, - message % dict(func=fn.__name__), header) - return decorate - -def pending_deprecation(version, message=None, - add_deprecation_to_docstring=True): - """Decorates a function and issues a pending deprecation warning on use. - - :param version: - An approximate future version at which point the pending deprecation - will become deprecated. Not used in messaging. - - :param message: - If provided, issue message in the warning. A sensible default - is used if not provided. - - :param add_deprecation_to_docstring: - Default True. If False, the wrapped function's __doc__ is left - as-is. If True, the 'message' is prepended to the docs if - provided, or sensible default if message is omitted. - """ - - if add_deprecation_to_docstring: - header = ".. 
deprecated:: %s (pending) %s" % \ - (version, (message or '')) - else: - header = None - - if message is None: - message = "Call to deprecated function %(func)s" - - def decorate(fn): - return _decorate_with_warning( - fn, exc.SAPendingDeprecationWarning, - message % dict(func=fn.__name__), header) - return decorate - -def _sanitize_restructured_text(text): - def repl(m): - type_, name = m.group(1, 2) - if type_ in ("func", "meth"): - name += "()" - return name - return re.sub(r'\:(\w+)\:`~?\.?(.+?)`', repl, text) - - -def _decorate_with_warning(func, wtype, message, docstring_header=None): - """Wrap a function with a warnings.warn and augmented docstring.""" - - message = _sanitize_restructured_text(message) - - @decorator - def warned(fn, *args, **kwargs): - warnings.warn(wtype(message), stacklevel=3) - return fn(*args, **kwargs) - - doc = func.__doc__ is not None and func.__doc__ or '' - if docstring_header is not None: - docstring_header %= dict(func=func.__name__) - docs = doc and doc.expandtabs().split('\n') or [] - indent = '' - for line in docs[1:]: - text = line.lstrip() - if text: - indent = line[0:len(line) - len(text)] - break - point = min(len(docs), 1) - docs.insert(point, '\n' + indent + docstring_header.rstrip()) - doc = '\n'.join(docs) - - decorated = warned(func) - decorated.__doc__ = doc - return decorated diff --git a/libs/sqlalchemy/util/langhelpers.py b/libs/sqlalchemy/util/langhelpers.py deleted file mode 100644 index b7c5132d..00000000 --- a/libs/sqlalchemy/util/langhelpers.py +++ /dev/null @@ -1,911 +0,0 @@ -# util/langhelpers.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Routines to help with the creation, loading and introspection of -modules, classes, hierarchies, attributes, functions, and methods. - -""" -import itertools -import inspect -import operator -import re -import sys -import types -import warnings -from compat import update_wrapper, set_types, threading, callable, inspect_getfullargspec, py3k_warning -from sqlalchemy import exc - -def _unique_symbols(used, *bases): - used = set(used) - for base in bases: - pool = itertools.chain((base,), - itertools.imap(lambda i: base + str(i), - xrange(1000))) - for sym in pool: - if sym not in used: - used.add(sym) - yield sym - break - else: - raise NameError("exhausted namespace for symbol base %s" % base) - -def decorator(target): - """A signature-matching decorator factory.""" - - def decorate(fn): - if not inspect.isfunction(fn): - raise Exception("not a decoratable function") - spec = inspect_getfullargspec(fn) - names = tuple(spec[0]) + spec[1:3] + (fn.func_name,) - targ_name, fn_name = _unique_symbols(names, 'target', 'fn') - - metadata = dict(target=targ_name, fn=fn_name) - metadata.update(format_argspec_plus(spec, grouped=False)) - - code = 'lambda %(args)s: %(target)s(%(fn)s, %(apply_kw)s)' % ( - metadata) - decorated = eval(code, {targ_name:target, fn_name:fn}) - decorated.func_defaults = getattr(fn, 'im_func', fn).func_defaults - return update_wrapper(decorated, fn) - return update_wrapper(decorate, target) - - -def get_cls_kwargs(cls): - """Return the full set of inherited kwargs for the given `cls`. - - Probes a class's __init__ method, collecting all named arguments. 
If the - __init__ defines a \**kwargs catch-all, then the constructor is presumed to - pass along unrecognized keywords to it's base classes, and the collection - process is repeated recursively on each of the bases. - - Uses a subset of inspect.getargspec() to cut down on method overhead. - No anonymous tuple arguments please ! - - """ - - for c in cls.__mro__: - if '__init__' in c.__dict__: - stack = set([c]) - break - else: - return [] - - args = set() - while stack: - class_ = stack.pop() - ctr = class_.__dict__.get('__init__', False) - if (not ctr or - not isinstance(ctr, types.FunctionType) or - not isinstance(ctr.func_code, types.CodeType)): - stack.update(class_.__bases__) - continue - - # this is shorthand for - # names, _, has_kw, _ = inspect.getargspec(ctr) - - names, has_kw = inspect_func_args(ctr) - args.update(names) - if has_kw: - stack.update(class_.__bases__) - args.discard('self') - return args - -try: - from inspect import CO_VARKEYWORDS - def inspect_func_args(fn): - co = fn.func_code - nargs = co.co_argcount - names = co.co_varnames - args = list(names[:nargs]) - has_kw = bool(co.co_flags & CO_VARKEYWORDS) - return args, has_kw -except ImportError: - def inspect_func_args(fn): - names, _, has_kw, _ = inspect.getargspec(fn) - return names, bool(has_kw) - -def get_func_kwargs(func): - """Return the set of legal kwargs for the given `func`. - - Uses getargspec so is safe to call for methods, functions, - etc. - - """ - - return inspect.getargspec(func)[0] - -def format_argspec_plus(fn, grouped=True): - """Returns a dictionary of formatted, introspected function arguments. - - A enhanced variant of inspect.formatargspec to support code generation. - - fn - An inspectable callable or tuple of inspect getargspec() results. - grouped - Defaults to True; include (parens, around, argument) lists - - Returns: - - args - Full inspect.formatargspec for fn - self_arg - The name of the first positional argument, varargs[0], or None - if the function defines no positional arguments. - apply_pos - args, re-written in calling rather than receiving syntax. Arguments are - passed positionally. - apply_kw - Like apply_pos, except keyword-ish args are passed as keywords. - - Example:: - - >>> format_argspec_plus(lambda self, a, b, c=3, **d: 123) - {'args': '(self, a, b, c=3, **d)', - 'self_arg': 'self', - 'apply_kw': '(self, a, b, c=c, **d)', - 'apply_pos': '(self, a, b, c, **d)'} - - """ - if callable(fn): - spec = inspect_getfullargspec(fn) - else: - # we accept an existing argspec... 
- spec = fn - args = inspect.formatargspec(*spec) - if spec[0]: - self_arg = spec[0][0] - elif spec[1]: - self_arg = '%s[0]' % spec[1] - else: - self_arg = None - - # Py3K - #apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2], None, spec[4]) - #num_defaults = 0 - #if spec[3]: - # num_defaults += len(spec[3]) - #if spec[4]: - # num_defaults += len(spec[4]) - #name_args = spec[0] + spec[4] - # Py2K - apply_pos = inspect.formatargspec(spec[0], spec[1], spec[2]) - num_defaults = 0 - if spec[3]: - num_defaults += len(spec[3]) - name_args = spec[0] - # end Py2K - - if num_defaults: - defaulted_vals = name_args[0-num_defaults:] - else: - defaulted_vals = () - - apply_kw = inspect.formatargspec(name_args, spec[1], spec[2], defaulted_vals, - formatvalue=lambda x: '=' + x) - if grouped: - return dict(args=args, self_arg=self_arg, - apply_pos=apply_pos, apply_kw=apply_kw) - else: - return dict(args=args[1:-1], self_arg=self_arg, - apply_pos=apply_pos[1:-1], apply_kw=apply_kw[1:-1]) - -def format_argspec_init(method, grouped=True): - """format_argspec_plus with considerations for typical __init__ methods - - Wraps format_argspec_plus with error handling strategies for typical - __init__ cases:: - - object.__init__ -> (self) - other unreflectable (usually C) -> (self, *args, **kwargs) - - """ - try: - return format_argspec_plus(method, grouped=grouped) - except TypeError: - self_arg = 'self' - if method is object.__init__: - args = grouped and '(self)' or 'self' - else: - args = (grouped and '(self, *args, **kwargs)' - or 'self, *args, **kwargs') - return dict(self_arg='self', args=args, apply_pos=args, apply_kw=args) - -def getargspec_init(method): - """inspect.getargspec with considerations for typical __init__ methods - - Wraps inspect.getargspec with error handling for typical __init__ cases:: - - object.__init__ -> (self) - other unreflectable (usually C) -> (self, *args, **kwargs) - - """ - try: - return inspect.getargspec(method) - except TypeError: - if method is object.__init__: - return (['self'], None, None, None) - else: - return (['self'], 'args', 'kwargs', None) - - -def unbound_method_to_callable(func_or_cls): - """Adjust the incoming callable such that a 'self' argument is not required.""" - - if isinstance(func_or_cls, types.MethodType) and not func_or_cls.im_self: - return func_or_cls.im_func - else: - return func_or_cls - -def generic_repr(obj, additional_kw=(), to_inspect=None): - """Produce a __repr__() based on direct association of the __init__() - specification vs. same-named attributes present. 
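
Editorial aside: the `format_argspec_plus()` docstring example above pairs the receiving signature with a calling form that passes defaulted arguments as keywords. A hedged sketch of the same derivation using the modern `inspect` module (`show_call_forms` is an illustrative name, not part of SQLAlchemy):

```python
import inspect

def show_call_forms(fn):
    # Rough modern-inspect analogue of the removed format_argspec_plus():
    # derive the receiving signature plus a calling form in which
    # defaulted arguments are passed as keywords.
    sig = inspect.signature(fn)
    calling = []
    for p in sig.parameters.values():
        if p.kind is p.VAR_POSITIONAL:
            calling.append('*' + p.name)
        elif p.kind is p.VAR_KEYWORD:
            calling.append('**' + p.name)
        elif p.default is not p.empty:
            calling.append('%s=%s' % (p.name, p.name))
        else:
            calling.append(p.name)
    return str(sig), '(%s)' % ', '.join(calling)

print(show_call_forms(lambda self, a, b, c=3, **d: None))
# ('(self, a, b, c=3, **d)', '(self, a, b, c=c, **d)')
```
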
- - """ - if to_inspect is None: - to_inspect = obj - def genargs(): - try: - (args, vargs, vkw, defaults) = inspect.getargspec(to_inspect.__init__) - except TypeError: - return - - default_len = defaults and len(defaults) or 0 - - if not default_len: - for arg in args[1:]: - yield repr(getattr(obj, arg, None)) - if vargs is not None and hasattr(obj, vargs): - yield ', '.join(repr(val) for val in getattr(obj, vargs)) - else: - for arg in args[1:-default_len]: - yield repr(getattr(obj, arg, None)) - for (arg, defval) in zip(args[-default_len:], defaults): - try: - val = getattr(obj, arg, None) - if val != defval: - yield '%s=%r' % (arg, val) - except: - pass - if additional_kw: - for arg, defval in additional_kw: - try: - val = getattr(obj, arg, None) - if val != defval: - yield '%s=%r' % (arg, val) - except: - pass - - return "%s(%s)" % (obj.__class__.__name__, ", ".join(genargs())) - -class portable_instancemethod(object): - """Turn an instancemethod into a (parent, name) pair - to produce a serializable callable. - - """ - def __init__(self, meth): - self.target = meth.im_self - self.name = meth.__name__ - - def __call__(self, *arg, **kw): - return getattr(self.target, self.name)(*arg, **kw) - -def class_hierarchy(cls): - """Return an unordered sequence of all classes related to cls. - - Traverses diamond hierarchies. - - Fibs slightly: subclasses of builtin types are not returned. Thus - class_hierarchy(class A(object)) returns (A, object), not A plus every - class systemwide that derives from object. - - Old-style classes are discarded and hierarchies rooted on them - will not be descended. - - """ - # Py2K - if isinstance(cls, types.ClassType): - return list() - # end Py2K - hier = set([cls]) - process = list(cls.__mro__) - while process: - c = process.pop() - # Py2K - if isinstance(c, types.ClassType): - continue - for b in (_ for _ in c.__bases__ - if _ not in hier and not isinstance(_, types.ClassType)): - # end Py2K - # Py3K - #for b in (_ for _ in c.__bases__ - # if _ not in hier): - process.append(b) - hier.add(b) - # Py3K - #if c.__module__ == 'builtins' or not hasattr(c, '__subclasses__'): - # continue - # Py2K - if c.__module__ == '__builtin__' or not hasattr(c, '__subclasses__'): - continue - # end Py2K - for s in [_ for _ in c.__subclasses__() if _ not in hier]: - process.append(s) - hier.add(s) - return list(hier) - -def iterate_attributes(cls): - """iterate all the keys and attributes associated - with a class, without using getattr(). - - Does not use getattr() so that class-sensitive - descriptors (i.e. property.__get__()) are not called. 
- - """ - keys = dir(cls) - for key in keys: - for c in cls.__mro__: - if key in c.__dict__: - yield (key, c.__dict__[key]) - break - -def monkeypatch_proxied_specials(into_cls, from_cls, skip=None, only=None, - name='self.proxy', from_instance=None): - """Automates delegation of __specials__ for a proxying type.""" - - if only: - dunders = only - else: - if skip is None: - skip = ('__slots__', '__del__', '__getattribute__', - '__metaclass__', '__getstate__', '__setstate__') - dunders = [m for m in dir(from_cls) - if (m.startswith('__') and m.endswith('__') and - not hasattr(into_cls, m) and m not in skip)] - for method in dunders: - try: - fn = getattr(from_cls, method) - if not hasattr(fn, '__call__'): - continue - fn = getattr(fn, 'im_func', fn) - except AttributeError: - continue - try: - spec = inspect.getargspec(fn) - fn_args = inspect.formatargspec(spec[0]) - d_args = inspect.formatargspec(spec[0][1:]) - except TypeError: - fn_args = '(self, *args, **kw)' - d_args = '(*args, **kw)' - - py = ("def %(method)s%(fn_args)s: " - "return %(name)s.%(method)s%(d_args)s" % locals()) - - env = from_instance is not None and {name: from_instance} or {} - exec py in env - try: - env[method].func_defaults = fn.func_defaults - except AttributeError: - pass - setattr(into_cls, method, env[method]) - - -def methods_equivalent(meth1, meth2): - """Return True if the two methods are the same implementation.""" - - # Py3K - #return getattr(meth1, '__func__', meth1) is getattr(meth2, '__func__', meth2) - # Py2K - return getattr(meth1, 'im_func', meth1) is getattr(meth2, 'im_func', meth2) - # end Py2K - -def as_interface(obj, cls=None, methods=None, required=None): - """Ensure basic interface compliance for an instance or dict of callables. - - Checks that ``obj`` implements public methods of ``cls`` or has members - listed in ``methods``. If ``required`` is not supplied, implementing at - least one interface method is sufficient. Methods present on ``obj`` that - are not in the interface are ignored. - - If ``obj`` is a dict and ``dict`` does not meet the interface - requirements, the keys of the dictionary are inspected. Keys present in - ``obj`` that are not in the interface will raise TypeErrors. - - Raises TypeError if ``obj`` does not meet the interface criteria. - - In all passing cases, an object with callable members is returned. In the - simple case, ``obj`` is returned as-is; if dict processing kicks in then - an anonymous class is returned. - - obj - A type, instance, or dictionary of callables. - cls - Optional, a type. All public methods of cls are considered the - interface. An ``obj`` instance of cls will always pass, ignoring - ``required``.. - methods - Optional, a sequence of method names to consider as the interface. - required - Optional, a sequence of mandatory implementations. If omitted, an - ``obj`` that provides at least one interface method is considered - sufficient. As a convenience, required may be a type, in which case - all public methods of the type are required. 
- - """ - if not cls and not methods: - raise TypeError('a class or collection of method names are required') - - if isinstance(cls, type) and isinstance(obj, cls): - return obj - - interface = set(methods or [m for m in dir(cls) if not m.startswith('_')]) - implemented = set(dir(obj)) - - complies = operator.ge - if isinstance(required, type): - required = interface - elif not required: - required = set() - complies = operator.gt - else: - required = set(required) - - if complies(implemented.intersection(interface), required): - return obj - - # No dict duck typing here. - if not type(obj) is dict: - qualifier = complies is operator.gt and 'any of' or 'all of' - raise TypeError("%r does not implement %s: %s" % ( - obj, qualifier, ', '.join(interface))) - - class AnonymousInterface(object): - """A callable-holding shell.""" - - if cls: - AnonymousInterface.__name__ = 'Anonymous' + cls.__name__ - found = set() - - for method, impl in dictlike_iteritems(obj): - if method not in interface: - raise TypeError("%r: unknown in this interface" % method) - if not callable(impl): - raise TypeError("%r=%r is not callable" % (method, impl)) - setattr(AnonymousInterface, method, staticmethod(impl)) - found.add(method) - - if complies(found, required): - return AnonymousInterface - - raise TypeError("dictionary does not contain required keys %s" % - ', '.join(required - found)) - - -class memoized_property(object): - """A read-only @property that is only evaluated once.""" - def __init__(self, fget, doc=None): - self.fget = fget - self.__doc__ = doc or fget.__doc__ - self.__name__ = fget.__name__ - - def __get__(self, obj, cls): - if obj is None: - return self - obj.__dict__[self.__name__] = result = self.fget(obj) - return result - - def _reset(self, obj): - obj.__dict__.pop(self.__name__, None) - -class memoized_instancemethod(object): - """Decorate a method memoize its return value. - - Best applied to no-arg methods: memoization is not sensitive to - argument values, and will always return the same value even when - called with different arguments. - - """ - def __init__(self, fget, doc=None): - self.fget = fget - self.__doc__ = doc or fget.__doc__ - self.__name__ = fget.__name__ - - def __get__(self, obj, cls): - if obj is None: - return self - def oneshot(*args, **kw): - result = self.fget(obj, *args, **kw) - memo = lambda *a, **kw: result - memo.__name__ = self.__name__ - memo.__doc__ = self.__doc__ - obj.__dict__[self.__name__] = memo - return result - oneshot.__name__ = self.__name__ - oneshot.__doc__ = self.__doc__ - return oneshot - -def reset_memoized(instance, name): - instance.__dict__.pop(name, None) - - -class group_expirable_memoized_property(object): - """A family of @memoized_properties that can be expired in tandem.""" - - def __init__(self, attributes=()): - self.attributes = [] - if attributes: - self.attributes.extend(attributes) - - def expire_instance(self, instance): - """Expire all memoized properties for *instance*.""" - stash = instance.__dict__ - for attribute in self.attributes: - stash.pop(attribute, None) - - def __call__(self, fn): - self.attributes.append(fn.__name__) - return memoized_property(fn) - - def method(self, fn): - self.attributes.append(fn.__name__) - return memoized_instancemethod(fn) - -class importlater(object): - """Deferred import object. - - e.g.:: - - somesubmod = importlater("mypackage.somemodule", "somesubmod") - - is equivalent to:: - - from mypackage.somemodule import somesubmod - - except evaluted upon attribute access to "somesubmod". 
- - importlater() currently requires that resolve_all() be - called, typically at the bottom of a package's __init__.py. - This is so that __import__ still called only at - module import time, and not potentially within - a non-main thread later on. - - """ - - _unresolved = set() - - def __init__(self, path, addtl=None): - self._il_path = path - self._il_addtl = addtl - importlater._unresolved.add(self) - - @classmethod - def resolve_all(cls): - for m in list(importlater._unresolved): - m._resolve() - - @property - def _full_path(self): - if self._il_addtl: - return self._il_path + "." + self._il_addtl - else: - return self._il_path - - @memoized_property - def module(self): - if self in importlater._unresolved: - raise ImportError( - "importlater.resolve_all() hasn't been called") - - m = self._initial_import - if self._il_addtl: - m = getattr(m, self._il_addtl) - else: - for token in self._il_path.split(".")[1:]: - m = getattr(m, token) - return m - - def _resolve(self): - importlater._unresolved.discard(self) - if self._il_addtl: - self._initial_import = __import__( - self._il_path, globals(), locals(), - [self._il_addtl]) - else: - self._initial_import = __import__(self._il_path) - - def __getattr__(self, key): - if key == 'module': - raise ImportError("Could not resolve module %s" - % self._full_path) - try: - attr = getattr(self.module, key) - except AttributeError: - raise AttributeError( - "Module %s has no attribute '%s'" % - (self._full_path, key) - ) - self.__dict__[key] = attr - return attr - -# from paste.deploy.converters -def asbool(obj): - if isinstance(obj, (str, unicode)): - obj = obj.strip().lower() - if obj in ['true', 'yes', 'on', 'y', 't', '1']: - return True - elif obj in ['false', 'no', 'off', 'n', 'f', '0']: - return False - else: - raise ValueError("String is not true/false: %r" % obj) - return bool(obj) - -def bool_or_str(*text): - """Return a callable that will evaulate a string as - boolean, or one of a set of "alternate" string values. - - """ - def bool_or_value(obj): - if obj in text: - return obj - else: - return asbool(obj) - return bool_or_value - -def asint(value): - """Coerce to integer.""" - - if value is None: - return value - return int(value) - - -def coerce_kw_type(kw, key, type_, flexi_bool=True): - """If 'key' is present in dict 'kw', coerce its value to type 'type\_' if - necessary. If 'flexi_bool' is True, the string '0' is considered false - when coercing to boolean. - """ - - if key in kw and type(kw[key]) is not type_ and kw[key] is not None: - if type_ is bool and flexi_bool: - kw[key] = asbool(kw[key]) - else: - kw[key] = type_(kw[key]) - - -def constructor_copy(obj, cls, **kw): - """Instantiate cls using the __dict__ of obj as constructor arguments. - - Uses inspect to match the named arguments of ``cls``. - - """ - - names = get_cls_kwargs(cls) - kw.update((k, obj.__dict__[k]) for k in names if k in obj.__dict__) - return cls(**kw) - - -def counter(): - """Return a threadsafe counter function.""" - - lock = threading.Lock() - counter = itertools.count(1L) - - # avoid the 2to3 "next" transformation... - def _next(): - lock.acquire() - try: - return counter.next() - finally: - lock.release() - - return _next - -def duck_type_collection(specimen, default=None): - """Given an instance or class, guess if it is or is acting as one of - the basic collection types: list, set and dict. If the __emulates__ - property is present, return that preferentially. 
- """ - - if hasattr(specimen, '__emulates__'): - # canonicalize set vs sets.Set to a standard: the builtin set - if (specimen.__emulates__ is not None and - issubclass(specimen.__emulates__, set_types)): - return set - else: - return specimen.__emulates__ - - isa = isinstance(specimen, type) and issubclass or isinstance - if isa(specimen, list): - return list - elif isa(specimen, set_types): - return set - elif isa(specimen, dict): - return dict - - if hasattr(specimen, 'append'): - return list - elif hasattr(specimen, 'add'): - return set - elif hasattr(specimen, 'set'): - return dict - else: - return default - -def assert_arg_type(arg, argtype, name): - if isinstance(arg, argtype): - return arg - else: - if isinstance(argtype, tuple): - raise exc.ArgumentError( - "Argument '%s' is expected to be one of type %s, got '%s'" % - (name, ' or '.join("'%s'" % a for a in argtype), type(arg))) - else: - raise exc.ArgumentError( - "Argument '%s' is expected to be of type '%s', got '%s'" % - (name, argtype, type(arg))) - - -def dictlike_iteritems(dictlike): - """Return a (key, value) iterator for almost any dict-like object.""" - - # Py3K - #if hasattr(dictlike, 'items'): - # return dictlike.items() - # Py2K - if hasattr(dictlike, 'iteritems'): - return dictlike.iteritems() - elif hasattr(dictlike, 'items'): - return iter(dictlike.items()) - # end Py2K - - getter = getattr(dictlike, '__getitem__', getattr(dictlike, 'get', None)) - if getter is None: - raise TypeError( - "Object '%r' is not dict-like" % dictlike) - - if hasattr(dictlike, 'iterkeys'): - def iterator(): - for key in dictlike.iterkeys(): - yield key, getter(key) - return iterator() - elif hasattr(dictlike, 'keys'): - return iter((key, getter(key)) for key in dictlike.keys()) - else: - raise TypeError( - "Object '%r' is not dict-like" % dictlike) - - -class classproperty(property): - """A decorator that behaves like @property except that operates - on classes rather than instances. - - The decorator is currently special when using the declarative - module, but note that the - :class:`~.sqlalchemy.ext.declarative.declared_attr` - decorator should be used for this purpose with declarative. - - """ - - def __init__(self, fget, *arg, **kw): - super(classproperty, self).__init__(fget, *arg, **kw) - self.__doc__ = fget.__doc__ - - def __get__(desc, self, cls): - return desc.fget(cls) - - -class _symbol(object): - def __init__(self, name, doc=None): - """Construct a new named symbol.""" - assert isinstance(name, str) - self.name = name - if doc: - self.__doc__ = doc - def __reduce__(self): - return symbol, (self.name,) - def __repr__(self): - return "" % self.name - -_symbol.__name__ = 'symbol' - - -class symbol(object): - """A constant symbol. - - >>> symbol('foo') is symbol('foo') - True - >>> symbol('foo') - - - A slight refinement of the MAGICCOOKIE=object() pattern. The primary - advantage of symbol() is its repr(). They are also singletons. - - Repeated calls of symbol('name') will all return the same instance. - - The optional ``doc`` argument assigns to ``__doc__``. This - is strictly so that Sphinx autoattr picks up the docstring we want - (it doesn't appear to pick up the in-module docstring if the datamember - is in a different module - autoattribute also blows up completely). - If Sphinx fixes/improves this then we would no longer need - ``doc`` here. 
- - """ - symbols = {} - _lock = threading.Lock() - - def __new__(cls, name, doc=None): - cls._lock.acquire() - try: - sym = cls.symbols.get(name) - if sym is None: - cls.symbols[name] = sym = _symbol(name, doc) - return sym - finally: - symbol._lock.release() - - -_creation_order = 1 -def set_creation_order(instance): - """Assign a '_creation_order' sequence to the given instance. - - This allows multiple instances to be sorted in order of creation - (typically within a single thread; the counter is not particularly - threadsafe). - - """ - global _creation_order - instance._creation_order = _creation_order - _creation_order +=1 - -def warn_exception(func, *args, **kwargs): - """executes the given function, catches all exceptions and converts to a warning.""" - try: - return func(*args, **kwargs) - except: - warn("%s('%s') ignored" % sys.exc_info()[0:2]) - - -def warn(msg, stacklevel=3): - """Issue a warning. - - If msg is a string, :class:`.exc.SAWarning` is used as - the category. - - .. note:: - - This function is swapped out when the test suite - runs, with a compatible version that uses - warnings.warn_explicit, so that the warnings registry can - be controlled. - - """ - if isinstance(msg, basestring): - warnings.warn(msg, exc.SAWarning, stacklevel=stacklevel) - else: - warnings.warn(msg, stacklevel=stacklevel) - -_SQLA_RE = re.compile(r'sqlalchemy/([a-z_]+/){0,2}[a-z_]+\.py') -_UNITTEST_RE = re.compile(r'unit(?:2|test2?/)') -def chop_traceback(tb, exclude_prefix=_UNITTEST_RE, exclude_suffix=_SQLA_RE): - """Chop extraneous lines off beginning and end of a traceback. - - :param tb: - a list of traceback lines as returned by ``traceback.format_stack()`` - - :param exclude_prefix: - a regular expression object matching lines to skip at beginning of ``tb`` - - :param exclude_suffix: - a regular expression object matching lines to skip at end of ``tb`` - """ - start = 0 - end = len(tb) - 1 - while start <= end and exclude_prefix.search(tb[start]): - start += 1 - while start <= end and exclude_suffix.search(tb[end]): - end -= 1 - return tb[start:end+1] - -NoneType = type(None) diff --git a/libs/sqlalchemy/util/queue.py b/libs/sqlalchemy/util/queue.py deleted file mode 100644 index acccf3c5..00000000 --- a/libs/sqlalchemy/util/queue.py +++ /dev/null @@ -1,225 +0,0 @@ -# util/queue.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""An adaptation of Py2.3/2.4's Queue module which supports reentrant -behavior, using RLock instead of Lock for its mutex object. - -This is to support the connection pool's usage of weakref callbacks to return -connections to the underlying Queue, which can in extremely -rare cases be invoked within the ``get()`` method of the Queue itself, -producing a ``put()`` inside the ``get()`` and therefore a reentrant -condition.""" - -from collections import deque -from time import time as _time -from sqlalchemy.util import threading -import sys - -if sys.version_info < (2, 6): - def notify_all(condition): - condition.notify() -else: - def notify_all(condition): - condition.notify_all() - - -__all__ = ['Empty', 'Full', 'Queue'] - -class Empty(Exception): - "Exception raised by Queue.get(block=0)/get_nowait()." - - pass - -class Full(Exception): - "Exception raised by Queue.put(block=0)/put_nowait()." 
- - pass - -class SAAbort(Exception): - "Special SQLA exception to abort waiting" - def __init__(self, context): - self.context = context - -class Queue: - def __init__(self, maxsize=0): - """Initialize a queue object with a given maximum size. - - If `maxsize` is <= 0, the queue size is infinite. - """ - - self._init(maxsize) - # mutex must be held whenever the queue is mutating. All methods - # that acquire mutex must release it before returning. mutex - # is shared between the two conditions, so acquiring and - # releasing the conditions also acquires and releases mutex. - self.mutex = threading.RLock() - # Notify not_empty whenever an item is added to the queue; a - # thread waiting to get is notified then. - self.not_empty = threading.Condition(self.mutex) - # Notify not_full whenever an item is removed from the queue; - # a thread waiting to put is notified then. - self.not_full = threading.Condition(self.mutex) - - # when this is set, SAAbort is raised within get(). - self._sqla_abort_context = False - - def qsize(self): - """Return the approximate size of the queue (not reliable!).""" - - self.mutex.acquire() - n = self._qsize() - self.mutex.release() - return n - - def empty(self): - """Return True if the queue is empty, False otherwise (not - reliable!).""" - - self.mutex.acquire() - n = self._empty() - self.mutex.release() - return n - - def full(self): - """Return True if the queue is full, False otherwise (not - reliable!).""" - - self.mutex.acquire() - n = self._full() - self.mutex.release() - return n - - def put(self, item, block=True, timeout=None): - """Put an item into the queue. - - If optional args `block` is True and `timeout` is None (the - default), block if necessary until a free slot is - available. If `timeout` is a positive number, it blocks at - most `timeout` seconds and raises the ``Full`` exception if no - free slot was available within that time. Otherwise (`block` - is false), put an item on the queue if a free slot is - immediately available, else raise the ``Full`` exception - (`timeout` is ignored in that case). - """ - - self.not_full.acquire() - try: - if not block: - if self._full(): - raise Full - elif timeout is None: - while self._full(): - self.not_full.wait() - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._full(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Full - self.not_full.wait(remaining) - self._put(item) - self.not_empty.notify() - finally: - self.not_full.release() - - def put_nowait(self, item): - """Put an item into the queue without blocking. - - Only enqueue the item if a free slot is immediately available. - Otherwise raise the ``Full`` exception. - """ - return self.put(item, False) - - def get(self, block=True, timeout=None): - """Remove and return an item from the queue. - - If optional args `block` is True and `timeout` is None (the - default), block if necessary until an item is available. If - `timeout` is a positive number, it blocks at most `timeout` - seconds and raises the ``Empty`` exception if no item was - available within that time. Otherwise (`block` is false), - return an item if one is immediately available, else raise the - ``Empty`` exception (`timeout` is ignored in that case). 
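The ``put``/``get`` contract described above is the same one implemented by the stdlib queue this class adapts; only the reentrant mutex and ``abort()`` differ. A minimal sketch of those semantics, using the stdlib class as a stand-in for the deleted one::

    try:
        import queue                  # Python 3 name
    except ImportError:
        import Queue as queue         # Python 2 name, matching this file's era

    q = queue.Queue(maxsize=1)
    q.put('a')                                 # a slot is free, returns at once
    try:
        q.put('b', block=True, timeout=0.1)    # full: waits 0.1s, raises Full
    except queue.Full:
        pass
    assert q.get() == 'a'
    try:
        q.get(block=False)                     # empty + non-blocking: raises Empty
    except queue.Empty:
        pass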
- """ - - self.not_empty.acquire() - try: - if not block: - if self._empty(): - raise Empty - elif timeout is None: - while self._empty(): - self.not_empty.wait() - if self._sqla_abort_context: - raise SAAbort(self._sqla_abort_context) - else: - if timeout < 0: - raise ValueError("'timeout' must be a positive number") - endtime = _time() + timeout - while self._empty(): - remaining = endtime - _time() - if remaining <= 0.0: - raise Empty - self.not_empty.wait(remaining) - if self._sqla_abort_context: - raise SAAbort(self._sqla_abort_context) - item = self._get() - self.not_full.notify() - return item - finally: - self.not_empty.release() - - def abort(self, context): - """Issue an 'abort', will force any thread waiting on get() - to stop waiting and raise SAAbort. - - """ - self._sqla_abort_context = context - if not self.not_full.acquire(False): - return - try: - notify_all(self.not_empty) - finally: - self.not_full.release() - - def get_nowait(self): - """Remove and return an item from the queue without blocking. - - Only get an item if one is immediately available. Otherwise - raise the ``Empty`` exception. - """ - - return self.get(False) - - # Override these methods to implement other queue organizations - # (e.g. stack or priority queue). - # These will only be called with appropriate locks held - - # Initialize the queue representation - def _init(self, maxsize): - self.maxsize = maxsize - self.queue = deque() - - def _qsize(self): - return len(self.queue) - - # Check whether the queue is empty - def _empty(self): - return not self.queue - - # Check whether the queue is full - def _full(self): - return self.maxsize > 0 and len(self.queue) == self.maxsize - - # Put a new item in the queue - def _put(self, item): - self.queue.append(item) - - # Get an item from the queue - def _get(self): - return self.queue.popleft() diff --git a/libs/sqlalchemy/util/topological.py b/libs/sqlalchemy/util/topological.py deleted file mode 100644 index 86e42c1f..00000000 --- a/libs/sqlalchemy/util/topological.py +++ /dev/null @@ -1,92 +0,0 @@ -# util/topological.py -# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors -# -# This module is part of SQLAlchemy and is released under -# the MIT License: http://www.opensource.org/licenses/mit-license.php - -"""Topological sorting algorithms.""" - -from sqlalchemy.exc import CircularDependencyError -from sqlalchemy import util - - -__all__ = ['sort', 'sort_as_subsets', 'find_cycles'] - -def sort_as_subsets(tuples, allitems): - - edges = util.defaultdict(set) - for parent, child in tuples: - edges[child].add(parent) - - todo = set(allitems) - - while todo: - output = set() - for node in list(todo): - if not todo.intersection(edges[node]): - output.add(node) - - if not output: - raise CircularDependencyError( - "Circular dependency detected.", - find_cycles(tuples, allitems), - _gen_edges(edges) - ) - - todo.difference_update(output) - yield output - -def sort(tuples, allitems): - """sort the given list of items by dependency. - - 'tuples' is a list of tuples representing a partial ordering. - """ - - for set_ in sort_as_subsets(tuples, allitems): - for s in set_: - yield s - -def find_cycles(tuples, allitems): - # straight from gvr with some mods - - edges = util.defaultdict(set) - for parent, child in tuples: - edges[parent].add(child) - nodes_to_test = set(edges) - - output = set() - - # we'd like to find all nodes that are - # involved in cycles, so we do the full - # pass through the whole thing for each - # node in the original list. 
- - # we can go just through parent edge nodes. - # if a node is only a child and never a parent, - # by definition it can't be part of a cycle. same - # if it's not in the edges at all. - for node in nodes_to_test: - stack = [node] - todo = nodes_to_test.difference(stack) - while stack: - top = stack[-1] - for node in edges[top]: - if node in stack: - cyc = stack[stack.index(node):] - todo.difference_update(cyc) - output.update(cyc) - - if node in todo: - stack.append(node) - todo.remove(node) - break - else: - node = stack.pop() - return output - -def _gen_edges(edges): - return set([ - (right, left) - for left in edges - for right in edges[left] - ]) diff --git a/libs/tempita/__init__.py b/libs/tempita/__init__.py deleted file mode 100644 index 2af21476..00000000 --- a/libs/tempita/__init__.py +++ /dev/null @@ -1,1160 +0,0 @@ -""" -A small templating language - -This implements a small templating language. This language implements -if/elif/else, for/continue/break, expressions, and blocks of Python -code. The syntax is:: - - {{any expression (function calls etc)}} - {{any expression | filter}} - {{for x in y}}...{{endfor}} - {{if x}}x{{elif y}}y{{else}}z{{endif}} - {{py:x=1}} - {{py: - def foo(bar): - return 'baz' - }} - {{default var = default_value}} - {{# comment}} - -You use this with the ``Template`` class or the ``sub`` shortcut. -The ``Template`` class takes the template string and the name of -the template (for errors) and a default namespace. Then (like -``string.Template``) you can call the ``tmpl.substitute(**kw)`` -method to make a substitution (or ``tmpl.substitute(a_dict)``). - -``sub(content, **kw)`` substitutes the template immediately. You -can use ``__name='tmpl.html'`` to set the name of the template. - -If there are syntax errors ``TemplateError`` will be raised. 
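A short usage sketch of the interface this docstring describes, assuming the same ``tempita`` package the vendored copy came from (still installable from PyPI)::

    from tempita import Template, sub

    print(sub('Hello {{name}}!', name='world'))        # -> Hello world!

    tmpl = Template(
        '{{for loop, i in looper(items)}}'
        '{{i}}{{if not loop.last}}, {{endif}}'
        '{{endfor}}',
        name='items.txt')                              # name is only used in errors
    print(tmpl.substitute(items=['a', 'b', 'c']))      # -> a, b, c

``looper`` is available inside templates because it is part of the default namespace shown below.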
-""" - -import re -import sys -import cgi -from urllib import quote as url_quote -import os -import tokenize -from cStringIO import StringIO -from tempita._looper import looper -from tempita.compat3 import bytes, basestring_, next, is_unicode, coerce_text - -__all__ = ['TemplateError', 'Template', 'sub', 'HTMLTemplate', - 'sub_html', 'html', 'bunch'] - -token_re = re.compile(r'\{\{|\}\}') -in_re = re.compile(r'\s+in\s+') -var_re = re.compile(r'^[a-z_][a-z0-9_]*$', re.I) - - -class TemplateError(Exception): - """Exception raised while parsing a template - """ - - def __init__(self, message, position, name=None): - Exception.__init__(self, message) - self.position = position - self.name = name - - def __str__(self): - msg = ' '.join(self.args) - if self.position: - msg = '%s at line %s column %s' % ( - msg, self.position[0], self.position[1]) - if self.name: - msg += ' in %s' % self.name - return msg - - -class _TemplateContinue(Exception): - pass - - -class _TemplateBreak(Exception): - pass - - -def get_file_template(name, from_template): - path = os.path.join(os.path.dirname(from_template.name), name) - return from_template.__class__.from_filename( - path, namespace=from_template.namespace, - get_template=from_template.get_template) - - -class Template(object): - - default_namespace = { - 'start_braces': '{{', - 'end_braces': '}}', - 'looper': looper, - } - - default_encoding = 'utf8' - default_inherit = None - - def __init__(self, content, name=None, namespace=None, stacklevel=None, - get_template=None, default_inherit=None, line_offset=0): - self.content = content - self._unicode = is_unicode(content) - if name is None and stacklevel is not None: - try: - caller = sys._getframe(stacklevel) - except ValueError: - pass - else: - globals = caller.f_globals - lineno = caller.f_lineno - if '__file__' in globals: - name = globals['__file__'] - if name.endswith('.pyc') or name.endswith('.pyo'): - name = name[:-1] - elif '__name__' in globals: - name = globals['__name__'] - else: - name = '' - if lineno: - name += ':%s' % lineno - self.name = name - self._parsed = parse(content, name=name, line_offset=line_offset) - if namespace is None: - namespace = {} - self.namespace = namespace - self.get_template = get_template - if default_inherit is not None: - self.default_inherit = default_inherit - - def from_filename(cls, filename, namespace=None, encoding=None, - default_inherit=None, get_template=get_file_template): - f = open(filename, 'rb') - c = f.read() - f.close() - if encoding: - c = c.decode(encoding) - return cls(content=c, name=filename, namespace=namespace, - default_inherit=default_inherit, get_template=get_template) - - from_filename = classmethod(from_filename) - - def __repr__(self): - return '<%s %s name=%r>' % ( - self.__class__.__name__, - hex(id(self))[2:], self.name) - - def substitute(self, *args, **kw): - if args: - if kw: - raise TypeError( - "You can only give positional *or* keyword arguments") - if len(args) > 1: - raise TypeError( - "You can only give one positional argument") - if not hasattr(args[0], 'items'): - raise TypeError( - "If you pass in a single argument, you must pass in a dictionary-like object (with a .items() method); you gave %r" - % (args[0],)) - kw = args[0] - ns = kw - ns['__template_name__'] = self.name - if self.namespace: - ns.update(self.namespace) - result, defs, inherit = self._interpret(ns) - if not inherit: - inherit = self.default_inherit - if inherit: - result = self._interpret_inherit(result, defs, inherit, ns) - return result - - def 
_interpret(self, ns): - __traceback_hide__ = True - parts = [] - defs = {} - self._interpret_codes(self._parsed, ns, out=parts, defs=defs) - if '__inherit__' in defs: - inherit = defs.pop('__inherit__') - else: - inherit = None - return ''.join(parts), defs, inherit - - def _interpret_inherit(self, body, defs, inherit_template, ns): - __traceback_hide__ = True - if not self.get_template: - raise TemplateError( - 'You cannot use inheritance without passing in get_template', - position=None, name=self.name) - templ = self.get_template(inherit_template, self) - self_ = TemplateObject(self.name) - for name, value in defs.iteritems(): - setattr(self_, name, value) - self_.body = body - ns = ns.copy() - ns['self'] = self_ - return templ.substitute(ns) - - def _interpret_codes(self, codes, ns, out, defs): - __traceback_hide__ = True - for item in codes: - if isinstance(item, basestring_): - out.append(item) - else: - self._interpret_code(item, ns, out, defs) - - def _interpret_code(self, code, ns, out, defs): - __traceback_hide__ = True - name, pos = code[0], code[1] - if name == 'py': - self._exec(code[2], ns, pos) - elif name == 'continue': - raise _TemplateContinue() - elif name == 'break': - raise _TemplateBreak() - elif name == 'for': - vars, expr, content = code[2], code[3], code[4] - expr = self._eval(expr, ns, pos) - self._interpret_for(vars, expr, content, ns, out, defs) - elif name == 'cond': - parts = code[2:] - self._interpret_if(parts, ns, out, defs) - elif name == 'expr': - parts = code[2].split('|') - base = self._eval(parts[0], ns, pos) - for part in parts[1:]: - func = self._eval(part, ns, pos) - base = func(base) - out.append(self._repr(base, pos)) - elif name == 'default': - var, expr = code[2], code[3] - if var not in ns: - result = self._eval(expr, ns, pos) - ns[var] = result - elif name == 'inherit': - expr = code[2] - value = self._eval(expr, ns, pos) - defs['__inherit__'] = value - elif name == 'def': - name = code[2] - signature = code[3] - parts = code[4] - ns[name] = defs[name] = TemplateDef(self, name, signature, body=parts, ns=ns, - pos=pos) - elif name == 'comment': - return - else: - assert 0, "Unknown code: %r" % name - - def _interpret_for(self, vars, expr, content, ns, out, defs): - __traceback_hide__ = True - for item in expr: - if len(vars) == 1: - ns[vars[0]] = item - else: - if len(vars) != len(item): - raise ValueError( - 'Need %i items to unpack (got %i items)' - % (len(vars), len(item))) - for name, value in zip(vars, item): - ns[name] = value - try: - self._interpret_codes(content, ns, out, defs) - except _TemplateContinue: - continue - except _TemplateBreak: - break - - def _interpret_if(self, parts, ns, out, defs): - __traceback_hide__ = True - # @@: if/else/else gets through - for part in parts: - assert not isinstance(part, basestring_) - name, pos = part[0], part[1] - if name == 'else': - result = True - else: - result = self._eval(part[2], ns, pos) - if result: - self._interpret_codes(part[3], ns, out, defs) - break - - def _eval(self, code, ns, pos): - __traceback_hide__ = True - try: - try: - value = eval(code, self.default_namespace, ns) - except SyntaxError, e: - raise SyntaxError( - 'invalid syntax in expression: %s' % code) - return value - except: - exc_info = sys.exc_info() - e = exc_info[1] - if getattr(e, 'args', None): - arg0 = e.args[0] - else: - arg0 = coerce_text(e) - e.args = (self._add_line_info(arg0, pos),) - raise exc_info[0], e, exc_info[2] - - def _exec(self, code, ns, pos): - __traceback_hide__ = True - try: - exec code in 
self.default_namespace, ns - except: - exc_info = sys.exc_info() - e = exc_info[1] - if e.args: - e.args = (self._add_line_info(e.args[0], pos),) - else: - e.args = (self._add_line_info(None, pos),) - raise exc_info[0], e, exc_info[2] - - def _repr(self, value, pos): - __traceback_hide__ = True - try: - if value is None: - return '' - if self._unicode: - try: - value = unicode(value) - except UnicodeDecodeError: - value = bytes(value) - else: - if not isinstance(value, basestring_): - value = coerce_text(value) - if (is_unicode(value) - and self.default_encoding): - value = value.encode(self.default_encoding) - except: - exc_info = sys.exc_info() - e = exc_info[1] - e.args = (self._add_line_info(e.args[0], pos),) - raise exc_info[0], e, exc_info[2] - else: - if self._unicode and isinstance(value, bytes): - if not self.default_encoding: - raise UnicodeDecodeError( - 'Cannot decode bytes value %r into unicode ' - '(no default_encoding provided)' % value) - try: - value = value.decode(self.default_encoding) - except UnicodeDecodeError, e: - raise UnicodeDecodeError( - e.encoding, - e.object, - e.start, - e.end, - e.reason + ' in string %r' % value) - elif not self._unicode and is_unicode(value): - if not self.default_encoding: - raise UnicodeEncodeError( - 'Cannot encode unicode value %r into bytes ' - '(no default_encoding provided)' % value) - value = value.encode(self.default_encoding) - return value - - def _add_line_info(self, msg, pos): - msg = "%s at line %s column %s" % ( - msg, pos[0], pos[1]) - if self.name: - msg += " in file %s" % self.name - return msg - - -def sub(content, **kw): - name = kw.get('__name') - tmpl = Template(content, name=name) - return tmpl.substitute(kw) - - -def paste_script_template_renderer(content, vars, filename=None): - tmpl = Template(content, name=filename) - return tmpl.substitute(vars) - - -class bunch(dict): - - def __init__(self, **kw): - for name, value in kw.iteritems(): - setattr(self, name, value) - - def __setattr__(self, name, value): - self[name] = value - - def __getattr__(self, name): - try: - return self[name] - except KeyError: - raise AttributeError(name) - - def __getitem__(self, key): - if 'default' in self: - try: - return dict.__getitem__(self, key) - except KeyError: - return dict.__getitem__(self, 'default') - else: - return dict.__getitem__(self, key) - - def __repr__(self): - items = [ - (k, v) for k, v in self.iteritems()] - items.sort() - return '<%s %s>' % ( - self.__class__.__name__, - ' '.join(['%s=%r' % (k, v) for k, v in items])) - -############################################################ -## HTML Templating -############################################################ - - -class html(object): - - def __init__(self, value): - self.value = value - - def __str__(self): - return self.value - - def __html__(self): - return self.value - - def __repr__(self): - return '<%s %r>' % ( - self.__class__.__name__, self.value) - - -def html_quote(value, force=True): - if not force and hasattr(value, '__html__'): - return value.__html__() - if value is None: - return '' - if not isinstance(value, basestring_): - value = coerce_text(value) - if sys.version >= "3" and isinstance(value, bytes): - value = cgi.escape(value.decode('latin1'), 1) - value = value.encode('latin1') - else: - value = cgi.escape(value, 1) - if sys.version < "3": - if is_unicode(value): - value = value.encode('ascii', 'xmlcharrefreplace') - return value - - -def url(v): - v = coerce_text(v) - if is_unicode(v): - v = v.encode('utf8') - return url_quote(v) - - -def 
attr(**kw):
-    kw = list(kw.iteritems())
-    kw.sort()
-    parts = []
-    for name, value in kw:
-        if value is None:
-            continue
-        if name.endswith('_'):
-            name = name[:-1]
-        parts.append('%s="%s"' % (html_quote(name), html_quote(value)))
-    return html(' '.join(parts))
-
-
-class HTMLTemplate(Template):
-
-    default_namespace = Template.default_namespace.copy()
-    default_namespace.update(dict(
-        html=html,
-        attr=attr,
-        url=url,
-        html_quote=html_quote,
-    ))
-
-    def _repr(self, value, pos):
-        if hasattr(value, '__html__'):
-            value = value.__html__()
-            quote = False
-        else:
-            quote = True
-        plain = Template._repr(self, value, pos)
-        if quote:
-            return html_quote(plain)
-        else:
-            return plain
-
-
-def sub_html(content, **kw):
-    name = kw.get('__name')
-    tmpl = HTMLTemplate(content, name=name)
-    return tmpl.substitute(kw)
-
-
-class TemplateDef(object):
-    def __init__(self, template, func_name, func_signature,
-                 body, ns, pos, bound_self=None):
-        self._template = template
-        self._func_name = func_name
-        self._func_signature = func_signature
-        self._body = body
-        self._ns = ns
-        self._pos = pos
-        self._bound_self = bound_self
-
-    def __repr__(self):
-        return '<tempita function %s(%s) at %s:%s>' % (
-            self._func_name, self._func_signature,
-            self._template.name, self._pos)
-
-    def __str__(self):
-        return self()
-
-    def __call__(self, *args, **kw):
-        values = self._parse_signature(args, kw)
-        ns = self._ns.copy()
-        ns.update(values)
-        if self._bound_self is not None:
-            ns['self'] = self._bound_self
-        out = []
-        subdefs = {}
-        self._template._interpret_codes(self._body, ns, out, subdefs)
-        return ''.join(out)
-
-    def __get__(self, obj, type=None):
-        if obj is None:
-            return self
-        return self.__class__(
-            self._template, self._func_name, self._func_signature,
-            self._body, self._ns, self._pos, bound_self=obj)
-
-    def _parse_signature(self, args, kw):
-        values = {}
-        sig_args, var_args, var_kw, defaults = self._func_signature
-        extra_kw = {}
-        for name, value in kw.iteritems():
-            if not var_kw and name not in sig_args:
-                raise TypeError(
-                    'Unexpected argument %s' % name)
-            if name in sig_args:
-                values[sig_args] = value
-            else:
-                extra_kw[name] = value
-        args = list(args)
-        sig_args = list(sig_args)
-        while args:
-            while sig_args and sig_args[0] in values:
-                sig_args.pop(0)
-            if sig_args:
-                name = sig_args.pop(0)
-                values[name] = args.pop(0)
-            elif var_args:
-                values[var_args] = tuple(args)
-                break
-            else:
-                raise TypeError(
-                    'Extra position arguments: %s'
-                    % ', '.join(repr(v) for v in args))
-        for name, value_expr in defaults.iteritems():
-            if name not in values:
-                values[name] = self._template._eval(
-                    value_expr, self._ns, self._pos)
-        for name in sig_args:
-            if name not in values:
-                raise TypeError(
-                    'Missing argument: %s' % name)
-        if var_kw:
-            values[var_kw] = extra_kw
-        return values
-
-
-class TemplateObject(object):
-
-    def __init__(self, name):
-        self.__name = name
-        self.get = TemplateObjectGetter(self)
-
-    def __repr__(self):
-        return '<%s %s>' % (self.__class__.__name__, self.__name)
-
-
-class TemplateObjectGetter(object):
-
-    def __init__(self, template_obj):
-        self.__template_obj = template_obj
-
-    def __getattr__(self, attr):
-        return getattr(self.__template_obj, attr, Empty)
-
-    def __repr__(self):
-        return '<%s around %r>' % (self.__class__.__name__, self.__template_obj)
-
-
-class _Empty(object):
-    def __call__(self, *args, **kw):
-        return self
-
-    def __str__(self):
-        return ''
-
-    def __repr__(self):
-        return 'Empty'
-
-    def __unicode__(self):
-        return u''
-
-    def __iter__(self):
-        return iter(())
-
-    def
__bool__(self): - return False - - if sys.version < "3": - __nonzero__ = __bool__ - -Empty = _Empty() -del _Empty - -############################################################ -## Lexing and Parsing -############################################################ - - -def lex(s, name=None, trim_whitespace=True, line_offset=0): - """ - Lex a string into chunks: - - >>> lex('hey') - ['hey'] - >>> lex('hey {{you}}') - ['hey ', ('you', (1, 7))] - >>> lex('hey {{') - Traceback (most recent call last): - ... - TemplateError: No }} to finish last expression at line 1 column 7 - >>> lex('hey }}') - Traceback (most recent call last): - ... - TemplateError: }} outside expression at line 1 column 7 - >>> lex('hey {{ {{') - Traceback (most recent call last): - ... - TemplateError: {{ inside expression at line 1 column 10 - - """ - in_expr = False - chunks = [] - last = 0 - last_pos = (1, 1) - for match in token_re.finditer(s): - expr = match.group(0) - pos = find_position(s, match.end(), line_offset) - if expr == '{{' and in_expr: - raise TemplateError('{{ inside expression', position=pos, - name=name) - elif expr == '}}' and not in_expr: - raise TemplateError('}} outside expression', position=pos, - name=name) - if expr == '{{': - part = s[last:match.start()] - if part: - chunks.append(part) - in_expr = True - else: - chunks.append((s[last:match.start()], last_pos)) - in_expr = False - last = match.end() - last_pos = pos - if in_expr: - raise TemplateError('No }} to finish last expression', - name=name, position=last_pos) - part = s[last:] - if part: - chunks.append(part) - if trim_whitespace: - chunks = trim_lex(chunks) - return chunks - -statement_re = re.compile(r'^(?:if |elif |for |def |inherit |default |py:)') -single_statements = ['else', 'endif', 'endfor', 'enddef', 'continue', 'break'] -trail_whitespace_re = re.compile(r'\n\r?[\t ]*$') -lead_whitespace_re = re.compile(r'^[\t ]*\n') - - -def trim_lex(tokens): - r""" - Takes a lexed set of tokens, and removes whitespace when there is - a directive on a line by itself: - - >>> tokens = lex('{{if x}}\nx\n{{endif}}\ny', trim_whitespace=False) - >>> tokens - [('if x', (1, 3)), '\nx\n', ('endif', (3, 3)), '\ny'] - >>> trim_lex(tokens) - [('if x', (1, 3)), 'x\n', ('endif', (3, 3)), 'y'] - """ - last_trim = None - for i in range(len(tokens)): - current = tokens[i] - if isinstance(tokens[i], basestring_): - # we don't trim this - continue - item = current[0] - if not statement_re.search(item) and item not in single_statements: - continue - if not i: - prev = '' - else: - prev = tokens[i - 1] - if i + 1 >= len(tokens): - next_chunk = '' - else: - next_chunk = tokens[i + 1] - if (not isinstance(next_chunk, basestring_) - or not isinstance(prev, basestring_)): - continue - prev_ok = not prev or trail_whitespace_re.search(prev) - if i == 1 and not prev.strip(): - prev_ok = True - if last_trim is not None and last_trim + 2 == i and not prev.strip(): - prev_ok = 'last' - if (prev_ok - and (not next_chunk or lead_whitespace_re.search(next_chunk) - or (i == len(tokens) - 2 and not next_chunk.strip()))): - if prev: - if ((i == 1 and not prev.strip()) - or prev_ok == 'last'): - tokens[i - 1] = '' - else: - m = trail_whitespace_re.search(prev) - # +1 to leave the leading \n on: - prev = prev[:m.start() + 1] - tokens[i - 1] = prev - if next_chunk: - last_trim = i - if i == len(tokens) - 2 and not next_chunk.strip(): - tokens[i + 1] = '' - else: - m = lead_whitespace_re.search(next_chunk) - next_chunk = next_chunk[m.end():] - tokens[i + 1] = next_chunk - return 
tokens - - -def find_position(string, index, line_offset): - """Given a string and index, return (line, column)""" - leading = string[:index].splitlines() - return (len(leading) + line_offset, len(leading[-1]) + 1) - - -def parse(s, name=None, line_offset=0): - r""" - Parses a string into a kind of AST - - >>> parse('{{x}}') - [('expr', (1, 3), 'x')] - >>> parse('foo') - ['foo'] - >>> parse('{{if x}}test{{endif}}') - [('cond', (1, 3), ('if', (1, 3), 'x', ['test']))] - >>> parse('series->{{for x in y}}x={{x}}{{endfor}}') - ['series->', ('for', (1, 11), ('x',), 'y', ['x=', ('expr', (1, 27), 'x')])] - >>> parse('{{for x, y in z:}}{{continue}}{{endfor}}') - [('for', (1, 3), ('x', 'y'), 'z', [('continue', (1, 21))])] - >>> parse('{{py:x=1}}') - [('py', (1, 3), 'x=1')] - >>> parse('{{if x}}a{{elif y}}b{{else}}c{{endif}}') - [('cond', (1, 3), ('if', (1, 3), 'x', ['a']), ('elif', (1, 12), 'y', ['b']), ('else', (1, 23), None, ['c']))] - - Some exceptions:: - - >>> parse('{{continue}}') - Traceback (most recent call last): - ... - TemplateError: continue outside of for loop at line 1 column 3 - >>> parse('{{if x}}foo') - Traceback (most recent call last): - ... - TemplateError: No {{endif}} at line 1 column 3 - >>> parse('{{else}}') - Traceback (most recent call last): - ... - TemplateError: else outside of an if block at line 1 column 3 - >>> parse('{{if x}}{{for x in y}}{{endif}}{{endfor}}') - Traceback (most recent call last): - ... - TemplateError: Unexpected endif at line 1 column 25 - >>> parse('{{if}}{{endif}}') - Traceback (most recent call last): - ... - TemplateError: if with no expression at line 1 column 3 - >>> parse('{{for x y}}{{endfor}}') - Traceback (most recent call last): - ... - TemplateError: Bad for (no "in") in 'x y' at line 1 column 3 - >>> parse('{{py:x=1\ny=2}}') - Traceback (most recent call last): - ... 
- TemplateError: Multi-line py blocks must start with a newline at line 1 column 3 - """ - tokens = lex(s, name=name, line_offset=line_offset) - result = [] - while tokens: - next_chunk, tokens = parse_expr(tokens, name) - result.append(next_chunk) - return result - - -def parse_expr(tokens, name, context=()): - if isinstance(tokens[0], basestring_): - return tokens[0], tokens[1:] - expr, pos = tokens[0] - expr = expr.strip() - if expr.startswith('py:'): - expr = expr[3:].lstrip(' \t') - if expr.startswith('\n') or expr.startswith('\r'): - expr = expr.lstrip('\r\n') - if '\r' in expr: - expr = expr.replace('\r\n', '\n') - expr = expr.replace('\r', '') - expr += '\n' - else: - if '\n' in expr: - raise TemplateError( - 'Multi-line py blocks must start with a newline', - position=pos, name=name) - return ('py', pos, expr), tokens[1:] - elif expr in ('continue', 'break'): - if 'for' not in context: - raise TemplateError( - 'continue outside of for loop', - position=pos, name=name) - return (expr, pos), tokens[1:] - elif expr.startswith('if '): - return parse_cond(tokens, name, context) - elif (expr.startswith('elif ') - or expr == 'else'): - raise TemplateError( - '%s outside of an if block' % expr.split()[0], - position=pos, name=name) - elif expr in ('if', 'elif', 'for'): - raise TemplateError( - '%s with no expression' % expr, - position=pos, name=name) - elif expr in ('endif', 'endfor', 'enddef'): - raise TemplateError( - 'Unexpected %s' % expr, - position=pos, name=name) - elif expr.startswith('for '): - return parse_for(tokens, name, context) - elif expr.startswith('default '): - return parse_default(tokens, name, context) - elif expr.startswith('inherit '): - return parse_inherit(tokens, name, context) - elif expr.startswith('def '): - return parse_def(tokens, name, context) - elif expr.startswith('#'): - return ('comment', pos, tokens[0][0]), tokens[1:] - return ('expr', pos, tokens[0][0]), tokens[1:] - - -def parse_cond(tokens, name, context): - start = tokens[0][1] - pieces = [] - context = context + ('if',) - while 1: - if not tokens: - raise TemplateError( - 'Missing {{endif}}', - position=start, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'endif'): - return ('cond', start) + tuple(pieces), tokens[1:] - next_chunk, tokens = parse_one_cond(tokens, name, context) - pieces.append(next_chunk) - - -def parse_one_cond(tokens, name, context): - (first, pos), tokens = tokens[0], tokens[1:] - content = [] - if first.endswith(':'): - first = first[:-1] - if first.startswith('if '): - part = ('if', pos, first[3:].lstrip(), content) - elif first.startswith('elif '): - part = ('elif', pos, first[5:].lstrip(), content) - elif first == 'else': - part = ('else', pos, None, content) - else: - assert 0, "Unexpected token %r at %s" % (first, pos) - while 1: - if not tokens: - raise TemplateError( - 'No {{endif}}', - position=pos, name=name) - if (isinstance(tokens[0], tuple) - and (tokens[0][0] == 'endif' - or tokens[0][0].startswith('elif ') - or tokens[0][0] == 'else')): - return part, tokens - next_chunk, tokens = parse_expr(tokens, name, context) - content.append(next_chunk) - - -def parse_for(tokens, name, context): - first, pos = tokens[0] - tokens = tokens[1:] - context = ('for',) + context - content = [] - assert first.startswith('for ') - if first.endswith(':'): - first = first[:-1] - first = first[3:].strip() - match = in_re.search(first) - if not match: - raise TemplateError( - 'Bad for (no "in") in %r' % first, - position=pos, name=name) - vars = 
first[:match.start()] - if '(' in vars: - raise TemplateError( - 'You cannot have () in the variable section of a for loop (%r)' - % vars, position=pos, name=name) - vars = tuple([ - v.strip() for v in first[:match.start()].split(',') - if v.strip()]) - expr = first[match.end():] - while 1: - if not tokens: - raise TemplateError( - 'No {{endfor}}', - position=pos, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'endfor'): - return ('for', pos, vars, expr, content), tokens[1:] - next_chunk, tokens = parse_expr(tokens, name, context) - content.append(next_chunk) - - -def parse_default(tokens, name, context): - first, pos = tokens[0] - assert first.startswith('default ') - first = first.split(None, 1)[1] - parts = first.split('=', 1) - if len(parts) == 1: - raise TemplateError( - "Expression must be {{default var=value}}; no = found in %r" % first, - position=pos, name=name) - var = parts[0].strip() - if ',' in var: - raise TemplateError( - "{{default x, y = ...}} is not supported", - position=pos, name=name) - if not var_re.search(var): - raise TemplateError( - "Not a valid variable name for {{default}}: %r" - % var, position=pos, name=name) - expr = parts[1].strip() - return ('default', pos, var, expr), tokens[1:] - - -def parse_inherit(tokens, name, context): - first, pos = tokens[0] - assert first.startswith('inherit ') - expr = first.split(None, 1)[1] - return ('inherit', pos, expr), tokens[1:] - - -def parse_def(tokens, name, context): - first, start = tokens[0] - tokens = tokens[1:] - assert first.startswith('def ') - first = first.split(None, 1)[1] - if first.endswith(':'): - first = first[:-1] - if '(' not in first: - func_name = first - sig = ((), None, None, {}) - elif not first.endswith(')'): - raise TemplateError("Function definition doesn't end with ): %s" % first, - position=start, name=name) - else: - first = first[:-1] - func_name, sig_text = first.split('(', 1) - sig = parse_signature(sig_text, name, start) - context = context + ('def',) - content = [] - while 1: - if not tokens: - raise TemplateError( - 'Missing {{enddef}}', - position=start, name=name) - if (isinstance(tokens[0], tuple) - and tokens[0][0] == 'enddef'): - return ('def', start, func_name, sig, content), tokens[1:] - next_chunk, tokens = parse_expr(tokens, name, context) - content.append(next_chunk) - - -def parse_signature(sig_text, name, pos): - tokens = tokenize.generate_tokens(StringIO(sig_text).readline) - sig_args = [] - var_arg = None - var_kw = None - defaults = {} - - def get_token(pos=False): - try: - tok_type, tok_string, (srow, scol), (erow, ecol), line = next(tokens) - except StopIteration: - return tokenize.ENDMARKER, '' - if pos: - return tok_type, tok_string, (srow, scol), (erow, ecol) - else: - return tok_type, tok_string - while 1: - var_arg_type = None - tok_type, tok_string = get_token() - if tok_type == tokenize.ENDMARKER: - break - if tok_type == tokenize.OP and (tok_string == '*' or tok_string == '**'): - var_arg_type = tok_string - tok_type, tok_string = get_token() - if tok_type != tokenize.NAME: - raise TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) - var_name = tok_string - tok_type, tok_string = get_token() - if tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','): - if var_arg_type == '*': - var_arg = var_name - elif var_arg_type == '**': - var_kw = var_name - else: - sig_args.append(var_name) - if tok_type == tokenize.ENDMARKER: - break - continue - if var_arg_type is not None: - raise 
TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) - if tok_type == tokenize.OP and tok_string == '=': - nest_type = None - unnest_type = None - nest_count = 0 - start_pos = end_pos = None - parts = [] - while 1: - tok_type, tok_string, s, e = get_token(True) - if start_pos is None: - start_pos = s - end_pos = e - if tok_type == tokenize.ENDMARKER and nest_count: - raise TemplateError('Invalid signature: (%s)' % sig_text, - position=pos, name=name) - if (not nest_count and - (tok_type == tokenize.ENDMARKER or (tok_type == tokenize.OP and tok_string == ','))): - default_expr = isolate_expression(sig_text, start_pos, end_pos) - defaults[var_name] = default_expr - sig_args.append(var_name) - break - parts.append((tok_type, tok_string)) - if nest_count and tok_type == tokenize.OP and tok_string == nest_type: - nest_count += 1 - elif nest_count and tok_type == tokenize.OP and tok_string == unnest_type: - nest_count -= 1 - if not nest_count: - nest_type = unnest_type = None - elif not nest_count and tok_type == tokenize.OP and tok_string in ('(', '[', '{'): - nest_type = tok_string - nest_count = 1 - unnest_type = {'(': ')', '[': ']', '{': '}'}[nest_type] - return sig_args, var_arg, var_kw, defaults - - -def isolate_expression(string, start_pos, end_pos): - srow, scol = start_pos - srow -= 1 - erow, ecol = end_pos - erow -= 1 - lines = string.splitlines(True) - if srow == erow: - return lines[srow][scol:ecol] - parts = [lines[srow][scol:]] - parts.extend(lines[srow+1:erow]) - if erow < len(lines): - # It'll sometimes give (end_row_past_finish, 0) - parts.append(lines[erow][:ecol]) - return ''.join(parts) - -_fill_command_usage = """\ -%prog [OPTIONS] TEMPLATE arg=value - -Use py:arg=value to set a Python value; otherwise all values are -strings. 
-""" - - -def fill_command(args=None): - import sys - import optparse - import pkg_resources - import os - if args is None: - args = sys.argv[1:] - dist = pkg_resources.get_distribution('Paste') - parser = optparse.OptionParser( - version=coerce_text(dist), - usage=_fill_command_usage) - parser.add_option( - '-o', '--output', - dest='output', - metavar="FILENAME", - help="File to write output to (default stdout)") - parser.add_option( - '--html', - dest='use_html', - action='store_true', - help="Use HTML style filling (including automatic HTML quoting)") - parser.add_option( - '--env', - dest='use_env', - action='store_true', - help="Put the environment in as top-level variables") - options, args = parser.parse_args(args) - if len(args) < 1: - print('You must give a template filename') - sys.exit(2) - template_name = args[0] - args = args[1:] - vars = {} - if options.use_env: - vars.update(os.environ) - for value in args: - if '=' not in value: - print('Bad argument: %r' % value) - sys.exit(2) - name, value = value.split('=', 1) - if name.startswith('py:'): - name = name[:3] - value = eval(value) - vars[name] = value - if template_name == '-': - template_content = sys.stdin.read() - template_name = '' - else: - f = open(template_name, 'rb') - template_content = f.read() - f.close() - if options.use_html: - TemplateClass = HTMLTemplate - else: - TemplateClass = Template - template = TemplateClass(template_content, name=template_name) - result = template.substitute(vars) - if options.output: - f = open(options.output, 'wb') - f.write(result) - f.close() - else: - sys.stdout.write(result) - -if __name__ == '__main__': - fill_command() diff --git a/libs/tempita/_looper.py b/libs/tempita/_looper.py deleted file mode 100644 index 6784c7cd..00000000 --- a/libs/tempita/_looper.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Helper for looping over sequences, particular in templates. - -Often in a loop in a template it's handy to know what's next up, -previously up, if this is the first or last item in the sequence, etc. -These can be awkward to manage in a normal Python loop, but using the -looper you can get a better sense of the context. Use like:: - - >>> for loop, item in looper(['a', 'b', 'c']): - ... print loop.number, item - ... if not loop.last: - ... print '---' - 1 a - --- - 2 b - --- - 3 c - -""" - -import sys -from tempita.compat3 import basestring_ - -__all__ = ['looper'] - - -class looper(object): - """ - Helper for looping (particularly in templates) - - Use this like:: - - for loop, item in looper(seq): - if loop.first: - ... 
- """ - - def __init__(self, seq): - self.seq = seq - - def __iter__(self): - return looper_iter(self.seq) - - def __repr__(self): - return '<%s for %r>' % ( - self.__class__.__name__, self.seq) - - -class looper_iter(object): - - def __init__(self, seq): - self.seq = list(seq) - self.pos = 0 - - def __iter__(self): - return self - - def __next__(self): - if self.pos >= len(self.seq): - raise StopIteration - result = loop_pos(self.seq, self.pos), self.seq[self.pos] - self.pos += 1 - return result - - if sys.version < "3": - next = __next__ - - -class loop_pos(object): - - def __init__(self, seq, pos): - self.seq = seq - self.pos = pos - - def __repr__(self): - return '' % ( - self.seq[self.pos], self.pos) - - def index(self): - return self.pos - index = property(index) - - def number(self): - return self.pos + 1 - number = property(number) - - def item(self): - return self.seq[self.pos] - item = property(item) - - def __next__(self): - try: - return self.seq[self.pos + 1] - except IndexError: - return None - __next__ = property(__next__) - - if sys.version < "3": - next = __next__ - - def previous(self): - if self.pos == 0: - return None - return self.seq[self.pos - 1] - previous = property(previous) - - def odd(self): - return not self.pos % 2 - odd = property(odd) - - def even(self): - return self.pos % 2 - even = property(even) - - def first(self): - return self.pos == 0 - first = property(first) - - def last(self): - return self.pos == len(self.seq) - 1 - last = property(last) - - def length(self): - return len(self.seq) - length = property(length) - - def first_group(self, getter=None): - """ - Returns true if this item is the start of a new group, - where groups mean that some attribute has changed. The getter - can be None (the item itself changes), an attribute name like - ``'.attr'``, a function, or a dict key or list index. - """ - if self.first: - return True - return self._compare_group(self.item, self.previous, getter) - - def last_group(self, getter=None): - """ - Returns true if this item is the end of a new group, - where groups mean that some attribute has changed. The getter - can be None (the item itself changes), an attribute name like - ``'.attr'``, a function, or a dict key or list index. 
- """ - if self.last: - return True - return self._compare_group(self.item, self.__next__, getter) - - def _compare_group(self, item, other, getter): - if getter is None: - return item != other - elif (isinstance(getter, basestring_) - and getter.startswith('.')): - getter = getter[1:] - if getter.endswith('()'): - getter = getter[:-2] - return getattr(item, getter)() != getattr(other, getter)() - else: - return getattr(item, getter) != getattr(other, getter) - elif hasattr(getter, '__call__'): - return getter(item) != getter(other) - else: - return item[getter] != other[getter] diff --git a/libs/tempita/compat3.py b/libs/tempita/compat3.py deleted file mode 100644 index 5e18fa01..00000000 --- a/libs/tempita/compat3.py +++ /dev/null @@ -1,45 +0,0 @@ -import sys - -__all__ = ['b', 'basestring_', 'bytes', 'next', 'is_unicode'] - -if sys.version < "3": - b = bytes = str - basestring_ = basestring -else: - - def b(s): - if isinstance(s, str): - return s.encode('latin1') - return bytes(s) - basestring_ = (bytes, str) - bytes = bytes -text = str - -if sys.version < "3": - - def next(obj): - return obj.next() -else: - next = next - -if sys.version < "3": - - def is_unicode(obj): - return isinstance(obj, unicode) -else: - - def is_unicode(obj): - return isinstance(obj, str) - - -def coerce_text(v): - if not isinstance(v, basestring_): - if sys.version < "3": - attr = '__unicode__' - else: - attr = '__str__' - if hasattr(v, attr): - return unicode(v) - else: - return bytes(v) - return v diff --git a/libs/tornado/__init__.py b/libs/tornado/__init__.py index c41ec97b..05174084 100755 --- a/libs/tornado/__init__.py +++ b/libs/tornado/__init__.py @@ -25,5 +25,5 @@ from __future__ import absolute_import, division, print_function, with_statement # is zero for an official release, positive for a development branch, # or negative for a release candidate or beta (after the base version # number has been incremented) -version = "3.2" -version_info = (3, 2, 0, 0) +version = "3.3.dev1" +version_info = (3, 3, 0, -100) diff --git a/libs/tornado/auth.py b/libs/tornado/auth.py index 9baac9ba..f15413e5 100755 --- a/libs/tornado/auth.py +++ b/libs/tornado/auth.py @@ -34,15 +34,29 @@ See the individual service classes below for complete documentation. Example usage for Google OpenID:: - class GoogleLoginHandler(tornado.web.RequestHandler, - tornado.auth.GoogleMixin): + class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, + tornado.auth.GoogleOAuth2Mixin): @tornado.gen.coroutine def get(self): - if self.get_argument("openid.mode", None): - user = yield self.get_authenticated_user() - # Save the user with e.g. set_secure_cookie() + if self.get_argument('code', False): + user = yield self.get_authenticated_user( + redirect_uri='http://your.site.com/auth/google', + code=self.get_argument('code')) + # Save the user with e.g. set_secure_cookie else: - yield self.authenticate_redirect() + yield self.authorize_redirect( + redirect_uri='http://your.site.com/auth/google', + client_id=self.settings['google_oauth']['key'], + scope=['profile', 'email'], + response_type='code', + extra_params={'approval_prompt': 'auto'}) + +.. versionchanged:: 3.3 + All of the callback interfaces in this module are now guaranteed + to run their callback with an argument of ``None`` on error. + Previously some functions would do this while others would simply + terminate the request on their own. This change also ensures that + errors are more consistently reported through the ``Future`` interfaces. 
""" from __future__ import absolute_import, division, print_function, with_statement @@ -61,6 +75,7 @@ from tornado import httpclient from tornado import escape from tornado.httputil import url_concat from tornado.log import gen_log +from tornado.stack_context import ExceptionStackContext from tornado.util import bytes_type, u, unicode_type, ArgReplacer try: @@ -73,6 +88,11 @@ try: except ImportError: import urllib as urllib_parse # py2 +try: + long # py2 +except NameError: + long = int # py3 + class AuthError(Exception): pass @@ -103,7 +123,14 @@ def _auth_return_future(f): if callback is not None: future.add_done_callback( functools.partial(_auth_future_to_callback, callback)) - f(*args, **kwargs) + def handle_exception(typ, value, tb): + if future.done(): + return False + else: + future.set_exc_info((typ, value, tb)) + return True + with ExceptionStackContext(handle_exception): + f(*args, **kwargs) return future return wrapper @@ -161,7 +188,7 @@ class OpenIdMixin(object): url = self._OPENID_ENDPOINT if http_client is None: http_client = self.get_auth_http_client() - http_client.fetch(url, self.async_callback( + http_client.fetch(url, functools.partial( self._on_authentication_verified, callback), method="POST", body=urllib_parse.urlencode(args)) @@ -333,7 +360,7 @@ class OAuthMixin(object): http_client.fetch( self._oauth_request_token_url(callback_uri=callback_uri, extra_params=extra_params), - self.async_callback( + functools.partial( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri, @@ -341,7 +368,7 @@ class OAuthMixin(object): else: http_client.fetch( self._oauth_request_token_url(), - self.async_callback( + functools.partial( self._on_request_token, self._OAUTH_AUTHORIZE_URL, callback_uri, callback)) @@ -378,7 +405,7 @@ class OAuthMixin(object): if http_client is None: http_client = self.get_auth_http_client() http_client.fetch(self._oauth_access_token_url(token), - self.async_callback(self._on_access_token, callback)) + functools.partial(self._on_access_token, callback)) def _oauth_request_token_url(self, callback_uri=None, extra_params=None): consumer_token = self._oauth_consumer_token() @@ -455,7 +482,7 @@ class OAuthMixin(object): access_token = _oauth_parse_response(response.body) self._oauth_get_user_future(access_token).add_done_callback( - self.async_callback(self._on_oauth_get_user, access_token, future)) + functools.partial(self._on_oauth_get_user, access_token, future)) def _oauth_consumer_token(self): """Subclasses must override this to return their OAuth consumer keys. @@ -640,7 +667,7 @@ class TwitterMixin(OAuthMixin): """ http = self.get_auth_http_client() http.fetch(self._oauth_request_token_url(callback_uri=callback_uri), - self.async_callback( + functools.partial( self._on_request_token, self._OAUTH_AUTHENTICATE_URL, None, callback)) @@ -698,7 +725,7 @@ class TwitterMixin(OAuthMixin): if args: url += "?" + urllib_parse.urlencode(args) http = self.get_auth_http_client() - http_callback = self.async_callback(self._on_twitter_request, callback) + http_callback = functools.partial(self._on_twitter_request, callback) if post_args is not None: http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), callback=http_callback) @@ -815,7 +842,7 @@ class FriendFeedMixin(OAuthMixin): args.update(oauth) if args: url += "?" 
+ urllib_parse.urlencode(args) - callback = self.async_callback(self._on_friendfeed_request, callback) + callback = functools.partial(self._on_friendfeed_request, callback) http = self.get_auth_http_client() if post_args is not None: http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), @@ -856,6 +883,10 @@ class FriendFeedMixin(OAuthMixin): class GoogleMixin(OpenIdMixin, OAuthMixin): """Google Open ID / OAuth authentication. + *Deprecated:* New applications should use `GoogleOAuth2Mixin` + below instead of this class. As of May 19, 2014, Google has stopped + supporting registration-free authentication. + No application registration is necessary to use Google for authentication or to access Google resources on behalf of a user. @@ -926,7 +957,7 @@ class GoogleMixin(OpenIdMixin, OAuthMixin): http = self.get_auth_http_client() token = dict(key=token, secret="") http.fetch(self._oauth_access_token_url(token), - self.async_callback(self._on_access_token, callback)) + functools.partial(self._on_access_token, callback)) else: chain_future(OpenIdMixin.get_authenticated_user(self), callback) @@ -945,6 +976,19 @@ class GoogleMixin(OpenIdMixin, OAuthMixin): class GoogleOAuth2Mixin(OAuth2Mixin): """Google authentication using OAuth2. + In order to use, register your application with Google and copy the + relevant parameters to your application settings. + + * Go to the Google Dev Console at http://console.developers.google.com + * Select a project, or create a new one. + * In the sidebar on the left, select APIs & Auth. + * In the list of APIs, find the Google+ API service and set it to ON. + * In the sidebar on the left, select Credentials. + * In the OAuth section of the page, select Create New Client ID. + * Set the Redirect URI to point to your auth handler + * Copy the "Client secret" and "Client ID" to the application settings as + {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}} + .. 
versionadded:: 3.2 """ _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth" @@ -958,7 +1002,7 @@ class GoogleOAuth2Mixin(OAuth2Mixin): Example usage:: - class GoogleOAuth2LoginHandler(LoginHandler, + class GoogleOAuth2LoginHandler(tornado.web.RequestHandler, tornado.auth.GoogleOAuth2Mixin): @tornado.gen.coroutine def get(self): @@ -985,7 +1029,7 @@ class GoogleOAuth2Mixin(OAuth2Mixin): }) http.fetch(self._OAUTH_ACCESS_TOKEN_URL, - self.async_callback(self._on_access_token, callback), + functools.partial(self._on_access_token, callback), method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body) def _on_access_token(self, future, response): @@ -1026,7 +1070,7 @@ class FacebookMixin(object): @tornado.web.asynchronous def get(self): if self.get_argument("session", None): - self.get_authenticated_user(self.async_callback(self._on_auth)) + self.get_authenticated_user(self._on_auth) return yield self.authenticate_redirect() @@ -1112,7 +1156,7 @@ class FacebookMixin(object): session = escape.json_decode(self.get_argument("session")) self.facebook_request( method="facebook.users.getInfo", - callback=self.async_callback( + callback=functools.partial( self._on_get_user_info, callback, session), session_key=session["session_key"], uids=session["uid"], @@ -1138,7 +1182,7 @@ class FacebookMixin(object): def get(self): self.facebook_request( method="stream.get", - callback=self.async_callback(self._on_stream), + callback=self._on_stream, session_key=self.current_user["session_key"]) def _on_stream(self, stream): @@ -1162,7 +1206,7 @@ class FacebookMixin(object): url = "http://api.facebook.com/restserver.php?" + \ urllib_parse.urlencode(args) http = self.get_auth_http_client() - http.fetch(url, callback=self.async_callback( + http.fetch(url, callback=functools.partial( self._parse_response, callback)) def _on_get_user_info(self, callback, session, users): @@ -1260,7 +1304,7 @@ class FacebookGraphMixin(OAuth2Mixin): fields.update(extra_fields) http.fetch(self._oauth_request_token_url(**args), - self.async_callback(self._on_access_token, redirect_uri, client_id, + functools.partial(self._on_access_token, redirect_uri, client_id, client_secret, callback, fields)) def _on_access_token(self, redirect_uri, client_id, client_secret, @@ -1277,7 +1321,7 @@ class FacebookGraphMixin(OAuth2Mixin): self.facebook_request( path="/me", - callback=self.async_callback( + callback=functools.partial( self._on_get_user_info, future, session, fields), access_token=session["access_token"], fields=",".join(fields) @@ -1344,7 +1388,7 @@ class FacebookGraphMixin(OAuth2Mixin): if all_args: url += "?" + urllib_parse.urlencode(all_args) - callback = self.async_callback(self._on_facebook_request, callback) + callback = functools.partial(self._on_facebook_request, callback) http = self.get_auth_http_client() if post_args is not None: http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), diff --git a/libs/tornado/autoreload.py b/libs/tornado/autoreload.py index 79cccb49..3982579a 100755 --- a/libs/tornado/autoreload.py +++ b/libs/tornado/autoreload.py @@ -14,7 +14,7 @@ # License for the specific language governing permissions and limitations # under the License. -"""xAutomatically restart the server when a source file is modified. +"""Automatically restart the server when a source file is modified. Most applications should not access this module directly. 
Instead, pass the keyword argument ``autoreload=True`` to the diff --git a/libs/tornado/ca-certificates.crt deleted file mode 100755 index a1ede895..00000000 --- a/libs/tornado/ca-certificates.crt +++ /dev/null @@ -1,3562 +0,0 @@ -# This file contains certificates of known certificate authorities -# for use with SimpleAsyncHTTPClient. -# -# It was extracted from the Mozilla source tree using libcurl's mk-ca-bundle -# script on Aug 13, 2013. -# -# This data file is licenced under the MPL/GPL. [The remainder of the deletion is the PEM-encoded bodies of the bundled Mozilla root certificates (GTE CyberTrust, Thawte, Equifax, VeriSign, GlobalSign, Entrust, GeoTrust, Comodo, QuoVadis, and others); the 3,562 deleted lines are elided here.]
-ZXJ0cnVzdC5jb20xHzAdBgNVBAMTFlVUTi1VU0VSRmlyc3QtSGFyZHdhcmUwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQCx98M4P7Sof885glFn0G2f0v9Y8+efK+wNiVSZuTiZFvfgIXlI -wrthdBKWHTxqctU8EGc6Oe0rE81m65UJM6Rsl7HoxuzBdXmcRl6Nq9Bq/bkqVRcQVLMZ8Jr28bFd -tqdt++BxF2uiiPsA3/4aMXcMmgF6sTLjKwEHOG7DpV4jvEWbe1DByTCP2+UretNb+zNAHqDVmBe8 -i4fDidNdoI6yqqr2jmmIBsX6iSHzCJ1pLgkzmykNRg+MzEk0sGlRvfkGzWitZky8PqxhvQqIDsjf -Pe58BEydCl5rkdbux+0ojatNh4lz0G6k0B4WixThdkQDf2Os5M1JnMWS9KsyoUhbAgMBAAGjgbkw -gbYwCwYDVR0PBAQDAgHGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFKFyXyYbKJhDlV0HN9WF -lp1L0sNFMEQGA1UdHwQ9MDswOaA3oDWGM2h0dHA6Ly9jcmwudXNlcnRydXN0LmNvbS9VVE4tVVNF -UkZpcnN0LUhhcmR3YXJlLmNybDAxBgNVHSUEKjAoBggrBgEFBQcDAQYIKwYBBQUHAwUGCCsGAQUF -BwMGBggrBgEFBQcDBzANBgkqhkiG9w0BAQUFAAOCAQEARxkP3nTGmZev/K0oXnWO6y1n7k57K9cM -//bey1WiCuFMVGWTYGufEpytXoMs61quwOQt9ABjHbjAbPLPSbtNk28GpgoiskliCE7/yMgUsogW -XecB5BKV5UU0s4tpvc+0hY91UZ59Ojg6FEgSxvunOxqNDYJAB+gECJChicsZUN/KHAG8HQQZexB2 -lzvukJDKxA4fFm517zP4029bHpbj4HR3dHuKom4t3XbWOTCC8KucUvIqx69JXn7HaOWCgchqJ/kn -iCrVWFCVH/A7HFe7fRQ5YiuayZSSKqMiDP+JJn1fIytH1xUdqWqeUQ0qUZ6B+dQ7XnASfxAynB67 -nfhmqA== ------END CERTIFICATE----- - -Camerfirma Chambers of Commerce Root -==================================== ------BEGIN CERTIFICATE----- -MIIEvTCCA6WgAwIBAgIBADANBgkqhkiG9w0BAQUFADB/MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe -QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i -ZXJzaWduLm9yZzEiMCAGA1UEAxMZQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdDAeFw0wMzA5MzAx -NjEzNDNaFw0zNzA5MzAxNjEzNDRaMH8xCzAJBgNVBAYTAkVVMScwJQYDVQQKEx5BQyBDYW1lcmZp -cm1hIFNBIENJRiBBODI3NDMyODcxIzAhBgNVBAsTGmh0dHA6Ly93d3cuY2hhbWJlcnNpZ24ub3Jn -MSIwIAYDVQQDExlDaGFtYmVycyBvZiBDb21tZXJjZSBSb290MIIBIDANBgkqhkiG9w0BAQEFAAOC -AQ0AMIIBCAKCAQEAtzZV5aVdGDDg2olUkfzIx1L4L1DZ77F1c2VHfRtbunXF/KGIJPov7coISjlU -xFF6tdpg6jg8gbLL8bvZkSM/SAFwdakFKq0fcfPJVD0dBmpAPrMMhe5cG3nCYsS4No41XQEMIwRH -NaqbYE6gZj3LJgqcQKH0XZi/caulAGgq7YN6D6IUtdQis4CwPAxaUWktWBiP7Zme8a7ileb2R6jW -DA+wWFjbw2Y3npuRVDM30pQcakjJyfKl2qUMI/cjDpwyVV5xnIQFUZot/eZOKjRa3spAN2cMVCFV -d9oKDMyXroDclDZK9D7ONhMeU+SsTjoF7Nuucpw4i9A5O4kKPnf+dQIBA6OCAUQwggFAMBIGA1Ud -EwEB/wQIMAYBAf8CAQwwPAYDVR0fBDUwMzAxoC+gLYYraHR0cDovL2NybC5jaGFtYmVyc2lnbi5v -cmcvY2hhbWJlcnNyb290LmNybDAdBgNVHQ4EFgQU45T1sU3p26EpW1eLTXYGduHRooowDgYDVR0P -AQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzAnBgNVHREEIDAegRxjaGFtYmVyc3Jvb3RAY2hh -bWJlcnNpZ24ub3JnMCcGA1UdEgQgMB6BHGNoYW1iZXJzcm9vdEBjaGFtYmVyc2lnbi5vcmcwWAYD -VR0gBFEwTzBNBgsrBgEEAYGHLgoDATA+MDwGCCsGAQUFBwIBFjBodHRwOi8vY3BzLmNoYW1iZXJz -aWduLm9yZy9jcHMvY2hhbWJlcnNyb290Lmh0bWwwDQYJKoZIhvcNAQEFBQADggEBAAxBl8IahsAi -fJ/7kPMa0QOx7xP5IV8EnNrJpY0nbJaHkb5BkAFyk+cefV/2icZdp0AJPaxJRUXcLo0waLIJuvvD -L8y6C98/d3tGfToSJI6WjzwFCm/SlCgdbQzALogi1djPHRPH8EjX1wWnz8dHnjs8NMiAT9QUu/wN -UPf6s+xCX6ndbcj0dc97wXImsQEcXCz9ek60AcUFV7nnPKoF2YjpB0ZBzu9Bga5Y34OirsrXdx/n -ADydb47kMgkdTXg0eDQ8lJsm7U9xxhl6vSAiSFr+S30Dt+dYvsYyTnQeaN2oaFuzPu5ifdmA6Ap1 -erfutGWaIZDgqtCYvDi1czyL+Nw= ------END CERTIFICATE----- - -Camerfirma Global Chambersign Root -================================== ------BEGIN CERTIFICATE----- -MIIExTCCA62gAwIBAgIBADANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMe -QUMgQ2FtZXJmaXJtYSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1i -ZXJzaWduLm9yZzEgMB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwHhcNMDMwOTMwMTYx -NDE4WhcNMzcwOTMwMTYxNDE4WjB9MQswCQYDVQQGEwJFVTEnMCUGA1UEChMeQUMgQ2FtZXJmaXJt -YSBTQSBDSUYgQTgyNzQzMjg3MSMwIQYDVQQLExpodHRwOi8vd3d3LmNoYW1iZXJzaWduLm9yZzEg -MB4GA1UEAxMXR2xvYmFsIENoYW1iZXJzaWduIFJvb3QwggEgMA0GCSqGSIb3DQEBAQUAA4IBDQAw 
-ggEIAoIBAQCicKLQn0KuWxfH2H3PFIP8T8mhtxOviteePgQKkotgVvq0Mi+ITaFgCPS3CU6gSS9J -1tPfnZdan5QEcOw/Wdm3zGaLmFIoCQLfxS+EjXqXd7/sQJ0lcqu1PzKY+7e3/HKE5TWH+VX6ox8O -by4o3Wmg2UIQxvi1RMLQQ3/bvOSiPGpVeAp3qdjqGTK3L/5cPxvusZjsyq16aUXjlg9V9ubtdepl -6DJWk0aJqCWKZQbua795B9Dxt6/tLE2Su8CoX6dnfQTyFQhwrJLWfQTSM/tMtgsL+xrJxI0DqX5c -8lCrEqWhz0hQpe/SyBoT+rB/sYIcd2oPX9wLlY/vQ37mRQklAgEDo4IBUDCCAUwwEgYDVR0TAQH/ -BAgwBgEB/wIBDDA/BgNVHR8EODA2MDSgMqAwhi5odHRwOi8vY3JsLmNoYW1iZXJzaWduLm9yZy9j -aGFtYmVyc2lnbnJvb3QuY3JsMB0GA1UdDgQWBBRDnDafsJ4wTcbOX60Qq+UDpfqpFDAOBgNVHQ8B -Af8EBAMCAQYwEQYJYIZIAYb4QgEBBAQDAgAHMCoGA1UdEQQjMCGBH2NoYW1iZXJzaWducm9vdEBj -aGFtYmVyc2lnbi5vcmcwKgYDVR0SBCMwIYEfY2hhbWJlcnNpZ25yb290QGNoYW1iZXJzaWduLm9y -ZzBbBgNVHSAEVDBSMFAGCysGAQQBgYcuCgEBMEEwPwYIKwYBBQUHAgEWM2h0dHA6Ly9jcHMuY2hh -bWJlcnNpZ24ub3JnL2Nwcy9jaGFtYmVyc2lnbnJvb3QuaHRtbDANBgkqhkiG9w0BAQUFAAOCAQEA -PDtwkfkEVCeR4e3t/mh/YV3lQWVPMvEYBZRqHN4fcNs+ezICNLUMbKGKfKX0j//U2K0X1S0E0T9Y -gOKBWYi+wONGkyT+kL0mojAt6JcmVzWJdJYY9hXiryQZVgICsroPFOrGimbBhkVVi76SvpykBMdJ -PJ7oKXqJ1/6v/2j1pReQvayZzKWGVwlnRtvWFsJG8eSpUPWP0ZIV018+xgBJOm5YstHRJw0lyDL4 -IBHNfTIzSJRUTN3cecQwn+uOuFW114hcxWokPbLTBQNRxgfvzBRydD1ucs4YKIxKoHflCStFREes -t2d/AYoFWpO+ocH/+OcOZ6RHSXZddZAa9SaP8A== ------END CERTIFICATE----- - -NetLock Notary (Class A) Root -============================= ------BEGIN CERTIFICATE----- -MIIGfTCCBWWgAwIBAgICAQMwDQYJKoZIhvcNAQEEBQAwga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQI -EwdIdW5nYXJ5MREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 -dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9j -ayBLb3pqZWd5em9pIChDbGFzcyBBKSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNDIzMTQ0N1oX -DTE5MDIxOTIzMTQ0N1owga8xCzAJBgNVBAYTAkhVMRAwDgYDVQQIEwdIdW5nYXJ5MREwDwYDVQQH -EwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6dG9uc2FnaSBLZnQuMRowGAYD -VQQLExFUYW51c2l0dmFueWtpYWRvazE2MDQGA1UEAxMtTmV0TG9jayBLb3pqZWd5em9pIChDbGFz -cyBBKSBUYW51c2l0dmFueWtpYWRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvHSM -D7tM9DceqQWC2ObhbHDqeLVu0ThEDaiDzl3S1tWBxdRL51uUcCbbO51qTGL3cfNk1mE7PetzozfZ -z+qMkjvN9wfcZnSX9EUi3fRc4L9t875lM+QVOr/bmJBVOMTtplVjC7B4BPTjbsE/jvxReB+SnoPC -/tmwqcm8WgD/qaiYdPv2LD4VOQ22BFWoDpggQrOxJa1+mm9dU7GrDPzr4PN6s6iz/0b2Y6LYOph7 -tqyF/7AlT3Rj5xMHpQqPBffAZG9+pyeAlt7ULoZgx2srXnN7F+eRP2QM2EsiNCubMvJIH5+hCoR6 -4sKtlz2O1cH5VqNQ6ca0+pii7pXmKgOM3wIDAQABo4ICnzCCApswDgYDVR0PAQH/BAQDAgAGMBIG -A1UdEwEB/wQIMAYBAf8CAQQwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaC -Ak1GSUdZRUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pv -bGdhbHRhdGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQu -IEEgaGl0ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2Vn -LWJpenRvc2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0 -ZXRlbGUgYXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFz -IGxlaXJhc2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBh -IGh0dHBzOi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVu -b3J6ZXNAbmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBh -bmQgdGhlIHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sg -Q1BTIGF2YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFp -bCBhdCBjcHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4IBAQBIJEb3ulZv+sgoA0BO5TE5 -ayZrU3/b39/zcT0mwBQOxmd7I6gMc90Bu8bKbjc5VdXHjFYgDigKDtIqpLBJUsY4B/6+CgmM0ZjP -ytoUMaFP0jn8DxEsQ8Pdq5PHVT5HfBgaANzze9jyf1JsIPQLX2lS9O74silg6+NJMSEN1rUQQeJB -CWziGppWS3cC9qCbmieH6FUpccKQn0V4GuEVZD3QDtigdp+uxdAu6tYPVuxkf1qbFFgBJ34TUMdr 
-KuZoPL9coAob4Q566eKAw+np9v1sEZ7Q5SgnK1QyQhSCdeZK8CtmdWOMovsEPoMOmzbwGOQmIMOM -8CgHrTwXZoi1/baI ------END CERTIFICATE----- - -NetLock Business (Class B) Root -=============================== ------BEGIN CERTIFICATE----- -MIIFSzCCBLSgAwIBAgIBaTANBgkqhkiG9w0BAQQFADCBmTELMAkGA1UEBhMCSFUxETAPBgNVBAcT -CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV -BAsTEVRhbnVzaXR2YW55a2lhZG9rMTIwMAYDVQQDEylOZXRMb2NrIFV6bGV0aSAoQ2xhc3MgQikg -VGFudXNpdHZhbnlraWFkbzAeFw05OTAyMjUxNDEwMjJaFw0xOTAyMjAxNDEwMjJaMIGZMQswCQYD -VQQGEwJIVTERMA8GA1UEBxMIQnVkYXBlc3QxJzAlBgNVBAoTHk5ldExvY2sgSGFsb3phdGJpenRv -bnNhZ2kgS2Z0LjEaMBgGA1UECxMRVGFudXNpdHZhbnlraWFkb2sxMjAwBgNVBAMTKU5ldExvY2sg -VXpsZXRpIChDbGFzcyBCKSBUYW51c2l0dmFueWtpYWRvMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCB -iQKBgQCx6gTsIKAjwo84YM/HRrPVG/77uZmeBNwcf4xKgZjupNTKihe5In+DCnVMm8Bp2GQ5o+2S -o/1bXHQawEfKOml2mrriRBf8TKPV/riXiK+IA4kfpPIEPsgHC+b5sy96YhQJRhTKZPWLgLViqNhr -1nGTLbO/CVRY7QbrqHvcQ7GhaQIDAQABo4ICnzCCApswEgYDVR0TAQH/BAgwBgEB/wIBBDAOBgNV -HQ8BAf8EBAMCAAYwEQYJYIZIAYb4QgEBBAQDAgAHMIICYAYJYIZIAYb4QgENBIICURaCAk1GSUdZ -RUxFTSEgRXplbiB0YW51c2l0dmFueSBhIE5ldExvY2sgS2Z0LiBBbHRhbGFub3MgU3pvbGdhbHRh -dGFzaSBGZWx0ZXRlbGVpYmVuIGxlaXJ0IGVsamFyYXNvayBhbGFwamFuIGtlc3p1bHQuIEEgaGl0 -ZWxlc2l0ZXMgZm9seWFtYXRhdCBhIE5ldExvY2sgS2Z0LiB0ZXJtZWtmZWxlbG9zc2VnLWJpenRv -c2l0YXNhIHZlZGkuIEEgZGlnaXRhbGlzIGFsYWlyYXMgZWxmb2dhZGFzYW5hayBmZWx0ZXRlbGUg -YXogZWxvaXJ0IGVsbGVub3J6ZXNpIGVsamFyYXMgbWVndGV0ZWxlLiBBeiBlbGphcmFzIGxlaXJh -c2EgbWVndGFsYWxoYXRvIGEgTmV0TG9jayBLZnQuIEludGVybmV0IGhvbmxhcGphbiBhIGh0dHBz -Oi8vd3d3Lm5ldGxvY2submV0L2RvY3MgY2ltZW4gdmFneSBrZXJoZXRvIGF6IGVsbGVub3J6ZXNA -bmV0bG9jay5uZXQgZS1tYWlsIGNpbWVuLiBJTVBPUlRBTlQhIFRoZSBpc3N1YW5jZSBhbmQgdGhl -IHVzZSBvZiB0aGlzIGNlcnRpZmljYXRlIGlzIHN1YmplY3QgdG8gdGhlIE5ldExvY2sgQ1BTIGF2 -YWlsYWJsZSBhdCBodHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIG9yIGJ5IGUtbWFpbCBhdCBj -cHNAbmV0bG9jay5uZXQuMA0GCSqGSIb3DQEBBAUAA4GBAATbrowXr/gOkDFOzT4JwG06sPgzTEdM -43WIEJessDgVkcYplswhwG08pXTP2IKlOcNl40JwuyKQ433bNXbhoLXan3BukxowOR0w2y7jfLKR -stE3Kfq51hdcR0/jHTjrn9V7lagonhVK0dHQKwCXoOKSNitjrFgBazMpUIaD8QFI ------END CERTIFICATE----- - -NetLock Express (Class C) Root -============================== ------BEGIN CERTIFICATE----- -MIIFTzCCBLigAwIBAgIBaDANBgkqhkiG9w0BAQQFADCBmzELMAkGA1UEBhMCSFUxETAPBgNVBAcT -CEJ1ZGFwZXN0MScwJQYDVQQKEx5OZXRMb2NrIEhhbG96YXRiaXp0b25zYWdpIEtmdC4xGjAYBgNV -BAsTEVRhbnVzaXR2YW55a2lhZG9rMTQwMgYDVQQDEytOZXRMb2NrIEV4cHJlc3N6IChDbGFzcyBD -KSBUYW51c2l0dmFueWtpYWRvMB4XDTk5MDIyNTE0MDgxMVoXDTE5MDIyMDE0MDgxMVowgZsxCzAJ -BgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVzdDEnMCUGA1UEChMeTmV0TG9jayBIYWxvemF0Yml6 -dG9uc2FnaSBLZnQuMRowGAYDVQQLExFUYW51c2l0dmFueWtpYWRvazE0MDIGA1UEAxMrTmV0TG9j -ayBFeHByZXNzeiAoQ2xhc3MgQykgVGFudXNpdHZhbnlraWFkbzCBnzANBgkqhkiG9w0BAQEFAAOB -jQAwgYkCgYEA6+ywbGGKIyWvYCDj2Z/8kwvbXY2wobNAOoLO/XXgeDIDhlqGlZHtU/qdQPzm6N3Z -W3oDvV3zOwzDUXmbrVWg6dADEK8KuhRC2VImESLH0iDMgqSaqf64gXadarfSNnU+sYYJ9m5tfk63 -euyucYT2BDMIJTLrdKwWRMbkQJMdf60CAwEAAaOCAp8wggKbMBIGA1UdEwEB/wQIMAYBAf8CAQQw -DgYDVR0PAQH/BAQDAgAGMBEGCWCGSAGG+EIBAQQEAwIABzCCAmAGCWCGSAGG+EIBDQSCAlEWggJN -RklHWUVMRU0hIEV6ZW4gdGFudXNpdHZhbnkgYSBOZXRMb2NrIEtmdC4gQWx0YWxhbm9zIFN6b2xn -YWx0YXRhc2kgRmVsdGV0ZWxlaWJlbiBsZWlydCBlbGphcmFzb2sgYWxhcGphbiBrZXN6dWx0LiBB -IGhpdGVsZXNpdGVzIGZvbHlhbWF0YXQgYSBOZXRMb2NrIEtmdC4gdGVybWVrZmVsZWxvc3NlZy1i -aXp0b3NpdGFzYSB2ZWRpLiBBIGRpZ2l0YWxpcyBhbGFpcmFzIGVsZm9nYWRhc2FuYWsgZmVsdGV0 -ZWxlIGF6IGVsb2lydCBlbGxlbm9yemVzaSBlbGphcmFzIG1lZ3RldGVsZS4gQXogZWxqYXJhcyBs -ZWlyYXNhIG1lZ3RhbGFsaGF0byBhIE5ldExvY2sgS2Z0LiBJbnRlcm5ldCBob25sYXBqYW4gYSBo 
-dHRwczovL3d3dy5uZXRsb2NrLm5ldC9kb2NzIGNpbWVuIHZhZ3kga2VyaGV0byBheiBlbGxlbm9y -emVzQG5ldGxvY2submV0IGUtbWFpbCBjaW1lbi4gSU1QT1JUQU5UISBUaGUgaXNzdWFuY2UgYW5k -IHRoZSB1c2Ugb2YgdGhpcyBjZXJ0aWZpY2F0ZSBpcyBzdWJqZWN0IHRvIHRoZSBOZXRMb2NrIENQ -UyBhdmFpbGFibGUgYXQgaHR0cHM6Ly93d3cubmV0bG9jay5uZXQvZG9jcyBvciBieSBlLW1haWwg -YXQgY3BzQG5ldGxvY2submV0LjANBgkqhkiG9w0BAQQFAAOBgQAQrX/XDDKACtiG8XmYta3UzbM2 -xJZIwVzNmtkFLp++UOv0JhQQLdRmF/iewSf98e3ke0ugbLWrmldwpu2gpO0u9f38vf5NNwgMvOOW -gyL1SRt/Syu0VMGAfJlOHdCM7tCs5ZL6dVb+ZKATj7i4Fp1hBWeAyNDYpQcCNJgEjTME1A== ------END CERTIFICATE----- - -XRamp Global CA Root -==================== ------BEGIN CERTIFICATE----- -MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE -BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj -dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB -dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx -HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg -U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu -IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx -foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE -zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs -AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry -xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud -EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap -oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC -AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc -/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt -qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n -nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz -8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= ------END CERTIFICATE----- - -Go Daddy Class 2 CA -=================== ------BEGIN CERTIFICATE----- -MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY -VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG -A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g -RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD -ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv -2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 -qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j -YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY -vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O -BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o -atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu -MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG -A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim -PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt -I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ -HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI 
-Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b -vZ8= ------END CERTIFICATE----- - -Starfield Class 2 CA -==================== ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc -U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg -Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo -MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG -A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG -SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY -bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ -JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm -epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN -F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF -MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f -hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo -bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g -QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs -afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM -PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl -xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD -KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 -QBFGmh95DmK/D5fs4C8fF5Q= ------END CERTIFICATE----- - -StartCom Certification Authority -================================ ------BEGIN CERTIFICATE----- -MIIHyTCCBbGgAwIBAgIBATANBgkqhkiG9w0BAQUFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN -U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu -ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 -NjM2WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk -LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg -U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y -o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ -Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d -eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt -2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z -6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ -osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ -untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc -UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT -37uMdBNSSwIDAQABo4ICUjCCAk4wDAYDVR0TBAUwAwEB/zALBgNVHQ8EBAMCAa4wHQYDVR0OBBYE -FE4L7xqkQFulF2mHMMo0aEPQQa7yMGQGA1UdHwRdMFswLKAqoCiGJmh0dHA6Ly9jZXJ0LnN0YXJ0 -Y29tLm9yZy9zZnNjYS1jcmwuY3JsMCugKaAnhiVodHRwOi8vY3JsLnN0YXJ0Y29tLm9yZy9zZnNj -YS1jcmwuY3JsMIIBXQYDVR0gBIIBVDCCAVAwggFMBgsrBgEEAYG1NwEBATCCATswLwYIKwYBBQUH -AgEWI2h0dHA6Ly9jZXJ0LnN0YXJ0Y29tLm9yZy9wb2xpY3kucGRmMDUGCCsGAQUFBwIBFilodHRw -Oi8vY2VydC5zdGFydGNvbS5vcmcvaW50ZXJtZWRpYXRlLnBkZjCB0AYIKwYBBQUHAgIwgcMwJxYg -U3RhcnQgQ29tbWVyY2lhbCAoU3RhcnRDb20pIEx0ZC4wAwIBARqBl0xpbWl0ZWQgTGlhYmlsaXR5 -LCByZWFkIHRoZSBzZWN0aW9uICpMZWdhbCBMaW1pdGF0aW9ucyogb2YgdGhlIFN0YXJ0Q29tIENl 
-cnRpZmljYXRpb24gQXV0aG9yaXR5IFBvbGljeSBhdmFpbGFibGUgYXQgaHR0cDovL2NlcnQuc3Rh -cnRjb20ub3JnL3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilT -dGFydENvbSBGcmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQUFAAOC -AgEAFmyZ9GYMNPXQhV59CuzaEE44HF7fpiUFS5Eyweg78T3dRAlbB0mKKctmArexmvclmAk8jhvh -3TaHK0u7aNM5Zj2gJsfyOZEdUauCe37Vzlrk4gNXcGmXCPleWKYK34wGmkUWFjgKXlf2Ysd6AgXm -vB618p70qSmD+LIU424oh0TDkBreOKk8rENNZEXO3SipXPJzewT4F+irsfMuXGRuczE6Eri8sxHk -fY+BUZo7jYn0TZNmezwD7dOaHZrzZVD1oNB1ny+v8OqCQ5j4aZyJecRDjkZy42Q2Eq/3JR44iZB3 -fsNrarnDy0RLrHiQi+fHLB5LEUTINFInzQpdn4XBidUaePKVEFMy3YCEZnXZtWgo+2EuvoSoOMCZ -EoalHmdkrQYuL6lwhceWD3yJZfWOQ1QOq92lgDmUYMA0yZZwLKMS9R9Ie70cfmu3nZD0Ijuu+Pwq -yvqCUqDvr0tVk+vBtfAii6w0TiYiBKGHLHVKt+V9E9e4DGTANtLJL4YSjCMJwRuCO3NJo2pXh5Tl -1njFmUNj403gdy3hZZlyaQQaRwnmDwFWJPsfvw55qVguucQJAX6Vum0ABj6y6koQOdjQK/W/7HW/ -lwLFCRsI3FU34oH7N4RDYiDK51ZLZer+bMEkkyShNOsF/5oirpt9P/FlUQqmMGqz9IgcgA38coro -g14= ------END CERTIFICATE----- - -Taiwan GRCA -=========== ------BEGIN CERTIFICATE----- -MIIFcjCCA1qgAwIBAgIQH51ZWtcvwgZEpYAIaeNe9jANBgkqhkiG9w0BAQUFADA/MQswCQYDVQQG -EwJUVzEwMC4GA1UECgwnR292ZXJubWVudCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4X -DTAyMTIwNTEzMjMzM1oXDTMyMTIwNTEzMjMzM1owPzELMAkGA1UEBhMCVFcxMDAuBgNVBAoMJ0dv -dmVybm1lbnQgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAJoluOzMonWoe/fOW1mKydGGEghU7Jzy50b2iPN86aXfTEc2pBsBHH8eV4qN -w8XRIePaJD9IK/ufLqGU5ywck9G/GwGHU5nOp/UKIXZ3/6m3xnOUT0b3EEk3+qhZSV1qgQdW8or5 -BtD3cCJNtLdBuTK4sfCxw5w/cP1T3YGq2GN49thTbqGsaoQkclSGxtKyyhwOeYHWtXBiCAEuTk8O -1RGvqa/lmr/czIdtJuTJV6L7lvnM4T9TjGxMfptTCAtsF/tnyMKtsc2AtJfcdgEWFelq16TheEfO -htX7MfP6Mb40qij7cEwdScevLJ1tZqa2jWR+tSBqnTuBto9AAGdLiYa4zGX+FVPpBMHWXx1E1wov -J5pGfaENda1UhhXcSTvxls4Pm6Dso3pdvtUqdULle96ltqqvKKyskKw4t9VoNSZ63Pc78/1Fm9G7 -Q3hub/FCVGqY8A2tl+lSXunVanLeavcbYBT0peS2cWeqH+riTcFCQP5nRhc4L0c/cZyu5SHKYS1t -B6iEfC3uUSXxY5Ce/eFXiGvviiNtsea9P63RPZYLhY3Naye7twWb7LuRqQoHEgKXTiCQ8P8NHuJB -O9NAOueNXdpm5AKwB1KYXA6OM5zCppX7VRluTI6uSw+9wThNXo+EHWbNxWCWtFJaBYmOlXqYwZE8 -lSOyDvR5tMl8wUohAgMBAAGjajBoMB0GA1UdDgQWBBTMzO/MKWCkO7GStjz6MmKPrCUVOzAMBgNV -HRMEBTADAQH/MDkGBGcqBwAEMTAvMC0CAQAwCQYFKw4DAhoFADAHBgVnKgMAAAQUA5vwIhP/lSg2 -09yewDL7MTqKUWUwDQYJKoZIhvcNAQEFBQADggIBAECASvomyc5eMN1PhnR2WPWus4MzeKR6dBcZ -TulStbngCnRiqmjKeKBMmo4sIy7VahIkv9Ro04rQ2JyftB8M3jh+Vzj8jeJPXgyfqzvS/3WXy6Tj -Zwj/5cAWtUgBfen5Cv8b5Wppv3ghqMKnI6mGq3ZW6A4M9hPdKmaKZEk9GhiHkASfQlK3T8v+R0F2 -Ne//AHY2RTKbxkaFXeIksB7jSJaYV0eUVXoPQbFEJPPB/hprv4j9wabak2BegUqZIJxIZhm1AHlU -D7gsL0u8qV1bYH+Mh6XgUmMqvtg7hUAV/h62ZT/FS9p+tXo1KaMuephgIqP0fSdOLeq0dDzpD6Qz -DxARvBMB1uUO07+1EqLhRSPAzAhuYbeJq4PjJB7mXQfnHyA+z2fI56wwbSdLaG5LKlwCCDTb+Hbk -Z6MmnD+iMsJKxYEYMRBWqoTvLQr/uB930r+lWKBi5NdLkXWNiYCYfm3LU05er/ayl4WXudpVBrkk -7tfGOB5jGxI7leFYrPLfhNVfmS8NVVvmONsuP3LpSIXLuykTjx44VbnzssQwmSNOXfJIoRIM3BKQ -CZBUkQM8R+XVyWXgt0t97EfTsws+rZ7QdAAO671RrcDeLMDDav7v3Aun+kbfYNucpllQdSNpc5Oy -+fwC00fmcc4QAu4njIT/rEUNE1yDMuAlpYYsfPQS ------END CERTIFICATE----- - -Firmaprofesional Root CA -======================== ------BEGIN CERTIFICATE----- -MIIEVzCCAz+gAwIBAgIBATANBgkqhkiG9w0BAQUFADCBnTELMAkGA1UEBhMCRVMxIjAgBgNVBAcT -GUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMTOUF1dG9yaWRhZCBkZSBDZXJ0aWZp -Y2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2ODEmMCQGCSqGSIb3DQEJARYXY2FA -ZmlybWFwcm9mZXNpb25hbC5jb20wHhcNMDExMDI0MjIwMDAwWhcNMTMxMDI0MjIwMDAwWjCBnTEL -MAkGA1UEBhMCRVMxIjAgBgNVBAcTGUMvIE11bnRhbmVyIDI0NCBCYXJjZWxvbmExQjBABgNVBAMT -OUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2MjYzNDA2 
-ODEmMCQGCSqGSIb3DQEJARYXY2FAZmlybWFwcm9mZXNpb25hbC5jb20wggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDnIwNvbyOlXnjOlSztlB5uCp4Bx+ow0Syd3Tfom5h5VtP8c9/Qit5V -j1H5WuretXDE7aTt/6MNbg9kUDGvASdYrv5sp0ovFy3Tc9UTHI9ZpTQsHVQERc1ouKDAA6XPhUJH -lShbz++AbOCQl4oBPB3zhxAwJkh91/zpnZFx/0GaqUC1N5wpIE8fUuOgfRNtVLcK3ulqTgesrBlf -3H5idPayBQC6haD9HThuy1q7hryUZzM1gywfI834yJFxzJeL764P3CkDG8A563DtwW4O2GcLiam8 -NeTvtjS0pbbELaW+0MOUJEjb35bTALVmGotmBQ/dPz/LP6pemkr4tErvlTcbAgMBAAGjgZ8wgZww -KgYDVR0RBCMwIYYfaHR0cDovL3d3dy5maXJtYXByb2Zlc2lvbmFsLmNvbTASBgNVHRMBAf8ECDAG -AQH/AgEBMCsGA1UdEAQkMCKADzIwMDExMDI0MjIwMDAwWoEPMjAxMzEwMjQyMjAwMDBaMA4GA1Ud -DwEB/wQEAwIBBjAdBgNVHQ4EFgQUMwugZtHq2s7eYpMEKFK1FH84aLcwDQYJKoZIhvcNAQEFBQAD -ggEBAEdz/o0nVPD11HecJ3lXV7cVVuzH2Fi3AQL0M+2TUIiefEaxvT8Ub/GzR0iLjJcG1+p+o1wq -u00vR+L4OQbJnC4xGgN49Lw4xiKLMzHwFgQEffl25EvXwOaD7FnMP97/T2u3Z36mhoEyIwOdyPdf -wUpgpZKpsaSgYMN4h7Mi8yrrW6ntBas3D7Hi05V2Y1Z0jFhyGzflZKG+TQyTmAyX9odtsz/ny4Cm -7YjHX1BiAuiZdBbQ5rQ58SfLyEDW44YQqSMSkuBpQWOnryULwMWSyx6Yo1q6xTMPoJcB3X/ge9YG -VM+h4k0460tQtcsm9MracEpqoeJ5quGnM/b9Sh/22WA= ------END CERTIFICATE----- - -Wells Fargo Root CA -=================== ------BEGIN CERTIFICATE----- -MIID5TCCAs2gAwIBAgIEOeSXnjANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UEBhMCVVMxFDASBgNV -BAoTC1dlbGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhv -cml0eTEvMC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN -MDAxMDExMTY0MTI4WhcNMjEwMTE0MTY0MTI4WjCBgjELMAkGA1UEBhMCVVMxFDASBgNVBAoTC1dl -bGxzIEZhcmdvMSwwKgYDVQQLEyNXZWxscyBGYXJnbyBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEv -MC0GA1UEAxMmV2VsbHMgRmFyZ28gUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVqDM7Jvk0/82bfuUER84A4n135zHCLielTWi5MbqNQ1mX -x3Oqfz1cQJ4F5aHiidlMuD+b+Qy0yGIZLEWukR5zcUHESxP9cMIlrCL1dQu3U+SlK93OvRw6esP3 -E48mVJwWa2uv+9iWsWCaSOAlIiR5NM4OJgALTqv9i86C1y8IcGjBqAr5dE8Hq6T54oN+J3N0Prj5 -OEL8pahbSCOz6+MlsoCultQKnMJ4msZoGK43YjdeUXWoWGPAUe5AeH6orxqg4bB4nVCMe+ez/I4j -sNtlAHCEAQgAFG5Uhpq6zPk3EPbg3oQtnaSFN9OH4xXQwReQfhkhahKpdv0SAulPIV4XAgMBAAGj -YTBfMA8GA1UdEwEB/wQFMAMBAf8wTAYDVR0gBEUwQzBBBgtghkgBhvt7hwcBCzAyMDAGCCsGAQUF -BwIBFiRodHRwOi8vd3d3LndlbGxzZmFyZ28uY29tL2NlcnRwb2xpY3kwDQYJKoZIhvcNAQEFBQAD -ggEBANIn3ZwKdyu7IvICtUpKkfnRLb7kuxpo7w6kAOnu5+/u9vnldKTC2FJYxHT7zmu1Oyl5GFrv -m+0fazbuSCUlFLZWohDo7qd/0D+j0MNdJu4HzMPBJCGHHt8qElNvQRbn7a6U+oxy+hNH8Dx+rn0R -OhPs7fpvcmR7nX1/Jv16+yWt6j4pf0zjAFcysLPp7VMX2YuyFA4w6OXVE8Zkr8QA1dhYJPz1j+zx -x32l2w8n0cbyQIjmH/ZhqPRCyLk306m+LFZ4wnKbWV01QIroTmMatukgalHizqSQ33ZwmVxwQ023 -tqcZZE6St8WRPH9IFmV7Fv3L/PvZ1dZPIWU7Sn9Ho/s= ------END CERTIFICATE----- - -Swisscom Root CA 1 -================== ------BEGIN CERTIFICATE----- -MIIF2TCCA8GgAwIBAgIQXAuFXAvnWUHfV8w/f52oNjANBgkqhkiG9w0BAQUFADBkMQswCQYDVQQG -EwJjaDERMA8GA1UEChMIU3dpc3Njb20xJTAjBgNVBAsTHERpZ2l0YWwgQ2VydGlmaWNhdGUgU2Vy -dmljZXMxGzAZBgNVBAMTElN3aXNzY29tIFJvb3QgQ0EgMTAeFw0wNTA4MTgxMjA2MjBaFw0yNTA4 -MTgyMjA2MjBaMGQxCzAJBgNVBAYTAmNoMREwDwYDVQQKEwhTd2lzc2NvbTElMCMGA1UECxMcRGln -aXRhbCBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczEbMBkGA1UEAxMSU3dpc3Njb20gUm9vdCBDQSAxMIIC -IjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0LmwqAzZuz8h+BvVM5OAFmUgdbI9m2BtRsiM -MW8Xw/qabFbtPMWRV8PNq5ZJkCoZSx6jbVfd8StiKHVFXqrWW/oLJdihFvkcxC7mlSpnzNApbjyF -NDhhSbEAn9Y6cV9Nbc5fuankiX9qUvrKm/LcqfmdmUc/TilftKaNXXsLmREDA/7n29uj/x2lzZAe -AR81sH8A25Bvxn570e56eqeqDFdvpG3FEzuwpdntMhy0XmeLVNxzh+XTF3xmUHJd1BpYwdnP2IkC -b6dJtDZd0KTeByy2dbcokdaXvij1mB7qWybJvbCXc9qukSbraMH5ORXWZ0sKbU/Lz7DkQnGMU3nn -7uHbHaBuHYwadzVcFh4rUx80i9Fs/PJnB3r1re3WmquhsUvhzDdf/X/NTa64H5xD+SpYVUNFvJbN 
-cA78yeNmuk6NO4HLFWR7uZToXTNShXEuT46iBhFRyePLoW4xCGQMwtI89Tbo19AOeCMgkckkKmUp -WyL3Ic6DXqTz3kvTaI9GdVyDCW4pa8RwjPWd1yAv/0bSKzjCL3UcPX7ape8eYIVpQtPM+GP+HkM5 -haa2Y0EQs3MevNP6yn0WR+Kn1dCjigoIlmJWbjTb2QK5MHXjBNLnj8KwEUAKrNVxAmKLMb7dxiNY -MUJDLXT5xp6mig/p/r+D5kNXJLrvRjSq1xIBOO0CAwEAAaOBhjCBgzAOBgNVHQ8BAf8EBAMCAYYw -HQYDVR0hBBYwFDASBgdghXQBUwABBgdghXQBUwABMBIGA1UdEwEB/wQIMAYBAf8CAQcwHwYDVR0j -BBgwFoAUAyUv3m+CATpcLNwroWm1Z9SM0/0wHQYDVR0OBBYEFAMlL95vggE6XCzcK6FptWfUjNP9 -MA0GCSqGSIb3DQEBBQUAA4ICAQA1EMvspgQNDQ/NwNurqPKIlwzfky9NfEBWMXrrpA9gzXrzvsMn -jgM+pN0S734edAY8PzHyHHuRMSG08NBsl9Tpl7IkVh5WwzW9iAUPWxAaZOHHgjD5Mq2eUCzneAXQ -MbFamIp1TpBcahQq4FJHgmDmHtqBsfsUC1rxn9KVuj7QG9YVHaO+htXbD8BJZLsuUBlL0iT43R4H -VtA4oJVwIHaM190e3p9xxCPvgxNcoyQVTSlAPGrEqdi3pkSlDfTgnXceQHAm/NrZNuR55LU/vJtl -vrsRls/bxig5OgjOR1tTWsWZ/l2p3e9M1MalrQLmjAcSHm8D0W+go/MpvRLHUKKwf4ipmXeascCl -OS5cfGniLLDqN2qk4Vrh9VDlg++luyqI54zb/W1elxmofmZ1a3Hqv7HHb6D0jqTsNFFbjCYDcKF3 -1QESVwA12yPeDooomf2xEG9L/zgtYE4snOtnta1J7ksfrK/7DZBaZmBwXarNeNQk7shBoJMBkpxq -nvy5JMWzFYJ+vq6VK+uxwNrjAWALXmmshFZhvnEX/h0TD/7Gh0Xp/jKgGg0TpJRVcaUWi7rKibCy -x/yP2FS1k2Kdzs9Z+z0YzirLNRWCXf9UIltxUvu3yf5gmwBBZPCqKuy2QkPOiWaByIufOVQDJdMW -NY6E0F/6MBr1mmz0DlP5OlvRHA== ------END CERTIFICATE----- - -DigiCert Assured ID Root CA -=========================== ------BEGIN CERTIFICATE----- -MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG -EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw -IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx -MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL -ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO -9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy -UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW -/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy -oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf -GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF -66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq -hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc -EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn -SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i -8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe -+o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== ------END CERTIFICATE----- - -DigiCert Global Root CA -======================= ------BEGIN CERTIFICATE----- -MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG -EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw -HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw -MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 -dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn -TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 -BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H -4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y -7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB 
-o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm -8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF -BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr -EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt -tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 -UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk -CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= ------END CERTIFICATE----- - -DigiCert High Assurance EV Root CA -================================== ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG -EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw -KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw -MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ -MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu -Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t -Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS -OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 -MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ -NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe -h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB -Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY -JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ -V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp -myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK -mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe -vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K ------END CERTIFICATE----- - -Certplus Class 2 Primary CA -=========================== ------BEGIN CERTIFICATE----- -MIIDkjCCAnqgAwIBAgIRAIW9S/PY2uNp9pTXX8OlRCMwDQYJKoZIhvcNAQEFBQAwPTELMAkGA1UE -BhMCRlIxETAPBgNVBAoTCENlcnRwbHVzMRswGQYDVQQDExJDbGFzcyAyIFByaW1hcnkgQ0EwHhcN -OTkwNzA3MTcwNTAwWhcNMTkwNzA2MjM1OTU5WjA9MQswCQYDVQQGEwJGUjERMA8GA1UEChMIQ2Vy -dHBsdXMxGzAZBgNVBAMTEkNsYXNzIDIgUHJpbWFyeSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBANxQltAS+DXSCHh6tlJw/W/uz7kRy1134ezpfgSN1sxvc0NXYKwzCkTsA18cgCSR -5aiRVhKC9+Ar9NuuYS6JEI1rbLqzAr3VNsVINyPi8Fo3UjMXEuLRYE2+L0ER4/YXJQyLkcAbmXuZ -Vg2v7tK8R1fjeUl7NIknJITesezpWE7+Tt9avkGtrAjFGA7v0lPubNCdEgETjdyAYveVqUSISnFO -YFWe2yMZeVYHDD9jC1yw4r5+FfyUM1hBOHTE4Y+L3yasH7WLO7dDWWuwJKZtkIvEcupdM5i3y95e -e++U8Rs+yskhwcWYAqqi9lt3m/V+llU0HGdpwPFC40es/CgcZlUCAwEAAaOBjDCBiTAPBgNVHRME -CDAGAQH/AgEKMAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQU43Mt38sOKAze3bOkynm4jrvoMIkwEQYJ -YIZIAYb4QgEBBAQDAgEGMDcGA1UdHwQwMC4wLKAqoCiGJmh0dHA6Ly93d3cuY2VydHBsdXMuY29t -L0NSTC9jbGFzczIuY3JsMA0GCSqGSIb3DQEBBQUAA4IBAQCnVM+IRBnL39R/AN9WM2K191EBkOvD -P9GIROkkXe/nFL0gt5o8AP5tn9uQ3Nf0YtaLcF3n5QRIqWh8yfFC82x/xXp8HVGIutIKPidd3i1R -TtMTZGnkLuPT55sJmabglZvOGtd/vjzOUrMRFcEPF80Du5wlFbqidon8BvEY0JNLDnyCt6X09l/+ -7UCmnYR0ObncHoUW2ikbhiMAybuJfm6AiB4vFLQDJKgybwOaRywwvlbGp0ICcBvqQNi6BQNwB6SW -//1IMwrh3KWBkJtN3X3n57LNXMhqlfil9o3EXXgIvnsG1knPGTZQIy4I5p4FTUcY1Rbpsda2ENW7 -l7+ijrRU ------END CERTIFICATE----- - -DST Root CA X3 -============== ------BEGIN CERTIFICATE----- -MIIDSjCCAjKgAwIBAgIQRK+wgNajJ7qJMDmGLvhAazANBgkqhkiG9w0BAQUFADA/MSQwIgYDVQQK 
-ExtEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdCBDby4xFzAVBgNVBAMTDkRTVCBSb290IENBIFgzMB4X -DTAwMDkzMDIxMTIxOVoXDTIxMDkzMDE0MDExNVowPzEkMCIGA1UEChMbRGlnaXRhbCBTaWduYXR1 -cmUgVHJ1c3QgQ28uMRcwFQYDVQQDEw5EU1QgUm9vdCBDQSBYMzCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAN+v6ZdQCINXtMxiZfaQguzH0yxrMMpb7NnDfcdAwRgUi+DoM3ZJKuM/IUmT -rE4Orz5Iy2Xu/NMhD2XSKtkyj4zl93ewEnu1lcCJo6m67XMuegwGMoOifooUMM0RoOEqOLl5CjH9 -UL2AZd+3UWODyOKIYepLYYHsUmu5ouJLGiifSKOeDNoJjj4XLh7dIN9bxiqKqy69cK3FCxolkHRy -xXtqqzTWMIn/5WgTe1QLyNau7Fqckh49ZLOMxt+/yUFw7BZy1SbsOFU5Q9D8/RhcQPGX69Wam40d -utolucbY38EVAjqr2m7xPi71XAicPNaDaeQQmxkqtilX4+U9m5/wAl0CAwEAAaNCMEAwDwYDVR0T -AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMSnsaR7LHH62+FLkHX/xBVghYkQ -MA0GCSqGSIb3DQEBBQUAA4IBAQCjGiybFwBcqR7uKGY3Or+Dxz9LwwmglSBd49lZRNI+DT69ikug -dB/OEIKcdBodfpga3csTS7MgROSR6cz8faXbauX+5v3gTt23ADq1cEmv8uXrAvHRAosZy5Q6XkjE -GB5YGV8eAlrwDPGxrancWYaLbumR9YbK+rlmM6pZW87ipxZzR8srzJmwN0jP41ZL9c8PDHIyh8bw -RLtTcm1D9SZImlJnt1ir/md2cXjbDaJWFBM5JDGFoqgCWjBH4d1QB7wCCZAA62RjYJsWvIjJEubS -fZGL+T0yjWW06XyxV3bqxbYoOb8VZRzI9neWagqNdwvYkQsEjgfbKbYK7p2CNTUQ ------END CERTIFICATE----- - -DST ACES CA X6 -============== ------BEGIN CERTIFICATE----- -MIIECTCCAvGgAwIBAgIQDV6ZCtadt3js2AdWO4YV2TANBgkqhkiG9w0BAQUFADBbMQswCQYDVQQG -EwJVUzEgMB4GA1UEChMXRGlnaXRhbCBTaWduYXR1cmUgVHJ1c3QxETAPBgNVBAsTCERTVCBBQ0VT -MRcwFQYDVQQDEw5EU1QgQUNFUyBDQSBYNjAeFw0wMzExMjAyMTE5NThaFw0xNzExMjAyMTE5NTha -MFsxCzAJBgNVBAYTAlVTMSAwHgYDVQQKExdEaWdpdGFsIFNpZ25hdHVyZSBUcnVzdDERMA8GA1UE -CxMIRFNUIEFDRVMxFzAVBgNVBAMTDkRTVCBBQ0VTIENBIFg2MIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEAuT31LMmU3HWKlV1j6IR3dma5WZFcRt2SPp/5DgO0PWGSvSMmtWPuktKe1jzI -DZBfZIGxqAgNTNj50wUoUrQBJcWVHAx+PhCEdc/BGZFjz+iokYi5Q1K7gLFViYsx+tC3dr5BPTCa -pCIlF3PoHuLTrCq9Wzgh1SpL11V94zpVvddtawJXa+ZHfAjIgrrep4c9oW24MFbCswKBXy314pow -GCi4ZtPLAZZv6opFVdbgnf9nKxcCpk4aahELfrd755jWjHZvwTvbUJN+5dCOHze4vbrGn2zpfDPy -MjwmR/onJALJfh1biEITajV8fTXpLmaRcpPVMibEdPVTo7NdmvYJywIDAQABo4HIMIHFMA8GA1Ud -EwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgHGMB8GA1UdEQQYMBaBFHBraS1vcHNAdHJ1c3Rkc3Qu -Y29tMGIGA1UdIARbMFkwVwYKYIZIAWUDAgEBATBJMEcGCCsGAQUFBwIBFjtodHRwOi8vd3d3LnRy -dXN0ZHN0LmNvbS9jZXJ0aWZpY2F0ZXMvcG9saWN5L0FDRVMtaW5kZXguaHRtbDAdBgNVHQ4EFgQU -CXIGThhDD+XWzMNqizF7eI+og7gwDQYJKoZIhvcNAQEFBQADggEBAKPYjtay284F5zLNAdMEA+V2 -5FYrnJmQ6AgwbN99Pe7lv7UkQIRJ4dEorsTCOlMwiPH1d25Ryvr/ma8kXxug/fKshMrfqfBfBC6t -Fr8hlxCBPeP/h40y3JTlR4peahPJlJU90u7INJXQgNStMgiAVDzgvVJT11J8smk/f3rPanTK+gQq -nExaBqXpIK1FZg9p8d2/6eMyi/rgwYZNcjwu2JN4Cir42NInPRmJX1p7ijvMDNpRrscL9yuwNwXs -vFcj4jjSm2jzVhKIT0J8uDHEtdvkyCE06UgRNe76x5JXxZ805Mf29w4LTJxoeHtxMcfrHuBnQfO3 -oKfN5XozNmr6mis= ------END CERTIFICATE----- - -TURKTRUST Certificate Services Provider Root 1 -============================================== ------BEGIN CERTIFICATE----- -MIID+zCCAuOgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBtzE/MD0GA1UEAww2VMOcUktUUlVTVCBF -bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGDAJUUjEP -MA0GA1UEBwwGQU5LQVJBMVYwVAYDVQQKDE0oYykgMjAwNSBUw5xSS1RSVVNUIEJpbGdpIMSwbGV0 -acWfaW0gdmUgQmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLjAeFw0wNTA1MTMx -MDI3MTdaFw0xNTAzMjIxMDI3MTdaMIG3MT8wPQYDVQQDDDZUw5xSS1RSVVNUIEVsZWt0cm9uaWsg -U2VydGlmaWthIEhpem1ldCBTYcSfbGF5xLFjxLFzxLExCzAJBgNVBAYMAlRSMQ8wDQYDVQQHDAZB -TktBUkExVjBUBgNVBAoMTShjKSAyMDA1IFTDnFJLVFJVU1QgQmlsZ2kgxLBsZXRpxZ9pbSB2ZSBC -aWxpxZ9pbSBHw7x2ZW5sacSfaSBIaXptZXRsZXJpIEEuxZ4uMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEAylIF1mMD2Bxf3dJ7XfIMYGFbazt0K3gNfUW9InTojAPBxhEqPZW8qZSwu5GX -yGl8hMW0kWxsE2qkVa2kheiVfrMArwDCBRj1cJ02i67L5BuBf5OI+2pVu32Fks66WJ/bMsW9Xe8i 
-Si9BB35JYbOG7E6mQW6EvAPs9TscyB/C7qju6hJKjRTP8wrgUDn5CDX4EVmt5yLqS8oUBt5CurKZ -8y1UiBAG6uEaPj1nH/vO+3yC6BFdSsG5FOpU2WabfIl9BJpiyelSPJ6c79L1JuTm5Rh8i27fbMx4 -W09ysstcP4wFjdFMjK2Sx+F4f2VsSQZQLJ4ywtdKxnWKWU51b0dewQIDAQABoxAwDjAMBgNVHRME -BTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAV9VX/N5aAWSGk/KEVTCD21F/aAyT8z5Aa9CEKmu46 -sWrv7/hg0Uw2ZkUd82YCdAR7kjCo3gp2D++Vbr3JN+YaDayJSFvMgzbC9UZcWYJWtNX+I7TYVBxE -q8Sn5RTOPEFhfEPmzcSBCYsk+1Ql1haolgxnB2+zUEfjHCQo3SqYpGH+2+oSN7wBGjSFvW5P55Fy -B0SFHljKVETd96y5y4khctuPwGkplyqjrhgjlxxBKot8KsF8kOipKMDTkcatKIdAaLX/7KfS0zgY -nNN9aV3wxqUeJBujR/xpB2jn5Jq07Q+hh4cCzofSSE7hvP/L8XKSRGQDJereW26fyfJOrN3H ------END CERTIFICATE----- - -TURKTRUST Certificate Services Provider Root 2 -============================================== ------BEGIN CERTIFICATE----- -MIIEPDCCAySgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBF -bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEP -MA0GA1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUg -QmlsacWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwHhcN -MDUxMTA3MTAwNzU3WhcNMTUwOTE2MTAwNzU3WjCBvjE/MD0GA1UEAww2VMOcUktUUlVTVCBFbGVr -dHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMQswCQYDVQQGEwJUUjEPMA0G -A1UEBwwGQW5rYXJhMV0wWwYDVQQKDFRUw5xSS1RSVVNUIEJpbGdpIMSwbGV0acWfaW0gdmUgQmls -acWfaW0gR8O8dmVubGnEn2kgSGl6bWV0bGVyaSBBLsWeLiAoYykgS2FzxLFtIDIwMDUwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCpNn7DkUNMwxmYCMjHWHtPFoylzkkBH3MOrHUTpvqe -LCDe2JAOCtFp0if7qnefJ1Il4std2NiDUBd9irWCPwSOtNXwSadktx4uXyCcUHVPr+G1QRT0mJKI -x+XlZEdhR3n9wFHxwZnn3M5q+6+1ATDcRhzviuyV79z/rxAc653YsKpqhRgNF8k+v/Gb0AmJQv2g -QrSdiVFVKc8bcLyEVK3BEx+Y9C52YItdP5qtygy/p1Zbj3e41Z55SZI/4PGXJHpsmxcPbe9TmJEr -5A++WXkHeLuXlfSfadRYhwqp48y2WBmfJiGxxFmNskF1wK1pzpwACPI2/z7woQ8arBT9pmAPAgMB -AAGjQzBBMB0GA1UdDgQWBBTZN7NOBf3Zz58SFq62iS/rJTqIHDAPBgNVHQ8BAf8EBQMDBwYAMA8G -A1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAHJglrfJ3NgpXiOFX7KzLXb7iNcX/ntt -Rbj2hWyfIvwqECLsqrkw9qtY1jkQMZkpAL2JZkH7dN6RwRgLn7Vhy506vvWolKMiVW4XSf/SKfE4 -Jl3vpao6+XF75tpYHdN0wgH6PmlYX63LaL4ULptswLbcoCb6dxriJNoaN+BnrdFzgw2lGh1uEpJ+ -hGIAF728JRhX8tepb1mIvDS3LoV4nZbcFMMsilKbloxSZj2GFotHuFEJjOp9zYhys2AzsfAKRO8P -9Qk3iCQOLGsgOqL6EfJANZxEaGM7rDNvY7wsu/LSy3Z9fYjYHcgFHW68lKlmjHdxx/qR+i9Rnuk5 -UrbnBEI= ------END CERTIFICATE----- - -SwissSign Gold CA - G2 -====================== ------BEGIN CERTIFICATE----- -MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw -EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN -MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp -c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B -AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq -t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C -jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg -vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF -ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR -AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend -jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO -peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR -7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi -GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 
-OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov -L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm -5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr -44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf -Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m -Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp -mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk -vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf -KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br -NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj -viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ ------END CERTIFICATE----- - -SwissSign Silver CA - G2 -======================== ------BEGIN CERTIFICATE----- -MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT -BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X -DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 -aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG -9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 -N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm -+/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH -6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu -MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h -qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 -FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs -ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc -celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X -CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ -BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB -tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 -cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P -4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F -kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L -3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx -/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa -DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP -e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu -WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ -DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub -DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u ------END CERTIFICATE----- - -GeoTrust Primary Certification Authority -======================================== ------BEGIN CERTIFICATE----- -MIIDfDCCAmSgAwIBAgIQGKy1av1pthU6Y2yv2vrEoTANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQG -EwJVUzEWMBQGA1UEChMNR2VvVHJ1c3QgSW5jLjExMC8GA1UEAxMoR2VvVHJ1c3QgUHJpbWFyeSBD -ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNjExMjcwMDAwMDBaFw0zNjA3MTYyMzU5NTlaMFgx -CzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTEwLwYDVQQDEyhHZW9UcnVzdCBQ -cmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB 
-CgKCAQEAvrgVe//UfH1nrYNke8hCUy3f9oQIIGHWAVlqnEQRr+92/ZV+zmEwu3qDXwK9AWbK7hWN -b6EwnL2hhZ6UOvNWiAAxz9juapYC2e0DjPt1befquFUWBRaa9OBesYjAZIVcFU2Ix7e64HXprQU9 -nceJSOC7KMgD4TCTZF5SwFlwIjVXiIrxlQqD17wxcwE07e9GceBrAqg1cmuXm2bgyxx5X9gaBGge -RwLmnWDiNpcB3841kt++Z8dtd1k7j53WkBWUvEI0EME5+bEnPn7WinXFsq+W06Lem+SYvn3h6YGt -tm/81w7a4DSwDRp35+MImO9Y+pyEtzavwt+s0vQQBnBxNQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD -AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQULNVQQZcVi/CPNmFbSvtr2ZnJM5IwDQYJKoZI -hvcNAQEFBQADggEBAFpwfyzdtzRP9YZRqSa+S7iq8XEN3GHHoOo0Hnp3DwQ16CePbJC/kRYkRj5K -Ts4rFtULUh38H2eiAkUxT87z+gOneZ1TatnaYzr4gNfTmeGl4b7UVXGYNTq+k+qurUKykG/g/CFN -NWMziUnWm07Kx+dOCQD32sfvmWKZd7aVIl6KoKv0uHiYyjgZmclynnjNS6yvGaBzEi38wkG6gZHa -Floxt/m0cYASSJlyc1pZU8FjUjPtp8nSOQJw+uCxQmYpqptR7TBUIhRf2asdweSU8Pj1K/fqynhG -1riR/aYNKxoUAT6A8EKglQdebc3MS6RFjasS6LPeWuWgfOgPIh1a6Vk= ------END CERTIFICATE----- - -thawte Primary Root CA -====================== ------BEGIN CERTIFICATE----- -MIIEIDCCAwigAwIBAgIQNE7VVyDV7exJ9C/ON9srbTANBgkqhkiG9w0BAQUFADCBqTELMAkGA1UE -BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 -aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv -cml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMTFnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwHhcNMDYxMTE3 -MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCBqTELMAkGA1UEBhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwg -SW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMv -KGMpIDIwMDYgdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxHzAdBgNVBAMT -FnRoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCs -oPD7gFnUnMekz52hWXMJEEUMDSxuaPFsW0hoSVk3/AszGcJ3f8wQLZU0HObrTQmnHNK4yZc2AreJ -1CRfBsDMRJSUjQJib+ta3RGNKJpchJAQeg29dGYvajig4tVUROsdB58Hum/u6f1OCyn1PoSgAfGc -q/gcfomk6KHYcWUNo1F77rzSImANuVud37r8UVsLr5iy6S7pBOhih94ryNdOwUxkHt3Ph1i6Sk/K -aAcdHJ1KxtUvkcx8cXIcxcBn6zL9yZJclNqFwJu/U30rCfSMnZEfl2pSy94JNqR32HuHUETVPm4p -afs5SSYeCaWAe0At6+gnhcn+Yf1+5nyXHdWdAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYD -VR0PAQH/BAQDAgEGMB0GA1UdDgQWBBR7W0XPr87Lev0xkhpqtvNG61dIUDANBgkqhkiG9w0BAQUF -AAOCAQEAeRHAS7ORtvzw6WfUDW5FvlXok9LOAz/t2iWwHVfLHjp2oEzsUHboZHIMpKnxuIvW1oeE -uzLlQRHAd9mzYJ3rG9XRbkREqaYB7FViHXe4XI5ISXycO1cRrK1zN44veFyQaEfZYGDm/Ac9IiAX -xPcW6cTYcvnIc3zfFi8VqT79aie2oetaupgf1eNNZAqdE8hhuvU5HIe6uL17In/2/qxAeeWsEG89 -jxt5dovEN7MhGITlNgDrYyCZuen+MwS7QcjBAvlEYyCegc5C09Y/LHbTY5xZ3Y+m4Q6gLkH3LpVH -z7z9M/P2C2F+fpErgUfCJzDupxBdN49cOSvkBPB7jVaMaA== ------END CERTIFICATE----- - -VeriSign Class 3 Public Primary Certification Authority - G5 -============================================================ ------BEGIN CERTIFICATE----- -MIIE0zCCA7ugAwIBAgIQGNrRniZ96LtKIVjNzGs7SjANBgkqhkiG9w0BAQUFADCByjELMAkGA1UE -BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO -ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk -IHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRp -ZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwHhcNMDYxMTA4MDAwMDAwWhcNMzYwNzE2MjM1OTU5WjCB -yjELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2ln -biBUcnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNiBWZXJpU2lnbiwgSW5jLiAtIEZvciBh -dXRob3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmlt -YXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQCvJAgIKXo1nmAMqudLO07cfLw8RRy7K+D+KQL5VwijZIUVJ/XxrcgxiV0i6CqqpkKz -j/i5Vbext0uz/o9+B1fs70PbZmIVYc9gDaTY3vjgw2IIPVQT60nKWVSFJuUrjxuf6/WhkcIzSdhD -Y2pSS9KP6HBRTdGJaXvHcPaz3BJ023tdS1bTlr8Vd6Gw9KIl8q8ckmcY5fQGBO+QueQA5N06tRn/ 
-Arr0PO7gi+s3i+z016zy9vA9r911kTMZHRxAy3QkGSGT2RT+rCpSx4/VBEnkjWNHiDxpg8v+R70r -fk/Fla4OndTRQ8Bnc+MUCH7lP59zuDMKz10/NIeWiu5T6CUVAgMBAAGjgbIwga8wDwYDVR0TAQH/ -BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2Uv -Z2lmMCEwHzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVy -aXNpZ24uY29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFH/TZafC3ey78DAJ80M5+gKvMzEzMA0GCSqG -SIb3DQEBBQUAA4IBAQCTJEowX2LP2BqYLz3q3JktvXf2pXkiOOzEp6B4Eq1iDkVwZMXnl2YtmAl+ -X6/WzChl8gGqCBpH3vn5fJJaCGkgDdk+bW48DW7Y5gaRQBi5+MHt39tBquCWIMnNZBU4gcmU7qKE -KQsTb47bDN0lAtukixlE0kF6BWlKWE9gyn6CagsCqiUXObXbf+eEZSqVir2G3l6BFoMtEMze/aiC -Km0oHw0LxOXnGiYZ4fQRbxC1lfznQgUy286dUV4otp6F01vvpX1FQHKOtw5rDgb7MzVIcbidJ4vE -ZV8NhnacRHr2lVz2XTIIM6RUthg/aFzyQkqFOFSDX9HoLPKsEdao7WNq ------END CERTIFICATE----- - -SecureTrust CA -============== ------BEGIN CERTIFICATE----- -MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG -EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy -dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe -BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC -ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX -OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t -DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH -GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b -01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH -ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ -BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj -aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ -KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu -SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf -mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ -nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR -3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= ------END CERTIFICATE----- - -Secure Global CA -================ ------BEGIN CERTIFICATE----- -MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG -EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH -bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg -MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg -Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx -YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ -bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g -8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV -HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi -0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud -EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn -oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA -MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ -OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn -CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 -3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc 
-f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW ------END CERTIFICATE----- - -COMODO Certification Authority -============================== ------BEGIN CERTIFICATE----- -MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE -BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG -A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 -dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb -MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD -T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH -+7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww -xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV -4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA -1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI -rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E -BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k -b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC -AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP -OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ -RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc -IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN -+8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== ------END CERTIFICATE----- - -Network Solutions Certificate Authority -======================================= ------BEGIN CERTIFICATE----- -MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG -EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr -IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx -MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu -MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx -jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT -aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT -crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc -/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB -AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv -bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA -A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q -4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ -GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv -wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD -ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey ------END CERTIFICATE----- - -WellsSecure Public Root Certificate Authority -============================================= ------BEGIN CERTIFICATE----- -MIIEvTCCA6WgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoM -F1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYw -NAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcN 
-MDcxMjEzMTcwNzU0WhcNMjIxMjE0MDAwNzU0WjCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dl -bGxzIEZhcmdvIFdlbGxzU2VjdXJlMRwwGgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYD -VQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDub7S9eeKPCCGeOARBJe+rWxxTkqxtnt3CxC5FlAM1 -iGd0V+PfjLindo8796jE2yljDpFoNoqXjopxaAkH5OjUDk/41itMpBb570OYj7OeUt9tkTmPOL13 -i0Nj67eT/DBMHAGTthP796EfvyXhdDcsHqRePGj4S78NuR4uNuip5Kf4D8uCdXw1LSLWwr8L87T8 -bJVhHlfXBIEyg1J55oNjz7fLY4sR4r1e6/aN7ZVyKLSsEmLpSjPmgzKuBXWVvYSV2ypcm44uDLiB -K0HmOFafSZtsdvqKXfcBeYF8wYNABf5x/Qw/zE5gCQ5lRxAvAcAFP4/4s0HvWkJ+We/SlwxlAgMB -AAGjggE0MIIBMDAPBgNVHRMBAf8EBTADAQH/MDkGA1UdHwQyMDAwLqAsoCqGKGh0dHA6Ly9jcmwu -cGtpLndlbGxzZmFyZ28uY29tL3dzcHJjYS5jcmwwDgYDVR0PAQH/BAQDAgHGMB0GA1UdDgQWBBQm -lRkQ2eihl5H/3BnZtQQ+0nMKajCBsgYDVR0jBIGqMIGngBQmlRkQ2eihl5H/3BnZtQQ+0nMKaqGB -i6SBiDCBhTELMAkGA1UEBhMCVVMxIDAeBgNVBAoMF1dlbGxzIEZhcmdvIFdlbGxzU2VjdXJlMRww -GgYDVQQLDBNXZWxscyBGYXJnbyBCYW5rIE5BMTYwNAYDVQQDDC1XZWxsc1NlY3VyZSBQdWJsaWMg -Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHmCAQEwDQYJKoZIhvcNAQEFBQADggEBALkVsUSRzCPI -K0134/iaeycNzXK7mQDKfGYZUMbVmO2rvwNa5U3lHshPcZeG1eMd/ZDJPHV3V3p9+N701NX3leZ0 -bh08rnyd2wIDBSxxSyU+B+NemvVmFymIGjifz6pBA4SXa5M4esowRBskRDPQ5NHcKDj0E0M1NSlj -qHyita04pO2t/caaH/+Xc/77szWnk4bGdpEA5qxRFsQnMlzbc9qlk1eOPm01JghZ1edE13YgY+es -E2fDbbFwRnzVlhE9iW9dqKHrjQrawx0zbKPqZxmamX9LPYNRKh3KL4YMon4QLSvUFpULB6ouFJJJ -tylv2G0xffX8oRAHh84vWdw+WNs= ------END CERTIFICATE----- - -COMODO ECC Certification Authority -================================== ------BEGIN CERTIFICATE----- -MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC -R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE -ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB -dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix -GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR -Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo -b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X -4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni -wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E -BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG -FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA -U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= ------END CERTIFICATE----- - -IGC/A -===== ------BEGIN CERTIFICATE----- -MIIEAjCCAuqgAwIBAgIFORFFEJQwDQYJKoZIhvcNAQEFBQAwgYUxCzAJBgNVBAYTAkZSMQ8wDQYD -VQQIEwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVE -Q1NTSTEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZy -MB4XDTAyMTIxMzE0MjkyM1oXDTIwMTAxNzE0MjkyMlowgYUxCzAJBgNVBAYTAkZSMQ8wDQYDVQQI -EwZGcmFuY2UxDjAMBgNVBAcTBVBhcmlzMRAwDgYDVQQKEwdQTS9TR0ROMQ4wDAYDVQQLEwVEQ1NT -STEOMAwGA1UEAxMFSUdDL0ExIzAhBgkqhkiG9w0BCQEWFGlnY2FAc2dkbi5wbS5nb3V2LmZyMIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsh/R0GLFMzvABIaIs9z4iPf930Pfeo2aSVz2 -TqrMHLmh6yeJ8kbpO0px1R2OLc/mratjUMdUC24SyZA2xtgv2pGqaMVy/hcKshd+ebUyiHDKcMCW -So7kVc0dJ5S/znIq7Fz5cyD+vfcuiWe4u0dzEvfRNWk68gq5rv9GQkaiv6GFGvm/5P9JhfejcIYy -HF2fYPepraX/z9E0+X1bF8bc1g4oa8Ld8fUzaJ1O/Id8NhLWo4DoQw1VYZTqZDdH6nfK0LJYBcNd -frGoRpAxVs5wKpayMLh35nnAvSk7/ZR3TL0gzUEl4C7HG7vupARB0l2tEmqKm0f7yd1GQOGdPDPQ -tQIDAQABo3cwdTAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIBRjAVBgNVHSAEDjAMMAoGCCqB -egF5AQEBMB0GA1UdDgQWBBSjBS8YYFDCiQrdKyFP/45OqDAxNjAfBgNVHSMEGDAWgBSjBS8YYFDC 
-iQrdKyFP/45OqDAxNjANBgkqhkiG9w0BAQUFAAOCAQEABdwm2Pp3FURo/C9mOnTgXeQp/wYHE4RK -q89toB9RlPhJy3Q2FLwV3duJL92PoF189RLrn544pEfMs5bZvpwlqwN+Mw+VgQ39FuCIvjfwbF3Q -MZsyK10XZZOYYLxuj7GoPB7ZHPOpJkL5ZB3C55L29B5aqhlSXa/oovdgoPaN8In1buAKBQGVyYsg -Crpa/JosPL3Dt8ldeCUFP1YUmwza+zpI/pdpXsoQhvdOlgQITeywvl3cO45Pwf2aNjSaTFR+FwNI -lQgRHAdvhQh+XU3Endv7rs6y0bO4g2wdsrN58dhwmX7wEwLOXt1R0982gaEbeC9xs/FZTEYYKKuF -0mBWWg== ------END CERTIFICATE----- - -Security Communication EV RootCA1 -================================= ------BEGIN CERTIFICATE----- -MIIDfTCCAmWgAwIBAgIBADANBgkqhkiG9w0BAQUFADBgMQswCQYDVQQGEwJKUDElMCMGA1UEChMc -U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEqMCgGA1UECxMhU2VjdXJpdHkgQ29tbXVuaWNh -dGlvbiBFViBSb290Q0ExMB4XDTA3MDYwNjAyMTIzMloXDTM3MDYwNjAyMTIzMlowYDELMAkGA1UE -BhMCSlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xKjAoBgNVBAsTIVNl -Y3VyaXR5IENvbW11bmljYXRpb24gRVYgUm9vdENBMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBALx/7FebJOD+nLpCeamIivqA4PUHKUPqjgo0No0c+qe1OXj/l3X3L+SqawSERMqm4miO -/VVQYg+kcQ7OBzgtQoVQrTyWb4vVog7P3kmJPdZkLjjlHmy1V4qe70gOzXppFodEtZDkBp2uoQSX -WHnvIEqCa4wiv+wfD+mEce3xDuS4GBPMVjZd0ZoeUWs5bmB2iDQL87PRsJ3KYeJkHcFGB7hj3R4z -ZbOOCVVSPbW9/wfrrWFVGCypaZhKqkDFMxRldAD5kd6vA0jFQFTcD4SQaCDFkpbcLuUCRarAX1T4 -bepJz11sS6/vmsJWXMY1VkJqMF/Cq/biPT+zyRGPMUzXn0kCAwEAAaNCMEAwHQYDVR0OBBYEFDVK -9U2vP9eCOKyrcWUXdYydVZPmMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqG -SIb3DQEBBQUAA4IBAQCoh+ns+EBnXcPBZsdAS5f8hxOQWsTvoMpfi7ent/HWtWS3irO4G8za+6xm -iEHO6Pzk2x6Ipu0nUBsCMCRGef4Eh3CXQHPRwMFXGZpppSeZq51ihPZRwSzJIxXYKLerJRO1RuGG -Av8mjMSIkh1W/hln8lXkgKNrnKt34VFxDSDbEJrbvXZ5B3eZKK2aXtqxT0QsNY6llsf9g/BYxnnW -mHyojf6GPgcWkuF75x3sM3Z+Qi5KhfmRiWiEA4Glm5q+4zfFVKtWOxgtQaQM+ELbmaDgcm+7XeEW -T1MKZPlO9L9OVL14bIjqv5wTJMJwaaJ/D8g8rQjJsJhAoyrniIPtd490 ------END CERTIFICATE----- - -OISTE WISeKey Global Root GA CA -=============================== ------BEGIN CERTIFICATE----- -MIID8TCCAtmgAwIBAgIQQT1yx/RrH4FDffHSKFTfmjANBgkqhkiG9w0BAQUFADCBijELMAkGA1UE -BhMCQ0gxEDAOBgNVBAoTB1dJU2VLZXkxGzAZBgNVBAsTEkNvcHlyaWdodCAoYykgMjAwNTEiMCAG -A1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBH -bG9iYWwgUm9vdCBHQSBDQTAeFw0wNTEyMTExNjAzNDRaFw0zNzEyMTExNjA5NTFaMIGKMQswCQYD -VQQGEwJDSDEQMA4GA1UEChMHV0lTZUtleTEbMBkGA1UECxMSQ29weXJpZ2h0IChjKSAyMDA1MSIw -IAYDVQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5 -IEdsb2JhbCBSb290IEdBIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAy0+zAJs9 -Nt350UlqaxBJH+zYK7LG+DKBKUOVTJoZIyEVRd7jyBxRVVuuk+g3/ytr6dTqvirdqFEr12bDYVxg -Asj1znJ7O7jyTmUIms2kahnBAbtzptf2w93NvKSLtZlhuAGio9RN1AU9ka34tAhxZK9w8RxrfvbD -d50kc3vkDIzh2TbhmYsFmQvtRTEJysIA2/dyoJaqlYfQjse2YXMNdmaM3Bu0Y6Kff5MTMPGhJ9vZ -/yxViJGg4E8HsChWjBgbl0SOid3gF27nKu+POQoxhILYQBRJLnpB5Kf+42TMwVlxSywhp1t94B3R -LoGbw9ho972WG6xwsRYUC9tguSYBBQIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUw -AwEB/zAdBgNVHQ4EFgQUswN+rja8sHnR3JQmthG+IbJphpQwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ -KoZIhvcNAQEFBQADggEBAEuh/wuHbrP5wUOxSPMowB0uyQlB+pQAHKSkq0lPjz0e701vvbyk9vIm -MMkQyh2I+3QZH4VFvbBsUfk2ftv1TDI6QU9bR8/oCy22xBmddMVHxjtqD6wU2zz0c5ypBd8A3HR4 -+vg1YFkCExh8vPtNsCBtQ7tgMHpnM1zFmdH4LTlSc/uMqpclXHLZCB6rTjzjgTGfA6b7wP4piFXa -hNVQA7bihKOmNqoROgHhGEvWRGizPflTdISzRpFGlgC3gCy24eMQ4tui5yiPAZZiFj4A4xylNoEY -okxSdsARo27mHbrjWr42U8U+dY+GaSlYU7Wcu2+fXMUY7N0v4ZjJ/L7fCg0= ------END CERTIFICATE----- - -Microsec e-Szigno Root CA -========================= ------BEGIN CERTIFICATE----- -MIIHqDCCBpCgAwIBAgIRAMy4579OKRr9otxmpRwsDxEwDQYJKoZIhvcNAQEFBQAwcjELMAkGA1UE -BhMCSFUxETAPBgNVBAcTCEJ1ZGFwZXN0MRYwFAYDVQQKEw1NaWNyb3NlYyBMdGQuMRQwEgYDVQQL 
-EwtlLVN6aWdubyBDQTEiMCAGA1UEAxMZTWljcm9zZWMgZS1Temlnbm8gUm9vdCBDQTAeFw0wNTA0 -MDYxMjI4NDRaFw0xNzA0MDYxMjI4NDRaMHIxCzAJBgNVBAYTAkhVMREwDwYDVQQHEwhCdWRhcGVz -dDEWMBQGA1UEChMNTWljcm9zZWMgTHRkLjEUMBIGA1UECxMLZS1Temlnbm8gQ0ExIjAgBgNVBAMT -GU1pY3Jvc2VjIGUtU3ppZ25vIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIB -AQDtyADVgXvNOABHzNuEwSFpLHSQDCHZU4ftPkNEU6+r+ICbPHiN1I2uuO/TEdyB5s87lozWbxXG -d36hL+BfkrYn13aaHUM86tnsL+4582pnS4uCzyL4ZVX+LMsvfUh6PXX5qqAnu3jCBspRwn5mS6/N -oqdNAoI/gqyFxuEPkEeZlApxcpMqyabAvjxWTHOSJ/FrtfX9/DAFYJLG65Z+AZHCabEeHXtTRbjc -QR/Ji3HWVBTji1R4P770Yjtb9aPs1ZJ04nQw7wHb4dSrmZsqa/i9phyGI0Jf7Enemotb9HI6QMVJ -PqW+jqpx62z69Rrkav17fVVA71hu5tnVvCSrwe+3AgMBAAGjggQ3MIIEMzBnBggrBgEFBQcBAQRb -MFkwKAYIKwYBBQUHMAGGHGh0dHBzOi8vcmNhLmUtc3ppZ25vLmh1L29jc3AwLQYIKwYBBQUHMAKG -IWh0dHA6Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNydDAPBgNVHRMBAf8EBTADAQH/MIIBcwYD -VR0gBIIBajCCAWYwggFiBgwrBgEEAYGoGAIBAQEwggFQMCgGCCsGAQUFBwIBFhxodHRwOi8vd3d3 -LmUtc3ppZ25vLmh1L1NaU1ovMIIBIgYIKwYBBQUHAgIwggEUHoIBEABBACAAdABhAG4A+gBzAO0A -dAB2AOEAbgB5ACAA6QByAHQAZQBsAG0AZQB6AOkAcwDpAGgAZQB6ACAA6QBzACAAZQBsAGYAbwBn -AGEAZADhAHMA4QBoAG8AegAgAGEAIABTAHoAbwBsAGcA4QBsAHQAYQB0APMAIABTAHoAbwBsAGcA -4QBsAHQAYQB0AOEAcwBpACAAUwB6AGEAYgDhAGwAeQB6AGEAdABhACAAcwB6AGUAcgBpAG4AdAAg -AGsAZQBsAGwAIABlAGwAagDhAHIAbgBpADoAIABoAHQAdABwADoALwAvAHcAdwB3AC4AZQAtAHMA -egBpAGcAbgBvAC4AaAB1AC8AUwBaAFMAWgAvMIHIBgNVHR8EgcAwgb0wgbqggbeggbSGIWh0dHA6 -Ly93d3cuZS1zemlnbm8uaHUvUm9vdENBLmNybIaBjmxkYXA6Ly9sZGFwLmUtc3ppZ25vLmh1L0NO -PU1pY3Jvc2VjJTIwZS1Temlnbm8lMjBSb290JTIwQ0EsT1U9ZS1Temlnbm8lMjBDQSxPPU1pY3Jv -c2VjJTIwTHRkLixMPUJ1ZGFwZXN0LEM9SFU/Y2VydGlmaWNhdGVSZXZvY2F0aW9uTGlzdDtiaW5h -cnkwDgYDVR0PAQH/BAQDAgEGMIGWBgNVHREEgY4wgYuBEGluZm9AZS1zemlnbm8uaHWkdzB1MSMw -IQYDVQQDDBpNaWNyb3NlYyBlLVN6aWduw7MgUm9vdCBDQTEWMBQGA1UECwwNZS1TemlnbsOzIEhT -WjEWMBQGA1UEChMNTWljcm9zZWMgS2Z0LjERMA8GA1UEBxMIQnVkYXBlc3QxCzAJBgNVBAYTAkhV -MIGsBgNVHSMEgaQwgaGAFMegSXUWYYTbMUuE0vE3QJDvTtz3oXakdDByMQswCQYDVQQGEwJIVTER -MA8GA1UEBxMIQnVkYXBlc3QxFjAUBgNVBAoTDU1pY3Jvc2VjIEx0ZC4xFDASBgNVBAsTC2UtU3pp -Z25vIENBMSIwIAYDVQQDExlNaWNyb3NlYyBlLVN6aWdubyBSb290IENBghEAzLjnv04pGv2i3Gal -HCwPETAdBgNVHQ4EFgQUx6BJdRZhhNsxS4TS8TdAkO9O3PcwDQYJKoZIhvcNAQEFBQADggEBANMT -nGZjWS7KXHAM/IO8VbH0jgdsZifOwTsgqRy7RlRw7lrMoHfqaEQn6/Ip3Xep1fvj1KcExJW4C+FE -aGAHQzAxQmHl7tnlJNUb3+FKG6qfx1/4ehHqE5MAyopYse7tDk2016g2JnzgOsHVV4Lxdbb9iV/a -86g4nzUGCM4ilb7N1fy+W955a9x6qWVmvrElWl/tftOsRm1M9DKHtCAE4Gx4sHfRhUZLphK3dehK -yVZs15KrnfVJONJPU+NVkBHbmJbGSfI+9J8b4PeI3CVimUTYc78/MPMMNz7UwiiAc7EBt51alhQB -S6kRnSlqLtBdgcDPsiBDxwPgN05dCtxZICU= ------END CERTIFICATE----- - -Certigna -======== ------BEGIN CERTIFICATE----- -MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw -EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 -MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI -Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q -XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH -GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p -ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg -DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf -Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ -tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ -BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J -SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA 
-hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ -ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu -PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY -1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw -WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== ------END CERTIFICATE----- - -AC Ra\xC3\xADz Certic\xC3\xA1mara S.A. -====================================== ------BEGIN CERTIFICATE----- -MIIGZjCCBE6gAwIBAgIPB35Sk3vgFeNX8GmMy+wMMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNVBAYT -AkNPMUcwRQYDVQQKDD5Tb2NpZWRhZCBDYW1lcmFsIGRlIENlcnRpZmljYWNpw7NuIERpZ2l0YWwg -LSBDZXJ0aWPDoW1hcmEgUy5BLjEjMCEGA1UEAwwaQUMgUmHDrXogQ2VydGljw6FtYXJhIFMuQS4w -HhcNMDYxMTI3MjA0NjI5WhcNMzAwNDAyMjE0MjAyWjB7MQswCQYDVQQGEwJDTzFHMEUGA1UECgw+ -U29jaWVkYWQgQ2FtZXJhbCBkZSBDZXJ0aWZpY2FjacOzbiBEaWdpdGFsIC0gQ2VydGljw6FtYXJh -IFMuQS4xIzAhBgNVBAMMGkFDIFJhw616IENlcnRpY8OhbWFyYSBTLkEuMIICIjANBgkqhkiG9w0B -AQEFAAOCAg8AMIICCgKCAgEAq2uJo1PMSCMI+8PPUZYILrgIem08kBeGqentLhM0R7LQcNzJPNCN -yu5LF6vQhbCnIwTLqKL85XXbQMpiiY9QngE9JlsYhBzLfDe3fezTf3MZsGqy2IiKLUV0qPezuMDU -2s0iiXRNWhU5cxh0T7XrmafBHoi0wpOQY5fzp6cSsgkiBzPZkc0OnB8OIMfuuzONj8LSWKdf/WU3 -4ojC2I+GdV75LaeHM/J4Ny+LvB2GNzmxlPLYvEqcgxhaBvzz1NS6jBUJJfD5to0EfhcSM2tXSExP -2yYe68yQ54v5aHxwD6Mq0Do43zeX4lvegGHTgNiRg0JaTASJaBE8rF9ogEHMYELODVoqDA+bMMCm -8Ibbq0nXl21Ii/kDwFJnmxL3wvIumGVC2daa49AZMQyth9VXAnow6IYm+48jilSH5L887uvDdUhf -HjlvgWJsxS3EF1QZtzeNnDeRyPYL1epjb4OsOMLzP96a++EjYfDIJss2yKHzMI+ko6Kh3VOz3vCa -Mh+DkXkwwakfU5tTohVTP92dsxA7SH2JD/ztA/X7JWR1DhcZDY8AFmd5ekD8LVkH2ZD6mq093ICK -5lw1omdMEWux+IBkAC1vImHFrEsm5VoQgpukg3s0956JkSCXjrdCx2bD0Omk1vUgjcTDlaxECp1b -czwmPS9KvqfJpxAe+59QafMCAwEAAaOB5jCB4zAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQE -AwIBBjAdBgNVHQ4EFgQU0QnQ6dfOeXRU+Tows/RtLAMDG2gwgaAGA1UdIASBmDCBlTCBkgYEVR0g -ADCBiTArBggrBgEFBQcCARYfaHR0cDovL3d3dy5jZXJ0aWNhbWFyYS5jb20vZHBjLzBaBggrBgEF -BQcCAjBOGkxMaW1pdGFjaW9uZXMgZGUgZ2FyYW507WFzIGRlIGVzdGUgY2VydGlmaWNhZG8gc2Ug -cHVlZGVuIGVuY29udHJhciBlbiBsYSBEUEMuMA0GCSqGSIb3DQEBBQUAA4ICAQBclLW4RZFNjmEf -AygPU3zmpFmps4p6xbD/CHwso3EcIRNnoZUSQDWDg4902zNc8El2CoFS3UnUmjIz75uny3XlesuX -EpBcunvFm9+7OSPI/5jOCk0iAUgHforA1SBClETvv3eiiWdIG0ADBaGJ7M9i4z0ldma/Jre7Ir5v -/zlXdLp6yQGVwZVR6Kss+LGGIOk/yzVb0hfpKv6DExdA7ohiZVvVO2Dpezy4ydV/NgIlqmjCMRW3 -MGXrfx1IebHPOeJCgBbT9ZMj/EyXyVo3bHwi2ErN0o42gzmRkBDI8ck1fj+404HGIGQatlDCIaR4 -3NAvO2STdPCWkPHv+wlaNECW8DYSwaN0jJN+Qd53i+yG2dIPPy3RzECiiWZIHiCznCNZc6lEc7wk -eZBWN7PGKX6jD/EpOe9+XCgycDWs2rjIdWb8m0w5R44bb5tNAlQiM+9hup4phO9OSzNHdpdqy35f -/RWmnkJDW2ZaiogN9xa5P1FlK2Zqi9E4UqLWRhH6/JocdJ6PlwsCT2TG9WjTSy3/pDceiz+/RL5h -RqGEPQgnTIEgd4kI6mdAXmwIUV80WoyWaM3X94nCHNMyAK9Sy9NgWyo6R35rMDOhYil/SrnhLecU -Iw4OGEfhefwVVdCx/CVxY3UzHCMrr1zZ7Ud3YA47Dx7SwNxkBYn8eNZcLCZDqQ== ------END CERTIFICATE----- - -TC TrustCenter Class 2 CA II -============================ ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOLmoAAQACH9dSISwRXDswDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC -REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy -IENsYXNzIDIgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDIgQ0EgSUkwHhcNMDYw -MTEyMTQzODQzWhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 -c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQTElMCMGA1UE -AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMiBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAKuAh5uO8MN8h9foJIIRszzdQ2Lu+MNF2ujhoF/RKrLqk2jftMjWQ+nEdVl//OEd+DFw -IxuInie5e/060smp6RQvkL4DUsFJzfb95AhmC1eKokKguNV/aVyQMrKXDcpK3EY+AlWJU+MaWss2 
-xgdW94zPEfRMuzBwBJWl9jmM/XOBCH2JXjIeIqkiRUuwZi4wzJ9l/fzLganx4Duvo4bRierERXlQ -Xa7pIXSSTYtZgo+U4+lK8edJsBTj9WLL1XK9H7nSn6DNqPoByNkN39r8R52zyFTfSUrxIan+GE7u -SNQZu+995OKdy1u2bv/jzVrndIIFuoAlOMvkaZ6vQaoahPUCAwEAAaOCATQwggEwMA8GA1UdEwEB -/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTjq1RMgKHbVkO3kUrL84J6E1wIqzCB -7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 -Y19jbGFzc18yX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU -cnVzdENlbnRlciUyMENsYXNzJTIwMiUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i -SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEAjNfffu4bgBCzg/XbEeprS6iSGNn3Bzn1LL4G -dXpoUxUc6krtXvwjshOg0wn/9vYua0Fxec3ibf2uWWuFHbhOIprtZjluS5TmVfwLG4t3wVMTZonZ -KNaL80VKY7f9ewthXbhtvsPcW3nS7Yblok2+XnR8au0WOB9/WIFaGusyiC2y8zl3gK9etmF1Kdsj -TYjKUCjLhdLTEKJZbtOTVAB6okaVhgWcqRmY5TFyDADiZ9lA4CQze28suVyrZZ0srHbqNZn1l7kP -JOzHdiEoZa5X6AeIdUpWoNIFOqTmjZKILPPy4cHGYdtBxceb9w4aUUXCYWvcZCcXjFq32nQozZfk -vQ== ------END CERTIFICATE----- - -TC TrustCenter Class 3 CA II -============================ ------BEGIN CERTIFICATE----- -MIIEqjCCA5KgAwIBAgIOSkcAAQAC5aBd1j8AUb8wDQYJKoZIhvcNAQEFBQAwdjELMAkGA1UEBhMC -REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxIjAgBgNVBAsTGVRDIFRydXN0Q2VudGVy -IENsYXNzIDMgQ0ExJTAjBgNVBAMTHFRDIFRydXN0Q2VudGVyIENsYXNzIDMgQ0EgSUkwHhcNMDYw -MTEyMTQ0MTU3WhcNMjUxMjMxMjI1OTU5WjB2MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMgVHJ1 -c3RDZW50ZXIgR21iSDEiMCAGA1UECxMZVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQTElMCMGA1UE -AxMcVEMgVHJ1c3RDZW50ZXIgQ2xhc3MgMyBDQSBJSTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBALTgu1G7OVyLBMVMeRwjhjEQY0NVJz/GRcekPewJDRoeIMJWHt4bNwcwIi9v8Qbxq63W -yKthoy9DxLCyLfzDlml7forkzMA5EpBCYMnMNWju2l+QVl/NHE1bWEnrDgFPZPosPIlY2C8u4rBo -6SI7dYnWRBpl8huXJh0obazovVkdKyT21oQDZogkAHhg8fir/gKya/si+zXmFtGt9i4S5Po1auUZ -uV3bOx4a+9P/FRQI2AlqukWdFHlgfa9Aigdzs5OW03Q0jTo3Kd5c7PXuLjHCINy+8U9/I1LZW+Jk -2ZyqBwi1Rb3R0DHBq1SfqdLDYmAD8bs5SpJKPQq5ncWg/jcCAwEAAaOCATQwggEwMA8GA1UdEwEB -/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTUovyfs8PYA9NXXAek0CSnwPIA1DCB -7QYDVR0fBIHlMIHiMIHfoIHcoIHZhjVodHRwOi8vd3d3LnRydXN0Y2VudGVyLmRlL2NybC92Mi90 -Y19jbGFzc18zX2NhX0lJLmNybIaBn2xkYXA6Ly93d3cudHJ1c3RjZW50ZXIuZGUvQ049VEMlMjBU -cnVzdENlbnRlciUyMENsYXNzJTIwMyUyMENBJTIwSUksTz1UQyUyMFRydXN0Q2VudGVyJTIwR21i -SCxPVT1yb290Y2VydHMsREM9dHJ1c3RjZW50ZXIsREM9ZGU/Y2VydGlmaWNhdGVSZXZvY2F0aW9u -TGlzdD9iYXNlPzANBgkqhkiG9w0BAQUFAAOCAQEANmDkcPcGIEPZIxpC8vijsrlNirTzwppVMXzE -O2eatN9NDoqTSheLG43KieHPOh6sHfGcMrSOWXaiQYUlN6AT0PV8TtXqluJucsG7Kv5sbviRmEb8 -yRtXW+rIGjs/sFGYPAfaLFkB2otE6OF0/ado3VS6g0bsyEa1+K+XwDsJHI/OcpY9M1ZwvJbL2NV9 -IJqDnxrcOfHFcqMRA/07QlIp2+gB95tejNaNhk4Z+rwcvsUhpYeeeC422wlxo3I0+GzjBgnyXlal -092Y+tTmBvTwtiBjS+opvaqCZh77gaqnN60TGOaSw4HBM7uIHqHn4rS9MWwOUT1v+5ZWgOI2F9Hc -5A== ------END CERTIFICATE----- - -TC TrustCenter Universal CA I -============================= ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIOHaIAAQAC7LdggHiNtgYwDQYJKoZIhvcNAQEFBQAweTELMAkGA1UEBhMC -REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy -IFVuaXZlcnNhbCBDQTEmMCQGA1UEAxMdVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIEkwHhcN -MDYwMzIyMTU1NDI4WhcNMjUxMjMxMjI1OTU5WjB5MQswCQYDVQQGEwJERTEcMBoGA1UEChMTVEMg -VHJ1c3RDZW50ZXIgR21iSDEkMCIGA1UECxMbVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBMSYw -JAYDVQQDEx1UQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0EgSTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAKR3I5ZEr5D0MacQ9CaHnPM42Q9e3s9B6DGtxnSRJJZ4Hgmgm5qVSkr1YnwC -qMqs+1oEdjneX/H5s7/zA1hV0qq34wQi0fiU2iIIAI3TfCZdzHd55yx4Oagmcw6iXSVphU9VDprv 
-xrlE4Vc93x9UIuVvZaozhDrzznq+VZeujRIPFDPiUHDDSYcTvFHe15gSWu86gzOSBnWLknwSaHtw -ag+1m7Z3W0hZneTvWq3zwZ7U10VOylY0Ibw+F1tvdwxIAUMpsN0/lm7mlaoMwCC2/T42J5zjXM9O -gdwZu5GQfezmlwQek8wiSdeXhrYTCjxDI3d+8NzmzSQfO4ObNDqDNOMCAwEAAaNjMGEwHwYDVR0j -BBgwFoAUkqR1LKSevoFE63n8isWVpesQdXMwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AYYwHQYDVR0OBBYEFJKkdSyknr6BROt5/IrFlaXrEHVzMA0GCSqGSIb3DQEBBQUAA4IBAQAo0uCG -1eb4e/CX3CJrO5UUVg8RMKWaTzqwOuAGy2X17caXJ/4l8lfmXpWMPmRgFVp/Lw0BxbFg/UU1z/Cy -vwbZ71q+s2IhtNerNXxTPqYn8aEt2hojnczd7Dwtnic0XQ/CNnm8yUpiLe1r2X1BQ3y2qsrtYbE3 -ghUJGooWMNjsydZHcnhLEEYUjl8Or+zHL6sQ17bxbuyGssLoDZJz3KL0Dzq/YSMQiZxIQG5wALPT -ujdEWBF6AmqI8Dc08BnprNRlc/ZpjGSUOnmFKbAWKwyCPwacx/0QK54PLLae4xW/2TYcuiUaUj0a -7CIMHOCkoj3w6DnPgcB77V0fb8XQC9eY ------END CERTIFICATE----- - -Deutsche Telekom Root CA 2 -========================== ------BEGIN CERTIFICATE----- -MIIDnzCCAoegAwIBAgIBJjANBgkqhkiG9w0BAQUFADBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMT -RGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0GA1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEG -A1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBSb290IENBIDIwHhcNOTkwNzA5MTIxMTAwWhcNMTkwNzA5 -MjM1OTAwWjBxMQswCQYDVQQGEwJERTEcMBoGA1UEChMTRGV1dHNjaGUgVGVsZWtvbSBBRzEfMB0G -A1UECxMWVC1UZWxlU2VjIFRydXN0IENlbnRlcjEjMCEGA1UEAxMaRGV1dHNjaGUgVGVsZWtvbSBS -b290IENBIDIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCrC6M14IspFLEUha88EOQ5 -bzVdSq7d6mGNlUn0b2SjGmBmpKlAIoTZ1KXleJMOaAGtuU1cOs7TuKhCQN/Po7qCWWqSG6wcmtoI -KyUn+WkjR/Hg6yx6m/UTAtB+NHzCnjwAWav12gz1MjwrrFDa1sPeg5TKqAyZMg4ISFZbavva4VhY -AUlfckE8FQYBjl2tqriTtM2e66foai1SNNs671x1Udrb8zH57nGYMsRUFUQM+ZtV7a3fGAigo4aK -Se5TBY8ZTNXeWHmb0mocQqvF1afPaA+W5OFhmHZhyJF81j4A4pFQh+GdCuatl9Idxjp9y7zaAzTV -jlsB9WoHtxa2bkp/AgMBAAGjQjBAMB0GA1UdDgQWBBQxw3kbuvVT1xfgiXotF2wKsyudMzAPBgNV -HRMECDAGAQH/AgEFMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAlGRZrTlk5ynr -E/5aw4sTV8gEJPB0d8Bg42f76Ymmg7+Wgnxu1MM9756AbrsptJh6sTtU6zkXR34ajgv8HzFZMQSy -zhfzLMdiNlXiItiJVbSYSKpk+tYcNthEeFpaIzpXl/V6ME+un2pMSyuOoAPjPuCp1NJ70rOo4nI8 -rZ7/gFnkm0W09juwzTkZmDLl6iFhkOQxIY40sfcvNUqFENrnijchvllj4PKFiDFT1FQUhXB59C4G -dyd1Lx+4ivn+xbrYNuSD7Odlt79jWvNGr4GUN9RBjNYj1h7P9WgbRGOiWrqnNVmh5XAFmw4jV5mU -Cm26OWMohpLzGITY+9HPBVZkVw== ------END CERTIFICATE----- - -ComSign Secured CA -================== ------BEGIN CERTIFICATE----- -MIIDqzCCApOgAwIBAgIRAMcoRwmzuGxFjB36JPU2TukwDQYJKoZIhvcNAQEFBQAwPDEbMBkGA1UE -AxMSQ29tU2lnbiBTZWN1cmVkIENBMRAwDgYDVQQKEwdDb21TaWduMQswCQYDVQQGEwJJTDAeFw0w -NDAzMjQxMTM3MjBaFw0yOTAzMTYxNTA0NTZaMDwxGzAZBgNVBAMTEkNvbVNpZ24gU2VjdXJlZCBD -QTEQMA4GA1UEChMHQ29tU2lnbjELMAkGA1UEBhMCSUwwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQDGtWhfHZQVw6QIVS3joFd67+l0Kru5fFdJGhFeTymHDEjWaueP1H5XJLkGieQcPOqs -49ohgHMhCu95mGwfCP+hUH3ymBvJVG8+pSjsIQQPRbsHPaHA+iqYHU4Gk/v1iDurX8sWv+bznkqH -7Rnqwp9D5PGBpX8QTz7RSmKtUxvLg/8HZaWSLWapW7ha9B20IZFKF3ueMv5WJDmyVIRD9YTC2LxB -kMyd1mja6YJQqTtoz7VdApRgFrFD2UNd3V2Hbuq7s8lr9gOUCXDeFhF6K+h2j0kQmHe5Y1yLM5d1 -9guMsqtb3nQgJT/j8xH5h2iGNXHDHYwt6+UarA9z1YJZQIDTAgMBAAGjgacwgaQwDAYDVR0TBAUw -AwEB/zBEBgNVHR8EPTA7MDmgN6A1hjNodHRwOi8vZmVkaXIuY29tc2lnbi5jby5pbC9jcmwvQ29t -U2lnblNlY3VyZWRDQS5jcmwwDgYDVR0PAQH/BAQDAgGGMB8GA1UdIwQYMBaAFMFL7XC29z58ADsA -j8c+DkWfHl3sMB0GA1UdDgQWBBTBS+1wtvc+fAA7AI/HPg5Fnx5d7DANBgkqhkiG9w0BAQUFAAOC -AQEAFs/ukhNQq3sUnjO2QiBq1BW9Cav8cujvR3qQrFHBZE7piL1DRYHjZiM/EoZNGeQFsOY3wo3a -BijJD4mkU6l1P7CW+6tMM1X5eCZGbxs2mPtCdsGCuY7e+0X5YxtiOzkGynd6qDwJz2w2PQ8KRUtp -FhpFfTMDZflScZAmlaxMDPWLkz/MdXSFmLr/YnpNH4n+rr2UAJm/EaXc4HnFFgt9AmEd6oX5AhVP -51qJThRv4zdLhfXBPGHg/QVBspJ/wx2g0K5SZGBrGMYmnNj1ZOQ2GmKfig8+/21OGVZOIJFsnzQz -OjRXUDpvgV4GxvU+fE6OK85lBi5d0ipTdF7Tbieejw== ------END CERTIFICATE----- - 
-Cybertrust Global Root -====================== ------BEGIN CERTIFICATE----- -MIIDoTCCAomgAwIBAgILBAAAAAABD4WqLUgwDQYJKoZIhvcNAQEFBQAwOzEYMBYGA1UEChMPQ3li -ZXJ0cnVzdCwgSW5jMR8wHQYDVQQDExZDeWJlcnRydXN0IEdsb2JhbCBSb290MB4XDTA2MTIxNTA4 -MDAwMFoXDTIxMTIxNTA4MDAwMFowOzEYMBYGA1UEChMPQ3liZXJ0cnVzdCwgSW5jMR8wHQYDVQQD -ExZDeWJlcnRydXN0IEdsb2JhbCBSb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -+Mi8vRRQZhP/8NN57CPytxrHjoXxEnOmGaoQ25yiZXRadz5RfVb23CO21O1fWLE3TdVJDm71aofW -0ozSJ8bi/zafmGWgE07GKmSb1ZASzxQG9Dvj1Ci+6A74q05IlG2OlTEQXO2iLb3VOm2yHLtgwEZL -AfVJrn5GitB0jaEMAs7u/OePuGtm839EAL9mJRQr3RAwHQeWP032a7iPt3sMpTjr3kfb1V05/Iin -89cqdPHoWqI7n1C6poxFNcJQZZXcY4Lv3b93TZxiyWNzFtApD0mpSPCzqrdsxacwOUBdrsTiXSZT -8M4cIwhhqJQZugRiQOwfOHB3EgZxpzAYXSUnpQIDAQABo4GlMIGiMA4GA1UdDwEB/wQEAwIBBjAP -BgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBS2CHsNesysIEyGVjJez6tuhS1wVzA/BgNVHR8EODA2 -MDSgMqAwhi5odHRwOi8vd3d3Mi5wdWJsaWMtdHJ1c3QuY29tL2NybC9jdC9jdHJvb3QuY3JsMB8G -A1UdIwQYMBaAFLYIew16zKwgTIZWMl7Pq26FLXBXMA0GCSqGSIb3DQEBBQUAA4IBAQBW7wojoFRO -lZfJ+InaRcHUowAl9B8Tq7ejhVhpwjCt2BWKLePJzYFa+HMjWqd8BfP9IjsO0QbE2zZMcwSO5bAi -5MXzLqXZI+O4Tkogp24CJJ8iYGd7ix1yCcUxXOl5n4BHPa2hCwcUPUf/A2kaDAtE52Mlp3+yybh2 -hO0j9n0Hq0V+09+zv+mKts2oomcrUtW3ZfA5TGOgkXmTUg9U3YO7n9GPp1Nzw8v/MOx8BLjYRB+T -X3EJIrduPuocA06dGiBh+4E37F78CkWr1+cXVdCg6mCbpvbjjFspwgZgFJ0tl0ypkxWdYcQBX0jW -WL1WMRJOEcgh4LMRkWXbtKaIOM5V ------END CERTIFICATE----- - -ePKI Root Certification Authority -================================= ------BEGIN CERTIFICATE----- -MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG -EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg -Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx -MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq -MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B -AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs -IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi -lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv -qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX -12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O -WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ -ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao -lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ -vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi -Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi -MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH -ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 -1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq -KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV -xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP -NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r -GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE -xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx -gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy -sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD -BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= ------END CERTIFICATE----- - -T\xc3\x9c\x42\xC4\xB0TAK UEKAE K\xC3\xB6k 
Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 - S\xC3\xBCr\xC3\xBCm 3 -============================================================================================================================= ------BEGIN CERTIFICATE----- -MIIFFzCCA/+gAwIBAgIBETANBgkqhkiG9w0BAQUFADCCASsxCzAJBgNVBAYTAlRSMRgwFgYDVQQH -DA9HZWJ6ZSAtIEtvY2FlbGkxRzBFBgNVBAoMPlTDvHJraXllIEJpbGltc2VsIHZlIFRla25vbG9q -aWsgQXJhxZ90xLFybWEgS3VydW11IC0gVMOcQsSwVEFLMUgwRgYDVQQLDD9VbHVzYWwgRWxla3Ry -b25payB2ZSBLcmlwdG9sb2ppIEFyYcWfdMSxcm1hIEVuc3RpdMO8c8O8IC0gVUVLQUUxIzAhBgNV -BAsMGkthbXUgU2VydGlmaWthc3lvbiBNZXJrZXppMUowSAYDVQQDDEFUw5xCxLBUQUsgVUVLQUUg -S8O2ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsSAtIFPDvHLDvG0gMzAeFw0wNzA4 -MjQxMTM3MDdaFw0xNzA4MjExMTM3MDdaMIIBKzELMAkGA1UEBhMCVFIxGDAWBgNVBAcMD0dlYnpl -IC0gS29jYWVsaTFHMEUGA1UECgw+VMO8cmtpeWUgQmlsaW1zZWwgdmUgVGVrbm9sb2ppayBBcmHF -n3TEsXJtYSBLdXJ1bXUgLSBUw5xCxLBUQUsxSDBGBgNVBAsMP1VsdXNhbCBFbGVrdHJvbmlrIHZl -IEtyaXB0b2xvamkgQXJhxZ90xLFybWEgRW5zdGl0w7xzw7wgLSBVRUtBRTEjMCEGA1UECwwaS2Ft -dSBTZXJ0aWZpa2FzeW9uIE1lcmtlemkxSjBIBgNVBAMMQVTDnELEsFRBSyBVRUtBRSBLw7ZrIFNl -cnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxIC0gU8O8csO8bSAzMIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEAim1L/xCIOsP2fpTo6iBkcK4hgb46ezzb8R1Sf1n68yJMlaCQvEhO -Eav7t7WNeoMojCZG2E6VQIdhn8WebYGHV2yKO7Rm6sxA/OOqbLLLAdsyv9Lrhc+hDVXDWzhXcLh1 -xnnRFDDtG1hba+818qEhTsXOfJlfbLm4IpNQp81McGq+agV/E5wrHur+R84EpW+sky58K5+eeROR -6Oqeyjh1jmKwlZMq5d/pXpduIF9fhHpEORlAHLpVK/swsoHvhOPc7Jg4OQOFCKlUAwUp8MmPi+oL -hmUZEdPpCSPeaJMDyTYcIW7OjGbxmTDY17PDHfiBLqi9ggtm/oLL4eAagsNAgQIDAQABo0IwQDAd -BgNVHQ4EFgQUvYiHyY/2pAoLquvF/pEjnatKijIwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF -MAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAB18+kmPNOm3JpIWmgV050vQbTlswyb2zrgxvMTfvCr4 -N5EY3ATIZJkrGG2AA1nJrvhY0D7twyOfaTyGOBye79oneNGEN3GKPEs5z35FBtYt2IpNeBLWrcLT -y9LQQfMmNkqblWwM7uXRQydmwYj3erMgbOqwaSvHIOgMA8RBBZniP+Rr+KCGgceExh/VS4ESshYh -LBOhgLJeDEoTniDYYkCrkOpkSi+sDQESeUWoL4cZaMjihccwsnX5OD+ywJO0a+IDRM5noN+J1q2M -dqMTw5RhK2vZbMEHCiIHhWyFJEapvj+LeISCfiQMnf2BN+MlqO02TpUsyZyQ2uypQjyttgI= ------END CERTIFICATE----- - -Buypass Class 2 CA 1 -==================== ------BEGIN CERTIFICATE----- -MIIDUzCCAjugAwIBAgIBATANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU -QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMiBDQSAxMB4XDTA2 -MTAxMzEwMjUwOVoXDTE2MTAxMzEwMjUwOVowSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh -c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDIgQ0EgMTCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAIs8B0XY9t/mx8q6jUPFR42wWsE425KEHK8T1A9vNkYgxC7M -cXA0ojTTNy7Y3Tp3L8DrKehc0rWpkTSHIln+zNvnma+WwajHQN2lFYxuyHyXA8vmIPLXl18xoS83 -0r7uvqmtqEyeIWZDO6i88wmjONVZJMHCR3axiFyCO7srpgTXjAePzdVBHfCuuCkslFJgNJQ72uA4 -0Z0zPhX0kzLFANq1KWYOOngPIVJfAuWSeyXTkh4vFZ2B5J2O6O+JzhRMVB0cgRJNcKi+EAUXfh/R -uFdV7c27UsKwHnjCTTZoy1YmwVLBvXb3WNVyfh9EdrsAiR0WnVE1703CVu9r4Iw7DekCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUP42aWYv8e3uco684sDntkHGA1sgwDgYDVR0P -AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQAVGn4TirnoB6NLJzKyQJHyIdFkhb5jatLPgcIV -1Xp+DCmsNx4cfHZSldq1fyOhKXdlyTKdqC5Wq2B2zha0jX94wNWZUYN/Xtm+DKhQ7SLHrQVMdvvt -7h5HZPb3J31cKA9FxVxiXqaakZG3Uxcu3K1gnZZkOb1naLKuBctN518fV4bVIJwo+28TOPX2EZL2 -fZleHwzoq0QkKXJAPTZSr4xYkHPB7GEseaHsh7U/2k3ZIQAw3pDaDtMaSKk+hQsUi4y8QZ5q9w5w -wDX3OaJdZtB7WZ+oRxKaJyOkLY4ng5IgodcVf/EuGO70SH8vf/GhGLWhC5SgYiAynB321O+/TIho ------END CERTIFICATE----- - -Buypass Class 3 CA 1 -==================== ------BEGIN CERTIFICATE----- -MIIDUzCCAjugAwIBAgIBAjANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU 
-QnV5cGFzcyBBUy05ODMxNjMzMjcxHTAbBgNVBAMMFEJ1eXBhc3MgQ2xhc3MgMyBDQSAxMB4XDTA1 -MDUwOTE0MTMwM1oXDTE1MDUwOTE0MTMwM1owSzELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1eXBh -c3MgQVMtOTgzMTYzMzI3MR0wGwYDVQQDDBRCdXlwYXNzIENsYXNzIDMgQ0EgMTCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAKSO13TZKWTeXx+HgJHqTjnmGcZEC4DVC69TB4sSveZn8AKx -ifZgisRbsELRwCGoy+Gb72RRtqfPFfV0gGgEkKBYouZ0plNTVUhjP5JW3SROjvi6K//zNIqeKNc0 -n6wv1g/xpC+9UrJJhW05NfBEMJNGJPO251P7vGGvqaMU+8IXF4Rs4HyI+MkcVyzwPX6UvCWThOia -AJpFBUJXgPROztmuOfbIUxAMZTpHe2DC1vqRycZxbL2RhzyRhkmr8w+gbCZ2Xhysm3HljbybIR6c -1jh+JIAVMYKWsUnTYjdbiAwKYjT+p0h+mbEwi5A3lRyoH6UsjfRVyNvdWQrCrXig9IsCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUOBTmyPCppAP0Tj4io1vy1uCtQHQwDgYDVR0P -AQH/BAQDAgEGMA0GCSqGSIb3DQEBBQUAA4IBAQABZ6OMySU9E2NdFm/soT4JXJEVKirZgCFPBdy7 -pYmrEzMqnji3jG8CcmPHc3ceCQa6Oyh7pEfJYWsICCD8igWKH7y6xsL+z27sEzNxZy5p+qksP2bA -EllNC1QCkoS72xLvg3BweMhT+t/Gxv/ciC8HwEmdMldg0/L2mSlf56oBzKwzqBwKu5HEA6BvtjT5 -htOzdlSY9EqBs1OdTUDs5XcTRa9bqh/YL0yCe/4qxFi7T/ye/QNlGioOw6UgFpRreaaiErS7GqQj -el/wroQk5PMr+4okoyeYZdowdXb8GZHo2+ubPzK/QJcHJrrM85SFSnonk8+QQtS4Wxam58tAA915 ------END CERTIFICATE----- - -EBG Elektronik Sertifika Hizmet Sa\xC4\x9Flay\xc4\xb1\x63\xc4\xb1s\xc4\xb1 -========================================================================== ------BEGIN CERTIFICATE----- -MIIF5zCCA8+gAwIBAgIITK9zQhyOdAIwDQYJKoZIhvcNAQEFBQAwgYAxODA2BgNVBAMML0VCRyBF -bGVrdHJvbmlrIFNlcnRpZmlrYSBIaXptZXQgU2HEn2xhecSxY8Sxc8SxMTcwNQYDVQQKDC5FQkcg -QmlsacWfaW0gVGVrbm9sb2ppbGVyaSB2ZSBIaXptZXRsZXJpIEEuxZ4uMQswCQYDVQQGEwJUUjAe -Fw0wNjA4MTcwMDIxMDlaFw0xNjA4MTQwMDMxMDlaMIGAMTgwNgYDVQQDDC9FQkcgRWxla3Ryb25p -ayBTZXJ0aWZpa2EgSGl6bWV0IFNhxJ9sYXnEsWPEsXPEsTE3MDUGA1UECgwuRUJHIEJpbGnFn2lt -IFRla25vbG9qaWxlcmkgdmUgSGl6bWV0bGVyaSBBLsWeLjELMAkGA1UEBhMCVFIwggIiMA0GCSqG -SIb3DQEBAQUAA4ICDwAwggIKAoICAQDuoIRh0DpqZhAy2DE4f6en5f2h4fuXd7hxlugTlkaDT7by -X3JWbhNgpQGR4lvFzVcfd2NR/y8927k/qqk153nQ9dAktiHq6yOU/im/+4mRDGSaBUorzAzu8T2b -gmmkTPiab+ci2hC6X5L8GCcKqKpE+i4stPtGmggDg3KriORqcsnlZR9uKg+ds+g75AxuetpX/dfr -eYteIAbTdgtsApWjluTLdlHRKJ2hGvxEok3MenaoDT2/F08iiFD9rrbskFBKW5+VQarKD7JK/oCZ -TqNGFav4c0JqwmZ2sQomFd2TkuzbqV9UIlKRcF0T6kjsbgNs2d1s/OsNA/+mgxKb8amTD8UmTDGy -Y5lhcucqZJnSuOl14nypqZoaqsNW2xCaPINStnuWt6yHd6i58mcLlEOzrz5z+kI2sSXFCjEmN1Zn -uqMLfdb3ic1nobc6HmZP9qBVFCVMLDMNpkGMvQQxahByCp0OLna9XvNRiYuoP1Vzv9s6xiQFlpJI -qkuNKgPlV5EQ9GooFW5Hd4RcUXSfGenmHmMWOeMRFeNYGkS9y8RsZteEBt8w9DeiQyJ50hBs37vm -ExH8nYQKE3vwO9D8owrXieqWfo1IhR5kX9tUoqzVegJ5a9KK8GfaZXINFHDk6Y54jzJ0fFfy1tb0 -Nokb+Clsi7n2l9GkLqq+CxnCRelwXQIDAJ3Zo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB -/wQEAwIBBjAdBgNVHQ4EFgQU587GT/wWZ5b6SqMHwQSny2re2kcwHwYDVR0jBBgwFoAU587GT/wW -Z5b6SqMHwQSny2re2kcwDQYJKoZIhvcNAQEFBQADggIBAJuYml2+8ygjdsZs93/mQJ7ANtyVDR2t -FcU22NU57/IeIl6zgrRdu0waypIN30ckHrMk2pGI6YNw3ZPX6bqz3xZaPt7gyPvT/Wwp+BVGoGgm -zJNSroIBk5DKd8pNSe/iWtkqvTDOTLKBtjDOWU/aWR1qeqRFsIImgYZ29fUQALjuswnoT4cCB64k -XPBfrAowzIpAoHMEwfuJJPaaHFy3PApnNgUIMbOv2AFoKuB4j3TeuFGkjGwgPaL7s9QJ/XvCgKqT -bCmYIai7FvOpEl90tYeY8pUm3zTvilORiF0alKM/fCL414i6poyWqD1SNGKfAB5UVUJnxk1Gj7sU -RT0KlhaOEKGXmdXTMIXM3rRyt7yKPBgpaP3ccQfuJDlq+u2lrDgv+R4QDgZxGhBM/nV+/x5XOULK -1+EVoVZVWRvRo68R2E7DpSvvkL/A7IITW43WciyTTo9qKd+FPNMN4KIYEsxVL0e3p5sC/kH2iExt -2qkBR4NkJ2IQgtYSe14DHzSpyZH+r11thie3I6p1GMog57AP14kOpmciY/SDQSsGS7tY1dHXt7kQ -Y9iJSrSq3RZj9W6+YKH47ejWkE8axsWgKdOnIaj1Wjz3x0miIZpKlVIglnKaZsv30oZDfCK+lvm9 -AahH3eU7QPl1K5srRmSGjR70j/sHd9DqSaIcjVIUpgqT ------END CERTIFICATE----- - -certSIGN ROOT CA -================ ------BEGIN CERTIFICATE----- -MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD 
-VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa -Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE -CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I -JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH -rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 -ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD -0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 -AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B -Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB -AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 -SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 -x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt -vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz -TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD ------END CERTIFICATE----- - -CNNIC ROOT -========== ------BEGIN CERTIFICATE----- -MIIDVTCCAj2gAwIBAgIESTMAATANBgkqhkiG9w0BAQUFADAyMQswCQYDVQQGEwJDTjEOMAwGA1UE -ChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1QwHhcNMDcwNDE2MDcwOTE0WhcNMjcwNDE2MDcw -OTE0WjAyMQswCQYDVQQGEwJDTjEOMAwGA1UEChMFQ05OSUMxEzARBgNVBAMTCkNOTklDIFJPT1Qw -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDTNfc/c3et6FtzF8LRb+1VvG7q6KR5smzD -o+/hn7E7SIX1mlwhIhAsxYLO2uOabjfhhyzcuQxauohV3/2q2x8x6gHx3zkBwRP9SFIhxFXf2tiz -VHa6dLG3fdfA6PZZxU3Iva0fFNrfWEQlMhkqx35+jq44sDB7R3IJMfAw28Mbdim7aXZOV/kbZKKT -VrdvmW7bCgScEeOAH8tjlBAKqeFkgjH5jCftppkA9nCTGPihNIaj3XrCGHn2emU1z5DrvTOTn1Or -czvmmzQgLx3vqR1jGqCA2wMv+SYahtKNu6m+UjqHZ0gNv7Sg2Ca+I19zN38m5pIEo3/PIKe38zrK -y5nLAgMBAAGjczBxMBEGCWCGSAGG+EIBAQQEAwIABzAfBgNVHSMEGDAWgBRl8jGtKvf33VKWCscC -wQ7vptU7ETAPBgNVHRMBAf8EBTADAQH/MAsGA1UdDwQEAwIB/jAdBgNVHQ4EFgQUZfIxrSr3991S -lgrHAsEO76bVOxEwDQYJKoZIhvcNAQEFBQADggEBAEs17szkrr/Dbq2flTtLP1se31cpolnKOOK5 -Gv+e5m4y3R6u6jW39ZORTtpC4cMXYFDy0VwmuYK36m3knITnA3kXr5g9lNvHugDnuL8BV8F3RTIM -O/G0HAiw/VGgod2aHRM2mm23xzy54cXZF/qD1T0VoDy7HgviyJA/qIYM/PmLXoXLT1tLYhFHxUV8 -BS9BsZ4QaRuZluBVeftOhpm4lNqGOGqTo+fLbuXf6iFViZx9fX+Y9QCJ7uOEwFyWtcVG6kbghVW2 -G8kS1sHNzYDzAgE8yGnLRUhj2JTQ7IUOO04RZfSCjKY9ri4ilAnIXOo8gV0WKgOXFlUJ24pBgp5m -mxE= ------END CERTIFICATE----- - -ApplicationCA - Japanese Government -=================================== ------BEGIN CERTIFICATE----- -MIIDoDCCAoigAwIBAgIBMTANBgkqhkiG9w0BAQUFADBDMQswCQYDVQQGEwJKUDEcMBoGA1UEChMT -SmFwYW5lc2UgR292ZXJubWVudDEWMBQGA1UECxMNQXBwbGljYXRpb25DQTAeFw0wNzEyMTIxNTAw -MDBaFw0xNzEyMTIxNTAwMDBaMEMxCzAJBgNVBAYTAkpQMRwwGgYDVQQKExNKYXBhbmVzZSBHb3Zl -cm5tZW50MRYwFAYDVQQLEw1BcHBsaWNhdGlvbkNBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB -CgKCAQEAp23gdE6Hj6UG3mii24aZS2QNcfAKBZuOquHMLtJqO8F6tJdhjYq+xpqcBrSGUeQ3DnR4 -fl+Kf5Sk10cI/VBaVuRorChzoHvpfxiSQE8tnfWuREhzNgaeZCw7NCPbXCbkcXmP1G55IrmTwcrN -wVbtiGrXoDkhBFcsovW8R0FPXjQilbUfKW1eSvNNcr5BViCH/OlQR9cwFO5cjFW6WY2H/CPek9AE -jP3vbb3QesmlOmpyM8ZKDQUXKi17safY1vC+9D/qDihtQWEjdnjDuGWk81quzMKq2edY3rZ+nYVu -nyoKb58DKTCXKB28t89UKU5RMfkntigm/qJj5kEW8DOYRwIDAQABo4GeMIGbMB0GA1UdDgQWBBRU -WssmP3HMlEYNllPqa0jQk/5CdTAOBgNVHQ8BAf8EBAMCAQYwWQYDVR0RBFIwUKROMEwxCzAJBgNV -BAYTAkpQMRgwFgYDVQQKDA/ml6XmnKzlm73mlL/lupwxIzAhBgNVBAsMGuOCouODl+ODquOCseOD -vOOCt+ODp+ODs0NBMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADlqRHZ3ODrs -o2dGD/mLBqj7apAxzn7s2tGJfHrrLgy9mTLnsCTWw//1sogJhyzjVOGjprIIC8CFqMjSnHH2HZ9g -/DgzE+Ge3Atf2hZQKXsvcJEPmbo0NI2VdMV+eKlmXb3KIXdCEKxmJj3ekav9FfBv7WxfEPjzFvYD 
-io+nEhEMy/0/ecGc/WLuo89UDNErXxc+4z6/wCs+CZv+iKZ+tJIX/COUgb1up8WMwusRRdv4QcmW -dupwX3kSa+SjB1oF7ydJzyGfikwJcGapJsErEU4z0g781mzSDjJkaP+tBXhfAx2o45CsJOAPQKdL -rosot4LKGAfmt1t06SAZf7IbiVQ= ------END CERTIFICATE----- - -GeoTrust Primary Certification Authority - G3 -============================================= ------BEGIN CERTIFICATE----- -MIID/jCCAuagAwIBAgIQFaxulBmyeUtB9iepwxgPHzANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UE -BhMCVVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA4IEdlb1RydXN0 -IEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFy -eSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEczMB4XDTA4MDQwMjAwMDAwMFoXDTM3MTIwMTIz -NTk1OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAo -YykgMjAwOCBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMT -LUdlb1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMzCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBANziXmJYHTNXOTIz+uvLh4yn1ErdBojqZI4xmKU4kB6Yzy5j -K/BGvESyiaHAKAxJcCGVn2TAppMSAmUmhsalifD614SgcK9PGpc/BkTVyetyEH3kMSj7HGHmKAdE -c5IiaacDiGydY8hS2pgn5whMcD60yRLBxWeDXTPzAxHsatBT4tG6NmCUgLthY2xbF37fQJQeqw3C -IShwiP/WJmxsYAQlTlV+fe+/lEjetx3dcI0FX4ilm/LC7urRQEFtYjgdVgbFA0dRIBn8exALDmKu -dlW/X3e+PkkBUz2YJQN2JFodtNuJ6nnltrM7P7pMKEF/BqxqjsHQ9gUdfeZChuOl1UcCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFMR5yo6hTgMdHNxr -2zFblD4/MH8tMA0GCSqGSIb3DQEBCwUAA4IBAQAtxRPPVoB7eni9n64smefv2t+UXglpp+duaIy9 -cr5HqQ6XErhK8WTTOd8lNNTBzU6B8A8ExCSzNJbGpqow32hhc9f5joWJ7w5elShKKiePEI4ufIbE -Ap7aDHdlDkQNkv39sxY2+hENHYwOB4lqKVb3cvTdFZx3NWZXqxNT2I7BQMXXExZacse3aQHEerGD -AWh9jUGhlBjBJVz88P6DAod8DQ3PLghcSkANPuyBYeYk28rgDi0Hsj5W3I31QYUHSJsMC8tJP33s -t/3LjWeJGqvtux6jAAgIFyqCXDFdRootD4abdNlF+9RAsXqqaC2Gspki4cErx5z481+oghLrGREt ------END CERTIFICATE----- - -thawte Primary Root CA - G2 -=========================== ------BEGIN CERTIFICATE----- -MIICiDCCAg2gAwIBAgIQNfwmXNmET8k9Jj1Xm67XVjAKBggqhkjOPQQDAzCBhDELMAkGA1UEBhMC -VVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjE4MDYGA1UECxMvKGMpIDIwMDcgdGhhd3RlLCBJbmMu -IC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3Qg -Q0EgLSBHMjAeFw0wNzExMDUwMDAwMDBaFw0zODAxMTgyMzU5NTlaMIGEMQswCQYDVQQGEwJVUzEV -MBMGA1UEChMMdGhhd3RlLCBJbmMuMTgwNgYDVQQLEy8oYykgMjAwNyB0aGF3dGUsIEluYy4gLSBG -b3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIGA1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAt -IEcyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEotWcgnuVnfFSeIf+iha/BebfowJPDQfGAFG6DAJS -LSKkQjnE/o/qycG+1E3/n3qe4rF8mq2nhglzh9HnmuN6papu+7qzcMBniKI11KOasf2twu8x+qi5 -8/sIxpHR+ymVo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU -mtgAMADna3+FGO6Lts6KDPgR4bswCgYIKoZIzj0EAwMDaQAwZgIxAN344FdHW6fmCsO99YCKlzUN -G4k8VIZ3KMqh9HneteY4sPBlcIx/AlTCv//YoT7ZzwIxAMSNlPzcU9LcnXgWHxUzI1NS41oxXZ3K -rr0TKUQNJ1uo52icEvdYPy5yAlejj6EULg== ------END CERTIFICATE----- - -thawte Primary Root CA - G3 -=========================== ------BEGIN CERTIFICATE----- -MIIEKjCCAxKgAwIBAgIQYAGXt0an6rS0mtZLL/eQ+zANBgkqhkiG9w0BAQsFADCBrjELMAkGA1UE -BhMCVVMxFTATBgNVBAoTDHRoYXd0ZSwgSW5jLjEoMCYGA1UECxMfQ2VydGlmaWNhdGlvbiBTZXJ2 -aWNlcyBEaXZpc2lvbjE4MDYGA1UECxMvKGMpIDIwMDggdGhhd3RlLCBJbmMuIC0gRm9yIGF1dGhv -cml6ZWQgdXNlIG9ubHkxJDAiBgNVBAMTG3RoYXd0ZSBQcmltYXJ5IFJvb3QgQ0EgLSBHMzAeFw0w -ODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIGuMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMdGhh -d3RlLCBJbmMuMSgwJgYDVQQLEx9DZXJ0aWZpY2F0aW9uIFNlcnZpY2VzIERpdmlzaW9uMTgwNgYD -VQQLEy8oYykgMjAwOCB0aGF3dGUsIEluYy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTEkMCIG -A1UEAxMbdGhhd3RlIFByaW1hcnkgUm9vdCBDQSAtIEczMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A 
-MIIBCgKCAQEAsr8nLPvb2FvdeHsbnndmgcs+vHyu86YnmjSjaDFxODNi5PNxZnmxqWWjpYvVj2At -P0LMqmsywCPLLEHd5N/8YZzic7IilRFDGF/Eth9XbAoFWCLINkw6fKXRz4aviKdEAhN0cXMKQlkC -+BsUa0Lfb1+6a4KinVvnSr0eAXLbS3ToO39/fR8EtCab4LRarEc9VbjXsCZSKAExQGbY2SS99irY -7CFJXJv2eul/VTV+lmuNk5Mny5K76qxAwJ/C+IDPXfRa3M50hqY+bAtTyr2SzhkGcuYMXDhpxwTW -vGzOW/b3aJzcJRVIiKHpqfiYnODz1TEoYRFsZ5aNOZnLwkUkOQIDAQABo0IwQDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUrWyqlGCc7eT/+j4KdCtjA/e2Wb8wDQYJ -KoZIhvcNAQELBQADggEBABpA2JVlrAmSicY59BDlqQ5mU1143vokkbvnRFHfxhY0Cu9qRFHqKweK -A3rD6z8KLFIWoCtDuSWQP3CpMyVtRRooOyfPqsMpQhvfO0zAMzRbQYi/aytlryjvsvXDqmbOe1bu -t8jLZ8HJnBoYuMTDSQPxYA5QzUbF83d597YV4Djbxy8ooAw/dyZ02SUS2jHaGh7cKUGRIjxpp7sC -8rZcJwOJ9Abqm+RyguOhCcHpABnTPtRwa7pxpqpYrvS76Wy274fMm7v/OeZWYdMKp8RcTGB7BXcm -er/YB1IsYvdwY9k5vG8cwnncdimvzsUsZAReiDZuMdRAGmI0Nj81Aa6sY6A= ------END CERTIFICATE----- - -GeoTrust Primary Certification Authority - G2 -============================================= ------BEGIN CERTIFICATE----- -MIICrjCCAjWgAwIBAgIQPLL0SAoA4v7rJDteYD7DazAKBggqhkjOPQQDAzCBmDELMAkGA1UEBhMC -VVMxFjAUBgNVBAoTDUdlb1RydXN0IEluYy4xOTA3BgNVBAsTMChjKSAyMDA3IEdlb1RydXN0IElu -Yy4gLSBGb3IgYXV0aG9yaXplZCB1c2Ugb25seTE2MDQGA1UEAxMtR2VvVHJ1c3QgUHJpbWFyeSBD -ZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAtIEcyMB4XDTA3MTEwNTAwMDAwMFoXDTM4MDExODIzNTk1 -OVowgZgxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1HZW9UcnVzdCBJbmMuMTkwNwYDVQQLEzAoYykg -MjAwNyBHZW9UcnVzdCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNlIG9ubHkxNjA0BgNVBAMTLUdl -b1RydXN0IFByaW1hcnkgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgLSBHMjB2MBAGByqGSM49AgEG -BSuBBAAiA2IABBWx6P0DFUPlrOuHNxFi79KDNlJ9RVcLSo17VDs6bl8VAsBQps8lL33KSLjHUGMc -KiEIfJo22Av+0SbFWDEwKCXzXV2juLaltJLtbCyf691DiaI8S0iRHVDsJt/WYC69IaNCMEAwDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBVfNVdRVfslsq0DafwBo/q+ -EVXVMAoGCCqGSM49BAMDA2cAMGQCMGSWWaboCd6LuvpaiIjwH5HTRqjySkwCY/tsXzjbLkGTqQ7m -ndwxHLKgpxgceeHHNgIwOlavmnRs9vuD4DPTCF+hnMJbn0bWtsuRBmOiBuczrD6ogRLQy7rQkgu2 -npaqBA+K ------END CERTIFICATE----- - -VeriSign Universal Root Certification Authority -=============================================== ------BEGIN CERTIFICATE----- -MIIEuTCCA6GgAwIBAgIQQBrEZCGzEyEDDrvkEhrFHTANBgkqhkiG9w0BAQsFADCBvTELMAkGA1UE -BhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBO -ZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwOCBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVk -IHVzZSBvbmx5MTgwNgYDVQQDEy9WZXJpU2lnbiBVbml2ZXJzYWwgUm9vdCBDZXJ0aWZpY2F0aW9u -IEF1dGhvcml0eTAeFw0wODA0MDIwMDAwMDBaFw0zNzEyMDEyMzU5NTlaMIG9MQswCQYDVQQGEwJV -UzEXMBUGA1UEChMOVmVyaVNpZ24sIEluYy4xHzAdBgNVBAsTFlZlcmlTaWduIFRydXN0IE5ldHdv -cmsxOjA4BgNVBAsTMShjKSAyMDA4IFZlcmlTaWduLCBJbmMuIC0gRm9yIGF1dGhvcml6ZWQgdXNl -IG9ubHkxODA2BgNVBAMTL1ZlcmlTaWduIFVuaXZlcnNhbCBSb290IENlcnRpZmljYXRpb24gQXV0 -aG9yaXR5MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx2E3XrEBNNti1xWb/1hajCMj -1mCOkdeQmIN65lgZOIzF9uVkhbSicfvtvbnazU0AtMgtc6XHaXGVHzk8skQHnOgO+k1KxCHfKWGP -MiJhgsWHH26MfF8WIFFE0XBPV+rjHOPMee5Y2A7Cs0WTwCznmhcrewA3ekEzeOEz4vMQGn+HLL72 -9fdC4uW/h2KJXwBL38Xd5HVEMkE6HnFuacsLdUYI0crSK5XQz/u5QGtkjFdN/BMReYTtXlT2NJ8I -AfMQJQYXStrxHXpma5hgZqTZ79IugvHw7wnqRMkVauIDbjPTrJ9VAMf2CGqUuV/c4DPxhGD5WycR -tPwW8rtWaoAljQIDAQABo4GyMIGvMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMG0G -CCsGAQUFBwEMBGEwX6FdoFswWTBXMFUWCWltYWdlL2dpZjAhMB8wBwYFKw4DAhoEFI/l0xqGrI2O -a8PPgGrUSBgsexkuMCUWI2h0dHA6Ly9sb2dvLnZlcmlzaWduLmNvbS92c2xvZ28uZ2lmMB0GA1Ud -DgQWBBS2d/ppSEefUxLVwuoHMnYH0ZcHGTANBgkqhkiG9w0BAQsFAAOCAQEASvj4sAPmLGd75JR3 -Y8xuTPl9Dg3cyLk1uXBPY/ok+myDjEedO2Pzmvl2MpWRsXe8rJq+seQxIcaBlVZaDrHC1LGmWazx 
-Y8u4TB1ZkErvkBYoH1quEPuBUDgMbMzxPcP1Y+Oz4yHJJDnp/RVmRvQbEdBNc6N9Rvk97ahfYtTx -P/jgdFcrGJ2BtMQo2pSXpXDrrB2+BxHw1dvd5Yzw1TKwg+ZX4o+/vqGqvz0dtdQ46tewXDpPaj+P -wGZsY6rp2aQW9IHRlRQOfc2VNNnSj3BzgXucfr2YYdhFh5iQxeuGMMY1v/D/w1WIg0vvBZIGcfK4 -mJO37M2CYfE45k+XmCpajQ== ------END CERTIFICATE----- - -VeriSign Class 3 Public Primary Certification Authority - G4 -============================================================ ------BEGIN CERTIFICATE----- -MIIDhDCCAwqgAwIBAgIQL4D+I4wOIg9IZxIokYesszAKBggqhkjOPQQDAzCByjELMAkGA1UEBhMC -VVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBUcnVzdCBOZXR3 -b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRob3JpemVkIHVz -ZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5IENlcnRpZmlj -YXRpb24gQXV0aG9yaXR5IC0gRzQwHhcNMDcxMTA1MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCByjEL -MAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMR8wHQYDVQQLExZWZXJpU2lnbiBU -cnVzdCBOZXR3b3JrMTowOAYDVQQLEzEoYykgMjAwNyBWZXJpU2lnbiwgSW5jLiAtIEZvciBhdXRo -b3JpemVkIHVzZSBvbmx5MUUwQwYDVQQDEzxWZXJpU2lnbiBDbGFzcyAzIFB1YmxpYyBQcmltYXJ5 -IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzQwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAASnVnp8 -Utpkmw4tXNherJI9/gHmGUo9FANL+mAnINmDiWn6VMaaGF5VKmTeBvaNSjutEDxlPZCIBIngMGGz -rl0Bp3vefLK+ymVhAIau2o970ImtTR1ZmkGxvEeA3J5iw/mjgbIwga8wDwYDVR0TAQH/BAUwAwEB -/zAOBgNVHQ8BAf8EBAMCAQYwbQYIKwYBBQUHAQwEYTBfoV2gWzBZMFcwVRYJaW1hZ2UvZ2lmMCEw -HzAHBgUrDgMCGgQUj+XTGoasjY5rw8+AatRIGCx7GS4wJRYjaHR0cDovL2xvZ28udmVyaXNpZ24u -Y29tL3ZzbG9nby5naWYwHQYDVR0OBBYEFLMWkf3upm7ktS5Jj4d4gYDs5bG1MAoGCCqGSM49BAMD -A2gAMGUCMGYhDBgmYFo4e1ZC4Kf8NoRRkSAsdk1DPcQdhCPQrNZ8NQbOzWm9kA3bbEhCHQ6qQgIx -AJw9SDkjOVgaFRJZap7v1VmyHVIsmXHNxynfGyphe3HR3vPA5Q06Sqotp9iGKt0uEA== ------END CERTIFICATE----- - -NetLock Arany (Class Gold) Főtanúsítvány -============================================ ------BEGIN CERTIFICATE----- -MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G -A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 -dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB -cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx -MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO -ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv -biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 -c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu -0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw -/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk -H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw -fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 -neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB -BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW -qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta -YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC -bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna -NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu -dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= ------END CERTIFICATE----- - -Staat der Nederlanden Root CA - G2 -================================== ------BEGIN CERTIFICATE----- -MIIFyjCCA7KgAwIBAgIEAJiWjDANBgkqhkiG9w0BAQsFADBaMQswCQYDVQQGEwJOTDEeMBwGA1UE -CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSswKQYDVQQDDCJTdGFhdCBkZXIgTmVkZXJsYW5kZW4g 
-Um9vdCBDQSAtIEcyMB4XDTA4MDMyNjExMTgxN1oXDTIwMDMyNTExMDMxMFowWjELMAkGA1UEBhMC -TkwxHjAcBgNVBAoMFVN0YWF0IGRlciBOZWRlcmxhbmRlbjErMCkGA1UEAwwiU3RhYXQgZGVyIE5l -ZGVybGFuZGVuIFJvb3QgQ0EgLSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMVZ -5291qj5LnLW4rJ4L5PnZyqtdj7U5EILXr1HgO+EASGrP2uEGQxGZqhQlEq0i6ABtQ8SpuOUfiUtn -vWFI7/3S4GCI5bkYYCjDdyutsDeqN95kWSpGV+RLufg3fNU254DBtvPUZ5uW6M7XxgpT0GtJlvOj -CwV3SPcl5XCsMBQgJeN/dVrlSPhOewMHBPqCYYdu8DvEpMfQ9XQ+pV0aCPKbJdL2rAQmPlU6Yiil -e7Iwr/g3wtG61jj99O9JMDeZJiFIhQGp5Rbn3JBV3w/oOM2ZNyFPXfUib2rFEhZgF1XyZWampzCR -OME4HYYEhLoaJXhena/MUGDWE4dS7WMfbWV9whUYdMrhfmQpjHLYFhN9C0lK8SgbIHRrxT3dsKpI -CT0ugpTNGmXZK4iambwYfp/ufWZ8Pr2UuIHOzZgweMFvZ9C+X+Bo7d7iscksWXiSqt8rYGPy5V65 -48r6f1CGPqI0GAwJaCgRHOThuVw+R7oyPxjMW4T182t0xHJ04eOLoEq9jWYv6q012iDTiIJh8BIi -trzQ1aTsr1SIJSQ8p22xcik/Plemf1WvbibG/ufMQFxRRIEKeN5KzlW/HdXZt1bv8Hb/C3m1r737 -qWmRRpdogBQ2HbN/uymYNqUg+oJgYjOk7Na6B6duxc8UpufWkjTYgfX8HV2qXB72o007uPc5AgMB -AAGjgZcwgZQwDwYDVR0TAQH/BAUwAwEB/zBSBgNVHSAESzBJMEcGBFUdIAAwPzA9BggrBgEFBQcC -ARYxaHR0cDovL3d3dy5wa2lvdmVyaGVpZC5ubC9wb2xpY2llcy9yb290LXBvbGljeS1HMjAOBgNV -HQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJFoMocVHYnitfGsNig0jQt8YojrMA0GCSqGSIb3DQEBCwUA -A4ICAQCoQUpnKpKBglBu4dfYszk78wIVCVBR7y29JHuIhjv5tLySCZa59sCrI2AGeYwRTlHSeYAz -+51IvuxBQ4EffkdAHOV6CMqqi3WtFMTC6GY8ggen5ieCWxjmD27ZUD6KQhgpxrRW/FYQoAUXvQwj -f/ST7ZwaUb7dRUG/kSS0H4zpX897IZmflZ85OkYcbPnNe5yQzSipx6lVu6xiNGI1E0sUOlWDuYaN -kqbG9AclVMwWVxJKgnjIFNkXgiYtXSAfea7+1HAWFpWD2DU5/1JddRwWxRNVz0fMdWVSSt7wsKfk -CpYL+63C4iWEst3kvX5ZbJvw8NjnyvLplzh+ib7M+zkXYT9y2zqR2GUBGR2tUKRXCnxLvJxxcypF -URmFzI79R6d0lR2o0a9OF7FpJsKqeFdbxU2n5Z4FF5TKsl+gSRiNNOkmbEgeqmiSBeGCc1qb3Adb -CG19ndeNIdn8FCCqwkXfP+cAslHkwvgFuXkajDTznlvkN1trSt8sV4pAWja63XVECDdCcAz+3F4h -oKOKwJCcaNpQ5kUQR3i2TtJlycM33+FCY7BXN0Ute4qcvwXqZVUz9zkQxSgqIXobisQk+T8VyJoV -IPVVYpbtbZNQvOSqeK3Zywplh6ZmwcSBo3c6WB4L7oOLnR7SUqTMHW+wmG2UMbX4cQrcufx9MmDm -66+KAQ== ------END CERTIFICATE----- - -CA Disig -======== ------BEGIN CERTIFICATE----- -MIIEDzCCAvegAwIBAgIBATANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMK -QnJhdGlzbGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwHhcNMDYw -MzIyMDEzOTM0WhcNMTYwMzIyMDEzOTM0WjBKMQswCQYDVQQGEwJTSzETMBEGA1UEBxMKQnJhdGlz -bGF2YTETMBEGA1UEChMKRGlzaWcgYS5zLjERMA8GA1UEAxMIQ0EgRGlzaWcwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQCS9jHBfYj9mQGp2HvycXXxMcbzdWb6UShGhJd4NLxs/LxFWYgm -GErENx+hSkS943EE9UQX4j/8SFhvXJ56CbpRNyIjZkMhsDxkovhqFQ4/61HhVKndBpnXmjxUizkD -Pw/Fzsbrg3ICqB9x8y34dQjbYkzo+s7552oftms1grrijxaSfQUMbEYDXcDtab86wYqg6I7ZuUUo -hwjstMoVvoLdtUSLLa2GDGhibYVW8qwUYzrG0ZmsNHhWS8+2rT+MitcE5eN4TPWGqvWP+j1scaMt -ymfraHtuM6kMgiioTGohQBUgDCZbg8KpFhXAJIJdKxatymP2dACw30PEEGBWZ2NFAgMBAAGjgf8w -gfwwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUjbJJaJ1yCCW5wCf1UJNWSEZx+Y8wDgYDVR0P -AQH/BAQDAgEGMDYGA1UdEQQvMC2BE2Nhb3BlcmF0b3JAZGlzaWcuc2uGFmh0dHA6Ly93d3cuZGlz -aWcuc2svY2EwZgYDVR0fBF8wXTAtoCugKYYnaHR0cDovL3d3dy5kaXNpZy5zay9jYS9jcmwvY2Ff -ZGlzaWcuY3JsMCygKqAohiZodHRwOi8vY2EuZGlzaWcuc2svY2EvY3JsL2NhX2Rpc2lnLmNybDAa -BgNVHSAEEzARMA8GDSuBHpGT5goAAAABAQEwDQYJKoZIhvcNAQEFBQADggEBAF00dGFMrzvY/59t -WDYcPQuBDRIrRhCA/ec8J9B6yKm2fnQwM6M6int0wHl5QpNt/7EpFIKrIYwvF/k/Ji/1WcbvgAa3 -mkkp7M5+cTxqEEHA9tOasnxakZzArFvITV734VP/Q3f8nktnbNfzg9Gg4H8l37iYC5oyOGwwoPP/ -CBUz91BKez6jPiCp3C9WgArtQVCwyfTssuMmRAAOb54GvCKWU3BlxFAKRmukLyeBEicTXxChds6K -ezfqwzlhA5WYOudsiCUI/HloDYd9Yvi0X/vF2Ey9WLw/Q1vUHgFNPGO+I++MzVpQuGhU+QqZMxEA -4Z7CRneC9VkGjCFMhwnN5ag= ------END CERTIFICATE----- - -Juur-SK -======= ------BEGIN CERTIFICATE----- -MIIE5jCCA86gAwIBAgIEO45L/DANBgkqhkiG9w0BAQUFADBdMRgwFgYJKoZIhvcNAQkBFglwa2lA 
-c2suZWUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKExlBUyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMRAw -DgYDVQQDEwdKdXVyLVNLMB4XDTAxMDgzMDE0MjMwMVoXDTE2MDgyNjE0MjMwMVowXTEYMBYGCSqG -SIb3DQEJARYJcGtpQHNrLmVlMQswCQYDVQQGEwJFRTEiMCAGA1UEChMZQVMgU2VydGlmaXRzZWVy -aW1pc2tlc2t1czEQMA4GA1UEAxMHSnV1ci1TSzCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBAIFxNj4zB9bjMI0TfncyRsvPGbJgMUaXhvSYRqTCZUXP00B841oiqBB4M8yIsdOBSvZiF3tf -TQou0M+LI+5PAk676w7KvRhj6IAcjeEcjT3g/1tf6mTll+g/mX8MCgkzABpTpyHhOEvWgxutr2TC -+Rx6jGZITWYfGAriPrsfB2WThbkasLnE+w0R9vXW+RvHLCu3GFH+4Hv2qEivbDtPL+/40UceJlfw -UR0zlv/vWT3aTdEVNMfqPxZIe5EcgEMPPbgFPtGzlc3Yyg/CQ2fbt5PgIoIuvvVoKIO5wTtpeyDa -Tpxt4brNj3pssAki14sL2xzVWiZbDcDq5WDQn/413z8CAwEAAaOCAawwggGoMA8GA1UdEwEB/wQF -MAMBAf8wggEWBgNVHSAEggENMIIBCTCCAQUGCisGAQQBzh8BAQEwgfYwgdAGCCsGAQUFBwICMIHD -HoHAAFMAZQBlACAAcwBlAHIAdABpAGYAaQBrAGEAYQB0ACAAbwBuACAAdgDkAGwAagBhAHMAdABh -AHQAdQBkACAAQQBTAC0AaQBzACAAUwBlAHIAdABpAGYAaQB0AHMAZQBlAHIAaQBtAGkAcwBrAGUA -cwBrAHUAcwAgAGEAbABhAG0ALQBTAEsAIABzAGUAcgB0AGkAZgBpAGsAYQBhAHQAaQBkAGUAIABr -AGkAbgBuAGkAdABhAG0AaQBzAGUAawBzMCEGCCsGAQUFBwIBFhVodHRwOi8vd3d3LnNrLmVlL2Nw -cy8wKwYDVR0fBCQwIjAgoB6gHIYaaHR0cDovL3d3dy5zay5lZS9qdXVyL2NybC8wHQYDVR0OBBYE -FASqekej5ImvGs8KQKcYP2/v6X2+MB8GA1UdIwQYMBaAFASqekej5ImvGs8KQKcYP2/v6X2+MA4G -A1UdDwEB/wQEAwIB5jANBgkqhkiG9w0BAQUFAAOCAQEAe8EYlFOiCfP+JmeaUOTDBS8rNXiRTHyo -ERF5TElZrMj3hWVcRrs7EKACr81Ptcw2Kuxd/u+gkcm2k298gFTsxwhwDY77guwqYHhpNjbRxZyL -abVAyJRld/JXIWY7zoVAtjNjGr95HvxcHdMdkxuLDF2FvZkwMhgJkVLpfKG6/2SSmuz+Ne6ML678 -IIbsSt4beDI3poHSna9aEhbKmVv8b20OxaAehsmR0FyYgl9jDIpaq9iVpszLita/ZEuOyoqysOkh -Mp6qqIWYNIE5ITuoOlIyPfZrN4YGWhWY3PARZv40ILcD9EEQfTmEeZZyY7aWAuVrua0ZTbvGRNs2 -yyqcjg== ------END CERTIFICATE----- - -Hongkong Post Root CA 1 -======================= ------BEGIN CERTIFICATE----- -MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT -DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx -NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n -IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 -ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr -auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh -qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY -V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV -HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i -h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio -l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei -IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps -T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT -c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== ------END CERTIFICATE----- - -SecureSign RootCA11 -=================== ------BEGIN CERTIFICATE----- -MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi -SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS -b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw -KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 -cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL -TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO -wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq 
-g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP -O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA -bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX -t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh -OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r -bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ -Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 -y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 -lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= ------END CERTIFICATE----- - -ACEDICOM Root -============= ------BEGIN CERTIFICATE----- -MIIFtTCCA52gAwIBAgIIYY3HhjsBggUwDQYJKoZIhvcNAQEFBQAwRDEWMBQGA1UEAwwNQUNFRElD -T00gUm9vdDEMMAoGA1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMB4XDTA4 -MDQxODE2MjQyMloXDTI4MDQxMzE2MjQyMlowRDEWMBQGA1UEAwwNQUNFRElDT00gUm9vdDEMMAoG -A1UECwwDUEtJMQ8wDQYDVQQKDAZFRElDT00xCzAJBgNVBAYTAkVTMIICIjANBgkqhkiG9w0BAQEF -AAOCAg8AMIICCgKCAgEA/5KV4WgGdrQsyFhIyv2AVClVYyT/kGWbEHV7w2rbYgIB8hiGtXxaOLHk -WLn709gtn70yN78sFW2+tfQh0hOR2QetAQXW8713zl9CgQr5auODAKgrLlUTY4HKRxx7XBZXehuD -YAQ6PmXDzQHe3qTWDLqO3tkE7hdWIpuPY/1NFgu3e3eM+SW10W2ZEi5PGrjm6gSSrj0RuVFCPYew -MYWveVqc/udOXpJPQ/yrOq2lEiZmueIM15jO1FillUAKt0SdE3QrwqXrIhWYENiLxQSfHY9g5QYb -m8+5eaA9oiM/Qj9r+hwDezCNzmzAv+YbX79nuIQZ1RXve8uQNjFiybwCq0Zfm/4aaJQ0PZCOrfbk -HQl/Sog4P75n/TSW9R28MHTLOO7VbKvU/PQAtwBbhTIWdjPp2KOZnQUAqhbm84F9b32qhm2tFXTT -xKJxqvQUfecyuB+81fFOvW8XAjnXDpVCOscAPukmYxHqC9FK/xidstd7LzrZlvvoHpKuE1XI2Sf2 -3EgbsCTBheN3nZqk8wwRHQ3ItBTutYJXCb8gWH8vIiPYcMt5bMlL8qkqyPyHK9caUPgn6C9D4zq9 -2Fdx/c6mUlv53U3t5fZvie27k5x2IXXwkkwp9y+cAS7+UEaeZAwUswdbxcJzbPEHXEUkFDWug/Fq -TYl6+rPYLWbwNof1K1MCAwEAAaOBqjCBpzAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKaz -4SsrSbbXc6GqlPUB53NlTKxQMA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUprPhKytJttdzoaqU -9QHnc2VMrFAwRAYDVR0gBD0wOzA5BgRVHSAAMDEwLwYIKwYBBQUHAgEWI2h0dHA6Ly9hY2VkaWNv -bS5lZGljb21ncm91cC5jb20vZG9jMA0GCSqGSIb3DQEBBQUAA4ICAQDOLAtSUWImfQwng4/F9tqg -aHtPkl7qpHMyEVNEskTLnewPeUKzEKbHDZ3Ltvo/Onzqv4hTGzz3gvoFNTPhNahXwOf9jU8/kzJP -eGYDdwdY6ZXIfj7QeQCM8htRM5u8lOk6e25SLTKeI6RF+7YuE7CLGLHdztUdp0J/Vb77W7tH1Pwk -zQSulgUV1qzOMPPKC8W64iLgpq0i5ALudBF/TP94HTXa5gI06xgSYXcGCRZj6hitoocf8seACQl1 -ThCojz2GuHURwCRiipZ7SkXp7FnFvmuD5uHorLUwHv4FB4D54SMNUI8FmP8sX+g7tq3PgbUhh8oI -KiMnMCArz+2UW6yyetLHKKGKC5tNSixthT8Jcjxn4tncB7rrZXtaAWPWkFtPF2Y9fwsZo5NjEFIq -nxQWWOLcpfShFosOkYuByptZ+thrkQdlVV9SH686+5DdaaVbnG0OLLb6zqylfDJKZ0DcMDQj3dcE -I2bw/FWAp/tmGYI1Z2JwOV5vx+qQQEQIHriy1tvuWacNGHk0vFQYXlPKNFHtRQrmjseCNj6nOGOp -MCwXEGCSn1WHElkQwg9naRHMTh5+Spqtr0CodaxWkHS4oJyleW/c6RrIaQXpuvoDs3zk4E7Czp3o -tkYNbn5XOmeUwssfnHdKZ05phkOTOPu220+DkdRgfks+KzgHVZhepA== ------END CERTIFICATE----- - -Verisign Class 3 Public Primary Certification Authority -======================================================= ------BEGIN CERTIFICATE----- -MIICPDCCAaUCEDyRMcsf9tAbDpq40ES/Er4wDQYJKoZIhvcNAQEFBQAwXzELMAkGA1UEBhMCVVMx -FzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAzIFB1YmxpYyBQcmltYXJ5 -IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTk2MDEyOTAwMDAwMFoXDTI4MDgwMjIzNTk1OVow -XzELMAkGA1UEBhMCVVMxFzAVBgNVBAoTDlZlcmlTaWduLCBJbmMuMTcwNQYDVQQLEy5DbGFzcyAz -IFB1YmxpYyBQcmltYXJ5IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIGfMA0GCSqGSIb3DQEBAQUA -A4GNADCBiQKBgQDJXFme8huKARS0EN8EQNvjV69qRUCPhAwL0TPZ2RHP7gJYHyX3KqhEBarsAx94 -f56TuZoAqiN91qyFomNFx3InzPRMxnVx0jnvT0Lwdd8KkMaOIG+YD/isI19wKTakyYbnsZogy1Ol -hec9vn2a/iRFM9x2Fe0PonFkTGUugWhFpwIDAQABMA0GCSqGSIb3DQEBBQUAA4GBABByUqkFFBky 
-CEHwxWsKzH4PIRnN5GfcX6kb5sroc50i2JhucwNhkcV8sEVAbkSdjbCxlnRhLQ2pRdKkkirWmnWX -bj9T/UWZYB2oK0z5XqcJ2HUw19JlYD1n1khVdWk/kfVIC0dpImmClr7JyDiGSnoscxlIaU5rfGW/ -D/xwzoiQ ------END CERTIFICATE----- - -Microsec e-Szigno Root CA 2009 -============================== ------BEGIN CERTIFICATE----- -MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER -MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv -c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o -dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE -BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt -U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw -DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA -fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG -0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA -pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm -1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC -AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf -QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE -FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o -lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX -I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 -tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 -yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi -LXpUq3DDfSJlgnCW ------END CERTIFICATE----- - -E-Guven Kok Elektronik Sertifika Hizmet Saglayicisi -=================================================== ------BEGIN CERTIFICATE----- -MIIDtjCCAp6gAwIBAgIQRJmNPMADJ72cdpW56tustTANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG -EwJUUjEoMCYGA1UEChMfRWxla3Ryb25payBCaWxnaSBHdXZlbmxpZ2kgQS5TLjE8MDoGA1UEAxMz -ZS1HdXZlbiBLb2sgRWxla3Ryb25payBTZXJ0aWZpa2EgSGl6bWV0IFNhZ2xheWljaXNpMB4XDTA3 -MDEwNDExMzI0OFoXDTE3MDEwNDExMzI0OFowdTELMAkGA1UEBhMCVFIxKDAmBgNVBAoTH0VsZWt0 -cm9uaWsgQmlsZ2kgR3V2ZW5saWdpIEEuUy4xPDA6BgNVBAMTM2UtR3V2ZW4gS29rIEVsZWt0cm9u -aWsgU2VydGlmaWthIEhpem1ldCBTYWdsYXlpY2lzaTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBAMMSIJ6wXgBljU5Gu4Bc6SwGl9XzcslwuedLZYDBS75+PNdUMZTe1RK6UxYC6lhj71vY -8+0qGqpxSKPcEC1fX+tcS5yWCEIlKBHMilpiAVDV6wlTL/jDj/6z/P2douNffb7tC+Bg62nsM+3Y -jfsSSYMAyYuXjDtzKjKzEve5TfL0TW3H5tYmNwjy2f1rXKPlSFxYvEK+A1qBuhw1DADT9SN+cTAI -JjjcJRFHLfO6IxClv7wC90Nex/6wN1CZew+TzuZDLMN+DfIcQ2Zgy2ExR4ejT669VmxMvLz4Bcpk -9Ok0oSy1c+HCPujIyTQlCFzz7abHlJ+tiEMl1+E5YP6sOVkCAwEAAaNCMEAwDgYDVR0PAQH/BAQD -AgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFJ/uRLOU1fqRTy7ZVZoEVtstxNulMA0GCSqG -SIb3DQEBBQUAA4IBAQB/X7lTW2M9dTLn+sR0GstG30ZpHFLPqk/CaOv/gKlR6D1id4k9CnU58W5d -F4dvaAXBlGzZXd/aslnLpRCKysw5zZ/rTt5S/wzw9JKp8mxTq5vSR6AfdPebmvEvFZ96ZDAYBzwq -D2fK/A+JYZ1lpTzlvBNbCNvj/+27BrtqBrF6T2XGgv0enIu1De5Iu7i9qgi0+6N8y5/NkHZchpZ4 -Vwpm+Vganf2XKWDeEaaQHBkc7gGWIjQ0LpH5t8Qn0Xvmv/uARFoW5evg1Ao4vOSR49XrXMGs3xtq -fJ7lddK2l4fbzIcrQzqECK+rPNv3PGYxhrCdU3nt+CPeQuMtgvEP5fqX ------END CERTIFICATE----- - -GlobalSign Root CA - R3 -======================= ------BEGIN CERTIFICATE----- -MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv -YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh -bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT 
-aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln -bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt -iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ -0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 -rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl -OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 -xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE -FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 -lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 -EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E -bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 -YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r -kpeDMdmztcpHWD9f ------END CERTIFICATE----- - -TC TrustCenter Universal CA III -=============================== ------BEGIN CERTIFICATE----- -MIID4TCCAsmgAwIBAgIOYyUAAQACFI0zFQLkbPQwDQYJKoZIhvcNAQEFBQAwezELMAkGA1UEBhMC -REUxHDAaBgNVBAoTE1RDIFRydXN0Q2VudGVyIEdtYkgxJDAiBgNVBAsTG1RDIFRydXN0Q2VudGVy -IFVuaXZlcnNhbCBDQTEoMCYGA1UEAxMfVEMgVHJ1c3RDZW50ZXIgVW5pdmVyc2FsIENBIElJSTAe -Fw0wOTA5MDkwODE1MjdaFw0yOTEyMzEyMzU5NTlaMHsxCzAJBgNVBAYTAkRFMRwwGgYDVQQKExNU -QyBUcnVzdENlbnRlciBHbWJIMSQwIgYDVQQLExtUQyBUcnVzdENlbnRlciBVbml2ZXJzYWwgQ0Ex -KDAmBgNVBAMTH1RDIFRydXN0Q2VudGVyIFVuaXZlcnNhbCBDQSBJSUkwggEiMA0GCSqGSIb3DQEB -AQUAA4IBDwAwggEKAoIBAQDC2pxisLlxErALyBpXsq6DFJmzNEubkKLF5+cvAqBNLaT6hdqbJYUt -QCggbergvbFIgyIpRJ9Og+41URNzdNW88jBmlFPAQDYvDIRlzg9uwliT6CwLOunBjvvya8o84pxO -juT5fdMnnxvVZ3iHLX8LR7PH6MlIfK8vzArZQe+f/prhsq75U7Xl6UafYOPfjdN/+5Z+s7Vy+Eut -CHnNaYlAJ/Uqwa1D7KRTyGG299J5KmcYdkhtWyUB0SbFt1dpIxVbYYqt8Bst2a9c8SaQaanVDED1 -M4BDj5yjdipFtK+/fz6HP3bFzSreIMUWWMv5G/UPyw0RUmS40nZid4PxWJ//AgMBAAGjYzBhMB8G -A1UdIwQYMBaAFFbn4VslQ4Dg9ozhcbyO5YAvxEjiMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/ -BAQDAgEGMB0GA1UdDgQWBBRW5+FbJUOA4PaM4XG8juWAL8RI4jANBgkqhkiG9w0BAQUFAAOCAQEA -g8ev6n9NCjw5sWi+e22JLumzCecYV42FmhfzdkJQEw/HkG8zrcVJYCtsSVgZ1OK+t7+rSbyUyKu+ -KGwWaODIl0YgoGhnYIg5IFHYaAERzqf2EQf27OysGh+yZm5WZ2B6dF7AbZc2rrUNXWZzwCUyRdhK -BgePxLcHsU0GDeGl6/R1yrqc0L2z0zIkTO5+4nYES0lT2PLpVDP85XEfPRRclkvxOvIAu2y0+pZV -CIgJwcyRGSmwIC3/yzikQOEXvnlhgP8HA4ZMTnsGnxGGjYnuJ8Tb4rwZjgvDwxPHLQNjO9Po5KIq -woIIlBZU8O8fJ5AluA0OKBtHd0e9HKgl8ZS0Zg== ------END CERTIFICATE----- - -Autoridad de Certificacion Firmaprofesional CIF A62634068 -========================================================= ------BEGIN CERTIFICATE----- -MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA -BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 -MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw -QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB -NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD -Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P -B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY -7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH -ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI -plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX -MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX 
-LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK -bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU -vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud -EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH -DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp -cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA -bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx -ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx -51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk -R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP -T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f -Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl -osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR -crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR -saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD -KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi -6Et8Vcad+qMUu2WFbm5PEn4KPJ2V ------END CERTIFICATE----- - -Izenpe.com -========== ------BEGIN CERTIFICATE----- -MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG -EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz -MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu -QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ -03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK -ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU -+zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC -PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT -OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK -F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK -0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ -0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB -leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID -AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ -SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG -NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx -MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O -BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l -Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga -kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q -hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs -g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 -aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 -nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC -ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo -Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z -WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== ------END CERTIFICATE----- - -Chambers of Commerce Root - 2008 -================================ 
------BEGIN CERTIFICATE----- -MIIHTzCCBTegAwIBAgIJAKPaQn6ksa7aMA0GCSqGSIb3DQEBBQUAMIGuMQswCQYDVQQGEwJFVTFD -MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv -bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu -QS4xKTAnBgNVBAMTIENoYW1iZXJzIG9mIENvbW1lcmNlIFJvb3QgLSAyMDA4MB4XDTA4MDgwMTEy -Mjk1MFoXDTM4MDczMTEyMjk1MFowga4xCzAJBgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNl -ZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNhbWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQF -EwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENhbWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJl -cnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoIC -AQCvAMtwNyuAWko6bHiUfaN/Gh/2NdW928sNRHI+JrKQUrpjOyhYb6WzbZSm891kDFX29ufyIiKA -XuFixrYp4YFs8r/lfTJqVKAyGVn+H4vXPWCGhSRv4xGzdz4gljUha7MI2XAuZPeEklPWDrCQiorj -h40G072QDuKZoRuGDtqaCrsLYVAGUvGef3bsyw/QHg3PmTA9HMRFEFis1tPo1+XqxQEHd9ZR5gN/ -ikilTWh1uem8nk4ZcfUyS5xtYBkL+8ydddy/Js2Pk3g5eXNeJQ7KXOt3EgfLZEFHcpOrUMPrCXZk -NNI5t3YRCQ12RcSprj1qr7V9ZS+UWBDsXHyvfuK2GNnQm05aSd+pZgvMPMZ4fKecHePOjlO+Bd5g -D2vlGts/4+EhySnB8esHnFIbAURRPHsl18TlUlRdJQfKFiC4reRB7noI/plvg6aRArBsNlVq5331 -lubKgdaX8ZSD6e2wsWsSaR6s+12pxZjptFtYer49okQ6Y1nUCyXeG0+95QGezdIp1Z8XGQpvvwyQ -0wlf2eOKNcx5Wk0ZN5K3xMGtr/R5JJqyAQuxr1yW84Ay+1w9mPGgP0revq+ULtlVmhduYJ1jbLhj -ya6BXBg14JC7vjxPNyK5fuvPnnchpj04gftI2jE9K+OJ9dC1vX7gUMQSibMjmhAxhduub+84Mxh2 -EQIDAQABo4IBbDCCAWgwEgYDVR0TAQH/BAgwBgEB/wIBDDAdBgNVHQ4EFgQU+SSsD7K1+HnA+mCI -G8TZTQKeFxkwgeMGA1UdIwSB2zCB2IAU+SSsD7K1+HnA+mCIG8TZTQKeFxmhgbSkgbEwga4xCzAJ -BgNVBAYTAkVVMUMwQQYDVQQHEzpNYWRyaWQgKHNlZSBjdXJyZW50IGFkZHJlc3MgYXQgd3d3LmNh -bWVyZmlybWEuY29tL2FkZHJlc3MpMRIwEAYDVQQFEwlBODI3NDMyODcxGzAZBgNVBAoTEkFDIENh -bWVyZmlybWEgUy5BLjEpMCcGA1UEAxMgQ2hhbWJlcnMgb2YgQ29tbWVyY2UgUm9vdCAtIDIwMDiC -CQCj2kJ+pLGu2jAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUH -AgEWHGh0dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAJASryI1 -wqM58C7e6bXpeHxIvj99RZJe6dqxGfwWPJ+0W2aeaufDuV2I6A+tzyMP3iU6XsxPpcG1Lawk0lgH -3qLPaYRgM+gQDROpI9CF5Y57pp49chNyM/WqfcZjHwj0/gF/JM8rLFQJ3uIrbZLGOU8W6jx+ekbU -RWpGqOt1glanq6B8aBMz9p0w8G8nOSQjKpD9kCk18pPfNKXG9/jvjA9iSnyu0/VU+I22mlaHFoI6 -M6taIgj3grrqLuBHmrS1RaMFO9ncLkVAO+rcf+g769HsJtg1pDDFOqxXnrN2pSB7+R5KBWIBpih1 -YJeSDW4+TTdDDZIVnBgizVGZoCkaPF+KMjNbMMeJL0eYD6MDxvbxrN8y8NmBGuScvfaAFPDRLLmF -9dijscilIeUcE5fuDr3fKanvNFNb0+RqE4QGtjICxFKuItLcsiFCGtpA8CnJ7AoMXOLQusxI0zcK -zBIKinmwPQN/aUv0NCB9szTqjktk9T79syNnFQ0EuPAtwQlRPLJsFfClI9eDdOTlLsn+mCdCxqvG -nrDQWzilm1DefhiYtUU79nm06PcaewaD+9CL2rvHvRirCG88gGtAPxkZumWK5r7VXNM21+9AUiRg -OGcEMeyP84LG3rlV8zsxkVrctQgVrXYlCg17LofiDKYGvCYQbTed7N14jHyAxfDZd0jQ ------END CERTIFICATE----- - -Global Chambersign Root - 2008 -============================== ------BEGIN CERTIFICATE----- -MIIHSTCCBTGgAwIBAgIJAMnN0+nVfSPOMA0GCSqGSIb3DQEBBQUAMIGsMQswCQYDVQQGEwJFVTFD -MEEGA1UEBxM6TWFkcmlkIChzZWUgY3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNv -bS9hZGRyZXNzKTESMBAGA1UEBRMJQTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMu -QS4xJzAlBgNVBAMTHkdsb2JhbCBDaGFtYmVyc2lnbiBSb290IC0gMjAwODAeFw0wODA4MDExMjMx -NDBaFw0zODA3MzExMjMxNDBaMIGsMQswCQYDVQQGEwJFVTFDMEEGA1UEBxM6TWFkcmlkIChzZWUg -Y3VycmVudCBhZGRyZXNzIGF0IHd3dy5jYW1lcmZpcm1hLmNvbS9hZGRyZXNzKTESMBAGA1UEBRMJ -QTgyNzQzMjg3MRswGQYDVQQKExJBQyBDYW1lcmZpcm1hIFMuQS4xJzAlBgNVBAMTHkdsb2JhbCBD -aGFtYmVyc2lnbiBSb290IC0gMjAwODCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMDf -VtPkOpt2RbQT2//BthmLN0EYlVJH6xedKYiONWwGMi5HYvNJBL99RDaxccy9Wglz1dmFRP+RVyXf -XjaOcNFccUMd2drvXNL7G706tcuto8xEpw2uIRU/uXpbknXYpBI4iRmKt4DS4jJvVpyR1ogQC7N0 
-ZJJ0YPP2zxhPYLIj0Mc7zmFLmY/CDNBAspjcDahOo7kKrmCgrUVSY7pmvWjg+b4aqIG7HkF4ddPB -/gBVsIdU6CeQNR1MM62X/JcumIS/LMmjv9GYERTtY/jKmIhYF5ntRQOXfjyGHoiMvvKRhI9lNNgA -TH23MRdaKXoKGCQwoze1eqkBfSbW+Q6OWfH9GzO1KTsXO0G2Id3UwD2ln58fQ1DJu7xsepeY7s2M -H/ucUa6LcL0nn3HAa6x9kGbo1106DbDVwo3VyJ2dwW3Q0L9R5OP4wzg2rtandeavhENdk5IMagfe -Ox2YItaswTXbo6Al/3K1dh3ebeksZixShNBFks4c5eUzHdwHU1SjqoI7mjcv3N2gZOnm3b2u/GSF -HTynyQbehP9r6GsaPMWis0L7iwk+XwhSx2LE1AVxv8Rk5Pihg+g+EpuoHtQ2TS9x9o0o9oOpE9Jh -wZG7SMA0j0GMS0zbaRL/UJScIINZc+18ofLx/d33SdNDWKBWY8o9PeU1VlnpDsogzCtLkykPAgMB -AAGjggFqMIIBZjASBgNVHRMBAf8ECDAGAQH/AgEMMB0GA1UdDgQWBBS5CcqcHtvTbDprru1U8VuT -BjUuXjCB4QYDVR0jBIHZMIHWgBS5CcqcHtvTbDprru1U8VuTBjUuXqGBsqSBrzCBrDELMAkGA1UE -BhMCRVUxQzBBBgNVBAcTOk1hZHJpZCAoc2VlIGN1cnJlbnQgYWRkcmVzcyBhdCB3d3cuY2FtZXJm -aXJtYS5jb20vYWRkcmVzcykxEjAQBgNVBAUTCUE4Mjc0MzI4NzEbMBkGA1UEChMSQUMgQ2FtZXJm -aXJtYSBTLkEuMScwJQYDVQQDEx5HbG9iYWwgQ2hhbWJlcnNpZ24gUm9vdCAtIDIwMDiCCQDJzdPp -1X0jzjAOBgNVHQ8BAf8EBAMCAQYwPQYDVR0gBDYwNDAyBgRVHSAAMCowKAYIKwYBBQUHAgEWHGh0 -dHA6Ly9wb2xpY3kuY2FtZXJmaXJtYS5jb20wDQYJKoZIhvcNAQEFBQADggIBAICIf3DekijZBZRG -/5BXqfEv3xoNa/p8DhxJJHkn2EaqbylZUohwEurdPfWbU1Rv4WCiqAm57OtZfMY18dwY6fFn5a+6 -ReAJ3spED8IXDneRRXozX1+WLGiLwUePmJs9wOzL9dWCkoQ10b42OFZyMVtHLaoXpGNR6woBrX/s -dZ7LoR/xfxKxueRkf2fWIyr0uDldmOghp+G9PUIadJpwr2hsUF1Jz//7Dl3mLEfXgTpZALVza2Mg -9jFFCDkO9HB+QHBaP9BrQql0PSgvAm11cpUJjUhjxsYjV5KTXjXBjfkK9yydYhz2rXzdpjEetrHH -foUm+qRqtdpjMNHvkzeyZi99Bffnt0uYlDXA2TopwZ2yUDMdSqlapskD7+3056huirRXhOukP9Du -qqqHW2Pok+JrqNS4cnhrG+055F3Lm6qH1U9OAP7Zap88MQ8oAgF9mOinsKJknnn4SPIVqczmyETr -P3iZ8ntxPjzxmKfFGBI/5rsoM0LpRQp8bfKGeS/Fghl9CYl8slR2iK7ewfPM4W7bMdaTrpmg7yVq -c5iJWzouE4gev8CSlDQb4ye3ix5vQv/n6TebUB0tovkC7stYWDpxvGjjqsGvHCgfotwjZT+B6q6Z -09gwzxMNTxXJhLynSC34MCN32EZLeW32jO06f2ARePTpm67VVMB0gNELQp/B ------END CERTIFICATE----- - -Go Daddy Root Certificate Authority - G2 -======================================== ------BEGIN CERTIFICATE----- -MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT -B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu -MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 -MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 -b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G -A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI -hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq -9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD -+qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd -fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl -NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC -MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 -BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac -vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r -5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV -N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO -LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 ------END CERTIFICATE----- - -Starfield Root Certificate Authority - G2 -========================================= ------BEGIN CERTIFICATE----- -MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT -B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s 
-b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 -eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw -DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg -VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB -dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv -W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs -bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk -N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf -ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU -JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol -TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx -4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw -F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K -pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ -c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 ------END CERTIFICATE----- - -Starfield Services Root Certificate Authority - G2 -================================================== ------BEGIN CERTIFICATE----- -MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT -B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s -b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl -IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV -BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT -dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg -Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC -AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 -h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa -hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP -LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB -rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw -AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG -SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP -E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy -xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd -iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza -YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 ------END CERTIFICATE----- - -AffirmTrust Commercial -====================== ------BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS -BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw -MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly -bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb -DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV -C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 -BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww 
-MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV -HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG -hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi -qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv -0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh -sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= ------END CERTIFICATE----- - -AffirmTrust Networking -====================== ------BEGIN CERTIFICATE----- -MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS -BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw -MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly -bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE -Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI -dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 -/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb -h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV -HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC -AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu -UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 -12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 -WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 -/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= ------END CERTIFICATE----- - -AffirmTrust Premium -=================== ------BEGIN CERTIFICATE----- -MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS -BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy -OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy -dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A -MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn -BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV -5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs -+7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd -GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R -p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI -S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 -6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 -/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo -+Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB -/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv -MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg -Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC -6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S -L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK -+4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV -BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg 
-IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 -g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb -zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== ------END CERTIFICATE----- - -AffirmTrust Premium ECC -======================= ------BEGIN CERTIFICATE----- -MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV -BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx -MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U -cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA -IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ -N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW -BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK -BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X -57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM -eQ== ------END CERTIFICATE----- - -Certum Trusted Network CA -========================= ------BEGIN CERTIFICATE----- -MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK -ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv -biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy -MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU -ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 -MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC -AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC -l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J -J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 -fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 -cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB -Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw -DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj -jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 -mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj -Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI -03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= ------END CERTIFICATE----- - -Certinomis - Autorité Racine -============================= ------BEGIN CERTIFICATE----- -MIIFnDCCA4SgAwIBAgIBATANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJGUjETMBEGA1UEChMK -Q2VydGlub21pczEXMBUGA1UECxMOMDAwMiA0MzM5OTg5MDMxJjAkBgNVBAMMHUNlcnRpbm9taXMg -LSBBdXRvcml0w6kgUmFjaW5lMB4XDTA4MDkxNzA4Mjg1OVoXDTI4MDkxNzA4Mjg1OVowYzELMAkG -A1UEBhMCRlIxEzARBgNVBAoTCkNlcnRpbm9taXMxFzAVBgNVBAsTDjAwMDIgNDMzOTk4OTAzMSYw -JAYDVQQDDB1DZXJ0aW5vbWlzIC0gQXV0b3JpdMOpIFJhY2luZTCCAiIwDQYJKoZIhvcNAQEBBQAD -ggIPADCCAgoCggIBAJ2Fn4bT46/HsmtuM+Cet0I0VZ35gb5j2CN2DpdUzZlMGvE5x4jYF1AMnmHa -wE5V3udauHpOd4cN5bjr+p5eex7Ezyh0x5P1FMYiKAT5kcOrJ3NqDi5N8y4oH3DfVS9O7cdxbwly -Lu3VMpfQ8Vh30WC8Tl7bmoT2R2FFK/ZQpn9qcSdIhDWerP5pqZ56XjUl+rSnSTV3lqc2W+HN3yNw -2F1MpQiD8aYkOBOo7C+ooWfHpi2GR+6K/OybDnT0K0kCe5B1jPyZOQE51kqJ5Z52qz6WKDgmi92N -jMD2AR5vpTESOH2VwnHu7XSu5DaiQ3XV8QCb4uTXzEIDS3h65X27uK4uIJPT5GHfceF2Z5c/tt9q -c1pkIuVC28+BA5PY9OMQ4HL2AHCs8MF6DwV/zzRpRbWT5BnbUhYjBYkOjUjkJW+zeL9i9Qf6lSTC -lrLooyPCXQP8w9PlfMl1I9f09bze5N/NgL+RiH2nE7Q5uiy6vdFrzPOlKO1Enn1So2+WLhl+HPNb 
-xxaOu2B9d2ZHVIIAEWBsMsGoOBvrbpgT1u449fCfDu/+MYHB0iSVL1N6aaLwD4ZFjliCK0wi1F6g -530mJ0jfJUaNSih8hp75mxpZuWW/Bd22Ql095gBIgl4g9xGC3srYn+Y3RyYe63j3YcNBZFgCQfna -4NH4+ej9Uji29YnfAgMBAAGjWzBZMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0G -A1UdDgQWBBQNjLZh2kS40RR9w759XkjwzspqsDAXBgNVHSAEEDAOMAwGCiqBegFWAgIAAQEwDQYJ -KoZIhvcNAQEFBQADggIBACQ+YAZ+He86PtvqrxyaLAEL9MW12Ukx9F1BjYkMTv9sov3/4gbIOZ/x -WqndIlgVqIrTseYyCYIDbNc/CMf4uboAbbnW/FIyXaR/pDGUu7ZMOH8oMDX/nyNTt7buFHAAQCva -R6s0fl6nVjBhK4tDrP22iCj1a7Y+YEq6QpA0Z43q619FVDsXrIvkxmUP7tCMXWY5zjKn2BCXwH40 -nJ+U8/aGH88bc62UeYdocMMzpXDn2NU4lG9jeeu/Cg4I58UvD0KgKxRA/yHgBcUn4YQRE7rWhh1B -CxMjidPJC+iKunqjo3M3NYB9Ergzd0A4wPpeMNLytqOx1qKVl4GbUu1pTP+A5FPbVFsDbVRfsbjv -JL1vnxHDx2TCDyhihWZeGnuyt++uNckZM6i4J9szVb9o4XVIRFb7zdNIu0eJOqxp9YDG5ERQL1TE -qkPFMTFYvZbF6nVsmnWxTfj3l/+WFvKXTej28xH5On2KOG4Ey+HTRRWqpdEdnV1j6CTmNhTih60b -WfVEm/vXd3wfAXBioSAaosUaKPQhA+4u2cGA6rnZgtZbdsLLO7XSAPCjDuGtbkD326C00EauFddE -wk01+dIL8hf2rGbVJLJP0RyZwG71fet0BLj5TXcJ17TPBzAJ8bgAVtkXFhYKK4bfjwEZGuW7gmP/ -vgt2Fl43N+bYdJeimUV5 ------END CERTIFICATE----- - -Root CA Generalitat Valenciana -============================== ------BEGIN CERTIFICATE----- -MIIGizCCBXOgAwIBAgIEO0XlaDANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJFUzEfMB0GA1UE -ChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290 -IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwHhcNMDEwNzA2MTYyMjQ3WhcNMjEwNzAxMTUyMjQ3 -WjBoMQswCQYDVQQGEwJFUzEfMB0GA1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UE -CxMGUEtJR1ZBMScwJQYDVQQDEx5Sb290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmEwggEiMA0G -CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGKqtXETcvIorKA3Qdyu0togu8M1JAJke+WmmmO3I2 -F0zo37i7L3bhQEZ0ZQKQUgi0/6iMweDHiVYQOTPvaLRfX9ptI6GJXiKjSgbwJ/BXufjpTjJ3Cj9B -ZPPrZe52/lSqfR0grvPXdMIKX/UIKFIIzFVd0g/bmoGlu6GzwZTNVOAydTGRGmKy3nXiz0+J2ZGQ -D0EbtFpKd71ng+CT516nDOeB0/RSrFOyA8dEJvt55cs0YFAQexvba9dHq198aMpunUEDEO5rmXte -JajCq+TA81yc477OMUxkHl6AovWDfgzWyoxVjr7gvkkHD6MkQXpYHYTqWBLI4bft75PelAgxAgMB -AAGjggM7MIIDNzAyBggrBgEFBQcBAQQmMCQwIgYIKwYBBQUHMAGGFmh0dHA6Ly9vY3NwLnBraS5n -dmEuZXMwEgYDVR0TAQH/BAgwBgEB/wIBAjCCAjQGA1UdIASCAiswggInMIICIwYKKwYBBAG/VQIB -ADCCAhMwggHoBggrBgEFBQcCAjCCAdoeggHWAEEAdQB0AG8AcgBpAGQAYQBkACAAZABlACAAQwBl -AHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAFIAYQDtAHoAIABkAGUAIABsAGEAIABHAGUAbgBlAHIA -YQBsAGkAdABhAHQAIABWAGEAbABlAG4AYwBpAGEAbgBhAC4ADQAKAEwAYQAgAEQAZQBjAGwAYQBy -AGEAYwBpAPMAbgAgAGQAZQAgAFAAcgDhAGMAdABpAGMAYQBzACAAZABlACAAQwBlAHIAdABpAGYA -aQBjAGEAYwBpAPMAbgAgAHEAdQBlACAAcgBpAGcAZQAgAGUAbAAgAGYAdQBuAGMAaQBvAG4AYQBt -AGkAZQBuAHQAbwAgAGQAZQAgAGwAYQAgAHAAcgBlAHMAZQBuAHQAZQAgAEEAdQB0AG8AcgBpAGQA -YQBkACAAZABlACAAQwBlAHIAdABpAGYAaQBjAGEAYwBpAPMAbgAgAHMAZQAgAGUAbgBjAHUAZQBu -AHQAcgBhACAAZQBuACAAbABhACAAZABpAHIAZQBjAGMAaQDzAG4AIAB3AGUAYgAgAGgAdAB0AHAA -OgAvAC8AdwB3AHcALgBwAGsAaQAuAGcAdgBhAC4AZQBzAC8AYwBwAHMwJQYIKwYBBQUHAgEWGWh0 -dHA6Ly93d3cucGtpLmd2YS5lcy9jcHMwHQYDVR0OBBYEFHs100DSHHgZZu90ECjcPk+yeAT8MIGV -BgNVHSMEgY0wgYqAFHs100DSHHgZZu90ECjcPk+yeAT8oWykajBoMQswCQYDVQQGEwJFUzEfMB0G -A1UEChMWR2VuZXJhbGl0YXQgVmFsZW5jaWFuYTEPMA0GA1UECxMGUEtJR1ZBMScwJQYDVQQDEx5S -b290IENBIEdlbmVyYWxpdGF0IFZhbGVuY2lhbmGCBDtF5WgwDQYJKoZIhvcNAQEFBQADggEBACRh -TvW1yEICKrNcda3FbcrnlD+laJWIwVTAEGmiEi8YPyVQqHxK6sYJ2fR1xkDar1CdPaUWu20xxsdz -Ckj+IHLtb8zog2EWRpABlUt9jppSCS/2bxzkoXHPjCpaF3ODR00PNvsETUlR4hTJZGH71BTg9J63 -NI8KJr2XXPR5OkowGcytT6CYirQxlyric21+eLj4iIlPsSKRZEv1UN4D2+XFducTZnV+ZfsBn5OH -iJ35Rld8TWCvmHMTI6QgkYH60GFmuH3Rr9ZvHmw96RH9qfmCIoaZM3Fa6hlXPZHNqcCjbgcTpsnt -+GijnsNacgmHKNHEc8RzGF9QdRYxn7fofMM= ------END CERTIFICATE----- - -A-Trust-nQual-03 -================ ------BEGIN CERTIFICATE----- 
-MIIDzzCCAregAwIBAgIDAWweMA0GCSqGSIb3DQEBBQUAMIGNMQswCQYDVQQGEwJBVDFIMEYGA1UE -Cgw/QS1UcnVzdCBHZXMuIGYuIFNpY2hlcmhlaXRzc3lzdGVtZSBpbSBlbGVrdHIuIERhdGVudmVy -a2VociBHbWJIMRkwFwYDVQQLDBBBLVRydXN0LW5RdWFsLTAzMRkwFwYDVQQDDBBBLVRydXN0LW5R -dWFsLTAzMB4XDTA1MDgxNzIyMDAwMFoXDTE1MDgxNzIyMDAwMFowgY0xCzAJBgNVBAYTAkFUMUgw -RgYDVQQKDD9BLVRydXN0IEdlcy4gZi4gU2ljaGVyaGVpdHNzeXN0ZW1lIGltIGVsZWt0ci4gRGF0 -ZW52ZXJrZWhyIEdtYkgxGTAXBgNVBAsMEEEtVHJ1c3QtblF1YWwtMDMxGTAXBgNVBAMMEEEtVHJ1 -c3QtblF1YWwtMDMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtPWFuA/OQO8BBC4SA -zewqo51ru27CQoT3URThoKgtUaNR8t4j8DRE/5TrzAUjlUC5B3ilJfYKvUWG6Nm9wASOhURh73+n -yfrBJcyFLGM/BWBzSQXgYHiVEEvc+RFZznF/QJuKqiTfC0Li21a8StKlDJu3Qz7dg9MmEALP6iPE -SU7l0+m0iKsMrmKS1GWH2WrX9IWf5DMiJaXlyDO6w8dB3F/GaswADm0yqLaHNgBid5seHzTLkDx4 -iHQF63n1k3Flyp3HaxgtPVxO59X4PzF9j4fsCiIvI+n+u33J4PTs63zEsMMtYrWacdaxaujs2e3V -cuy+VwHOBVWf3tFgiBCzAgMBAAGjNjA0MA8GA1UdEwEB/wQFMAMBAf8wEQYDVR0OBAoECERqlWdV -eRFPMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQUFAAOCAQEAVdRU0VlIXLOThaq/Yy/kgM40 -ozRiPvbY7meIMQQDbwvUB/tOdQ/TLtPAF8fGKOwGDREkDg6lXb+MshOWcdzUzg4NCmgybLlBMRmr -sQd7TZjTXLDR8KdCoLXEjq/+8T/0709GAHbrAvv5ndJAlseIOrifEXnzgGWovR/TeIGgUUw3tKZd -JXDRZslo+S4RFGjxVJgIrCaSD96JntT6s3kr0qN51OyLrIdTaEJMUVF0HhsnLuP1Hyl0Te2v9+GS -mYHovjrHF1D2t8b8m7CKa9aIA5GPBnc6hQLdmNVDeD/GMBWsm2vLV7eJUYs66MmEDNuxUCAKGkq6 -ahq97BvIxYSazQ== ------END CERTIFICATE----- - -TWCA Root Certification Authority -================================= ------BEGIN CERTIFICATE----- -MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ -VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh -dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG -EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB -IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx -QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC -oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP -4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r -y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB -BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG -9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC -mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW -QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY -T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny -Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== ------END CERTIFICATE----- - -Security Communication RootCA2 -============================== ------BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc -U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh -dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC -SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy -aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ -+T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R -3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV -spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K 
-EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 -QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB -CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj -u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk -3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q -tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 -mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 ------END CERTIFICATE----- - -EC-ACC -====== ------BEGIN CERTIFICATE----- -MIIFVjCCBD6gAwIBAgIQ7is969Qh3hSoYqwE893EATANBgkqhkiG9w0BAQUFADCB8zELMAkGA1UE -BhMCRVMxOzA5BgNVBAoTMkFnZW5jaWEgQ2F0YWxhbmEgZGUgQ2VydGlmaWNhY2lvIChOSUYgUS0w -ODAxMTc2LUkpMSgwJgYDVQQLEx9TZXJ2ZWlzIFB1YmxpY3MgZGUgQ2VydGlmaWNhY2lvMTUwMwYD -VQQLEyxWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5ldC92ZXJhcnJlbCAoYykwMzE1MDMGA1UE -CxMsSmVyYXJxdWlhIEVudGl0YXRzIGRlIENlcnRpZmljYWNpbyBDYXRhbGFuZXMxDzANBgNVBAMT -BkVDLUFDQzAeFw0wMzAxMDcyMzAwMDBaFw0zMTAxMDcyMjU5NTlaMIHzMQswCQYDVQQGEwJFUzE7 -MDkGA1UEChMyQWdlbmNpYSBDYXRhbGFuYSBkZSBDZXJ0aWZpY2FjaW8gKE5JRiBRLTA4MDExNzYt -SSkxKDAmBgNVBAsTH1NlcnZlaXMgUHVibGljcyBkZSBDZXJ0aWZpY2FjaW8xNTAzBgNVBAsTLFZl -Z2V1IGh0dHBzOi8vd3d3LmNhdGNlcnQubmV0L3ZlcmFycmVsIChjKTAzMTUwMwYDVQQLEyxKZXJh -cnF1aWEgRW50aXRhdHMgZGUgQ2VydGlmaWNhY2lvIENhdGFsYW5lczEPMA0GA1UEAxMGRUMtQUND -MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAsyLHT+KXQpWIR4NA9h0X84NzJB5R85iK -w5K4/0CQBXCHYMkAqbWUZRkiFRfCQ2xmRJoNBD45b6VLeqpjt4pEndljkYRm4CgPukLjbo73FCeT -ae6RDqNfDrHrZqJyTxIThmV6PttPB/SnCWDaOkKZx7J/sxaVHMf5NLWUhdWZXqBIoH7nF2W4onW4 -HvPlQn2v7fOKSGRdghST2MDk/7NQcvJ29rNdQlB50JQ+awwAvthrDk4q7D7SzIKiGGUzE3eeml0a -E9jD2z3Il3rucO2n5nzbcc8tlGLfbdb1OL4/pYUKGbio2Al1QnDE6u/LDsg0qBIimAy4E5S2S+zw -0JDnJwIDAQABo4HjMIHgMB0GA1UdEQQWMBSBEmVjX2FjY0BjYXRjZXJ0Lm5ldDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUoMOLRKo3pUW/l4Ba0fF4opvpXY0wfwYD -VR0gBHgwdjB0BgsrBgEEAfV4AQMBCjBlMCwGCCsGAQUFBwIBFiBodHRwczovL3d3dy5jYXRjZXJ0 -Lm5ldC92ZXJhcnJlbDA1BggrBgEFBQcCAjApGidWZWdldSBodHRwczovL3d3dy5jYXRjZXJ0Lm5l -dC92ZXJhcnJlbCAwDQYJKoZIhvcNAQEFBQADggEBAKBIW4IB9k1IuDlVNZyAelOZ1Vr/sXE7zDkJ -lF7W2u++AVtd0x7Y/X1PzaBB4DSTv8vihpw3kpBWHNzrKQXlxJ7HNd+KDM3FIUPpqojlNcAZQmNa -Al6kSBg6hW/cnbw/nZzBh7h6YQjpdwt/cKt63dmXLGQehb+8dJahw3oS7AwaboMMPOhyRp/7SNVe -l+axofjk70YllJyJ22k4vuxcDlbHZVHlUIiIv0LVKz3l+bqeLrPK9HOSAgu+TGbrIP65y7WZf+a2 -E/rKS03Z7lNGBjvGTq2TWoF+bCpLagVFjPIhpDGQh2xlnJ2lYJU6Un/10asIbvPuW/mIPX64b24D -5EI= ------END CERTIFICATE----- - -Hellenic Academic and Research Institutions RootCA 2011 -======================================================= ------BEGIN CERTIFICATE----- -MIIEMTCCAxmgAwIBAgIBADANBgkqhkiG9w0BAQUFADCBlTELMAkGA1UEBhMCR1IxRDBCBgNVBAoT -O0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9y -aXR5MUAwPgYDVQQDEzdIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z -IFJvb3RDQSAyMDExMB4XDTExMTIwNjEzNDk1MloXDTMxMTIwMTEzNDk1MlowgZUxCzAJBgNVBAYT -AkdSMUQwQgYDVQQKEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25z -IENlcnQuIEF1dGhvcml0eTFAMD4GA1UEAxM3SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNo -IEluc3RpdHV0aW9ucyBSb290Q0EgMjAxMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AKlTAOMupvaO+mDYLZU++CwqVE7NuYRhlFhPjz2L5EPzdYmNUeTDN9KKiE15HrcS3UN4SoqS5tdI -1Q+kOilENbgH9mgdVc04UfCMJDGFr4PJfel3r+0ae50X+bOdOFAPplp5kYCvN66m0zH7tSYJnTxa -71HFK9+WXesyHgLacEnsbgzImjeN9/E2YEsmLIKe0HjzDQ9jpFEw4fkrJxIH2Oq9GGKYsFk3fb7u -8yBRQlqD75O6aRXxYp2fmTmCobd0LovUxQt7L/DICto9eQqakxylKHJzkUOap9FNhYS5qXSPFEDH 
-3N6sQWRstBmbAmNtJGSPRLIl6s5ddAxjMlyNh+UCAwEAAaOBiTCBhjAPBgNVHRMBAf8EBTADAQH/ -MAsGA1UdDwQEAwIBBjAdBgNVHQ4EFgQUppFC/RNhSiOeCKQp5dgTBCPuQSUwRwYDVR0eBEAwPqA8 -MAWCAy5ncjAFggMuZXUwBoIELmVkdTAGggQub3JnMAWBAy5ncjAFgQMuZXUwBoEELmVkdTAGgQQu -b3JnMA0GCSqGSIb3DQEBBQUAA4IBAQAf73lB4XtuP7KMhjdCSk4cNx6NZrokgclPEg8hwAOXhiVt -XdMiKahsog2p6z0GW5k6x8zDmjR/qw7IThzh+uTczQ2+vyT+bOdrwg3IBp5OjWEopmr95fZi6hg8 -TqBTnbI6nOulnJEWtk2C4AwFSKls9cz4y51JtPACpf1wA+2KIaWuE4ZJwzNzvoc7dIsXRSZMFpGD -/md9zU1jZ/rzAxKWeAaNsWftjj++n08C9bMJL/NMh98qy5V8AcysNnq/onN694/BtZqhFLKPM58N -7yLcZnuEvUUXBj08yrl3NI/K6s8/MT7jiOOASSXIl7WdmplNsDz4SgCbZN2fOUvRJ9e4 ------END CERTIFICATE----- - -Actalis Authentication Root CA -============================== ------BEGIN CERTIFICATE----- -MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM -BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE -AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky -MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz -IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 -IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ -wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa -by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 -zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f -YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 -oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l -EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 -hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 -EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 -jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY -iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt -ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI -WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 -JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx -K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ -Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC -4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo -2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz -lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem -OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 -vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== ------END CERTIFICATE----- - -Trustis FPS Root CA -=================== ------BEGIN CERTIFICATE----- -MIIDZzCCAk+gAwIBAgIQGx+ttiD5JNM2a/fH8YygWTANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQG -EwJHQjEYMBYGA1UEChMPVHJ1c3RpcyBMaW1pdGVkMRwwGgYDVQQLExNUcnVzdGlzIEZQUyBSb290 -IENBMB4XDTAzMTIyMzEyMTQwNloXDTI0MDEyMTExMzY1NFowRTELMAkGA1UEBhMCR0IxGDAWBgNV -BAoTD1RydXN0aXMgTGltaXRlZDEcMBoGA1UECxMTVHJ1c3RpcyBGUFMgUm9vdCBDQTCCASIwDQYJ -KoZIhvcNAQEBBQADggEPADCCAQoCggEBAMVQe547NdDfxIzNjpvto8A2mfRC6qc+gIMPpqdZh8mQ -RUN+AOqGeSoDvT03mYlmt+WKVoaTnGhLaASMk5MCPjDSNzoiYYkchU59j9WvezX2fihHiTHcDnlk -H5nSW7r+f2C/revnPDgpai/lkQtV/+xvWNUtyd5MZnGPDNcE2gfmHhjjvSkCqPoc4Vu5g6hBSLwa -cY3nYuUtsuvffM/bq1rKMfFMIvMFE/eC+XN5DL7XSxzA0RU8k0Fk0ea+IxciAIleH2ulrG6nS4zt 
-o3Lmr2NNL4XSFDWaLk6M6jKYKIahkQlBOrTh4/L68MkKokHdqeMDx4gVOxzUGpTXn2RZEm0CAwEA -AaNTMFEwDwYDVR0TAQH/BAUwAwEB/zAfBgNVHSMEGDAWgBS6+nEleYtXQSUhhgtx67JkDoshZzAd -BgNVHQ4EFgQUuvpxJXmLV0ElIYYLceuyZA6LIWcwDQYJKoZIhvcNAQEFBQADggEBAH5Y//01GX2c -GE+esCu8jowU/yyg2kdbw++BLa8F6nRIW/M+TgfHbcWzk88iNVy2P3UnXwmWzaD+vkAMXBJV+JOC -yinpXj9WV4s4NvdFGkwozZ5BuO1WTISkQMi4sKUraXAEasP41BIy+Q7DsdwyhEQsb8tGD+pmQQ9P -8Vilpg0ND2HepZ5dfWWhPBfnqFVO76DH7cZEf1T1o+CP8HxVIo8ptoGj4W1OLBuAZ+ytIJ8MYmHV -l/9D7S3B2l0pKoU/rGXuhg8FjZBf3+6f9L/uHfuY5H+QK4R4EA5sSVPvFVtlRkpdr7r7OnIdzfYl -iB6XzCGcKQENZetX2fNXlrtIzYE= ------END CERTIFICATE----- - -StartCom Certification Authority -================================ ------BEGIN CERTIFICATE----- -MIIHhzCCBW+gAwIBAgIBLTANBgkqhkiG9w0BAQsFADB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMN -U3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmlu -ZzEpMCcGA1UEAxMgU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDYwOTE3MTk0 -NjM3WhcNMzYwOTE3MTk0NjM2WjB9MQswCQYDVQQGEwJJTDEWMBQGA1UEChMNU3RhcnRDb20gTHRk -LjErMCkGA1UECxMiU2VjdXJlIERpZ2l0YWwgQ2VydGlmaWNhdGUgU2lnbmluZzEpMCcGA1UEAxMg -U3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDBiNsJvGxGfHiflXu1M5DycmLWwTYgIiRezul38kMKogZkpMyONvg45iPwbm2xPN1y -o4UcodM9tDMr0y+v/uqwQVlntsQGfQqedIXWeUyAN3rfOQVSWff0G0ZDpNKFhdLDcfN1YjS6LIp/ -Ho/u7TTQEceWzVI9ujPW3U3eCztKS5/CJi/6tRYccjV3yjxd5srhJosaNnZcAdt0FCX+7bWgiA/d -eMotHweXMAEtcnn6RtYTKqi5pquDSR3l8u/d5AGOGAqPY1MWhWKpDhk6zLVmpsJrdAfkK+F2PrRt -2PZE4XNiHzvEvqBTViVsUQn3qqvKv3b9bZvzndu/PWa8DFaqr5hIlTpL36dYUNk4dalb6kMMAv+Z -6+hsTXBbKWWc3apdzK8BMewM69KN6Oqce+Zu9ydmDBpI125C4z/eIT574Q1w+2OqqGwaVLRcJXrJ -osmLFqa7LH4XXgVNWG4SHQHuEhANxjJ/GP/89PrNbpHoNkm+Gkhpi8KWTRoSsmkXwQqQ1vp5Iki/ -untp+HDH+no32NgN0nZPV/+Qt+OR0t3vwmC3Zzrd/qqc8NSLf3Iizsafl7b4r4qgEKjZ+xjGtrVc -UjyJthkqcwEKDwOzEmDyei+B26Nu/yYwl/WL3YlXtq09s68rxbd2AvCl1iuahhQqcvbjM4xdCUsT -37uMdBNSSwIDAQABo4ICEDCCAgwwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYD -VR0OBBYEFE4L7xqkQFulF2mHMMo0aEPQQa7yMB8GA1UdIwQYMBaAFE4L7xqkQFulF2mHMMo0aEPQ -Qa7yMIIBWgYDVR0gBIIBUTCCAU0wggFJBgsrBgEEAYG1NwEBATCCATgwLgYIKwYBBQUHAgEWImh0 -dHA6Ly93d3cuc3RhcnRzc2wuY29tL3BvbGljeS5wZGYwNAYIKwYBBQUHAgEWKGh0dHA6Ly93d3cu -c3RhcnRzc2wuY29tL2ludGVybWVkaWF0ZS5wZGYwgc8GCCsGAQUFBwICMIHCMCcWIFN0YXJ0IENv -bW1lcmNpYWwgKFN0YXJ0Q29tKSBMdGQuMAMCAQEagZZMaW1pdGVkIExpYWJpbGl0eSwgcmVhZCB0 -aGUgc2VjdGlvbiAqTGVnYWwgTGltaXRhdGlvbnMqIG9mIHRoZSBTdGFydENvbSBDZXJ0aWZpY2F0 -aW9uIEF1dGhvcml0eSBQb2xpY3kgYXZhaWxhYmxlIGF0IGh0dHA6Ly93d3cuc3RhcnRzc2wuY29t -L3BvbGljeS5wZGYwEQYJYIZIAYb4QgEBBAQDAgAHMDgGCWCGSAGG+EIBDQQrFilTdGFydENvbSBG -cmVlIFNTTCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTANBgkqhkiG9w0BAQsFAAOCAgEAjo/n3JR5 -fPGFf59Jb2vKXfuM/gTFwWLRfUKKvFO3lANmMD+x5wqnUCBVJX92ehQN6wQOQOY+2IirByeDqXWm -N3PH/UvSTa0XQMhGvjt/UfzDtgUx3M2FIk5xt/JxXrAaxrqTi3iSSoX4eA+D/i+tLPfkpLst0OcN -Org+zvZ49q5HJMqjNTbOx8aHmNrs++myziebiMMEofYLWWivydsQD032ZGNcpRJvkrKTlMeIFw6T -tn5ii5B/q06f/ON1FE8qMt9bDeD1e5MNq6HPh+GlBEXoPBKlCcWw0bdT82AUuoVpaiF8H3VhFyAX -e2w7QSlc4axa0c2Mm+tgHRns9+Ww2vl5GKVFP0lDV9LdJNUso/2RjSe15esUBppMeyG7Oq0wBhjA -2MFrLH9ZXF2RsXAiV+uKa0hK1Q8p7MZAwC+ITGgBF3f0JBlPvfrhsiAhS90a2Cl9qrjeVOwhVYBs -HvUwyKMQ5bLmKhQxw4UtjJixhlpPiVktucf3HMiKf8CdBUrmQk9io20ppB+Fq9vlgcitKj1MXVuE -JnHEhV5xJMqlG2zYYdMa4FTbzrqpMrUi9nNBCV24F10OD5mQ1kfabwo6YigUZ4LZ8dCAWZvLMdib -D4x3TrVoivJs9iQOLWxwxXPR3hTQcY+203sC9uO41Alua551hDnmfyWl8kgAwKQB2j8= ------END CERTIFICATE----- - -StartCom Certification Authority G2 -=================================== ------BEGIN CERTIFICATE----- 
-MIIFYzCCA0ugAwIBAgIBOzANBgkqhkiG9w0BAQsFADBTMQswCQYDVQQGEwJJTDEWMBQGA1UEChMN -U3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkg -RzIwHhcNMTAwMTAxMDEwMDAxWhcNMzkxMjMxMjM1OTAxWjBTMQswCQYDVQQGEwJJTDEWMBQGA1UE -ChMNU3RhcnRDb20gTHRkLjEsMCoGA1UEAxMjU3RhcnRDb20gQ2VydGlmaWNhdGlvbiBBdXRob3Jp -dHkgRzIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2iTZbB7cgNr2Cu+EWIAOVeq8O -o1XJJZlKxdBWQYeQTSFgpBSHO839sj60ZwNq7eEPS8CRhXBF4EKe3ikj1AENoBB5uNsDvfOpL9HG -4A/LnooUCri99lZi8cVytjIl2bLzvWXFDSxu1ZJvGIsAQRSCb0AgJnooD/Uefyf3lLE3PbfHkffi -Aez9lInhzG7TNtYKGXmu1zSCZf98Qru23QumNK9LYP5/Q0kGi4xDuFby2X8hQxfqp0iVAXV16iul -Q5XqFYSdCI0mblWbq9zSOdIxHWDirMxWRST1HFSr7obdljKF+ExP6JV2tgXdNiNnvP8V4so75qbs -O+wmETRIjfaAKxojAuuKHDp2KntWFhxyKrOq42ClAJ8Em+JvHhRYW6Vsi1g8w7pOOlz34ZYrPu8H -vKTlXcxNnw3h3Kq74W4a7I/htkxNeXJdFzULHdfBR9qWJODQcqhaX2YtENwvKhOuJv4KHBnM0D4L -nMgJLvlblnpHnOl68wVQdJVznjAJ85eCXuaPOQgeWeU1FEIT/wCc976qUM/iUUjXuG+v+E5+M5iS -FGI6dWPPe/regjupuznixL0sAA7IF6wT700ljtizkC+p2il9Ha90OrInwMEePnWjFqmveiJdnxMa -z6eg6+OGCtP95paV1yPIN93EfKo2rJgaErHgTuixO/XWb/Ew1wIDAQABo0IwQDAPBgNVHRMBAf8E -BTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUS8W0QGutHLOlHGVuRjaJhwUMDrYwDQYJ -KoZIhvcNAQELBQADggIBAHNXPyzVlTJ+N9uWkusZXn5T50HsEbZH77Xe7XRcxfGOSeD8bpkTzZ+K -2s06Ctg6Wgk/XzTQLwPSZh0avZyQN8gMjgdalEVGKua+etqhqaRpEpKwfTbURIfXUfEpY9Z1zRbk -J4kd+MIySP3bmdCPX1R0zKxnNBFi2QwKN4fRoxdIjtIXHfbX/dtl6/2o1PXWT6RbdejF0mCy2wl+ -JYt7ulKSnj7oxXehPOBKc2thz4bcQ///If4jXSRK9dNtD2IEBVeC2m6kMyV5Sy5UGYvMLD0w6dEG -/+gyRr61M3Z3qAFdlsHB1b6uJcDJHgoJIIihDsnzb02CVAAgp9KP5DlUFy6NHrgbuxu9mk47EDTc -nIhT76IxW1hPkWLIwpqazRVdOKnWvvgTtZ8SafJQYqz7Fzf07rh1Z2AQ+4NQ+US1dZxAF7L+/Xld -blhYXzD8AK6vM8EOTmy6p6ahfzLbOOCxchcKK5HsamMm7YnUeMx0HgX4a/6ManY5Ka5lIxKVCCIc -l85bBu4M4ru8H0ST9tg4RQUh7eStqxK2A6RCLi3ECToDZ2mEmuFZkIoohdVddLHRDiBYmxOlsGOm -7XtH/UVVMKTumtTm4ofvmMkyghEpIrwACjFeLQ/Ajulrso8uBtjRkcfGEvRM/TAXw8HaOFvjqerm -obp573PYtlNXLfbQ4ddI ------END CERTIFICATE----- - -Buypass Class 2 Root CA -======================= ------BEGIN CERTIFICATE----- -MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU -QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X -DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 -eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 -g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn -9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b -/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU -CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff -awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI -zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn -Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX -Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs -M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD -VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF -AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s -A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI -osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S -aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd 
-DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD -LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 -oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC -wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS -CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN -rJgWVqA= ------END CERTIFICATE----- - -Buypass Class 3 Root CA -======================= ------BEGIN CERTIFICATE----- -MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU -QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X -DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 -eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH -sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR -5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh -7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ -ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH -2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV -/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ -RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA -Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq -j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD -VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF -AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV -cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G -uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG -Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 -ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 -KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz -6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug -UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe -eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi -Cp/HuZc= ------END CERTIFICATE----- - -T-TeleSec GlobalRoot Class 3 -============================ ------BEGIN CERTIFICATE----- -MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM -IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU -cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx -MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz -dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD -ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK -9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU -NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF -iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W -0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA -MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr -AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb 
-fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT -ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h -P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml -e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== ------END CERTIFICATE----- - -EE Certification Centre Root CA -=============================== ------BEGIN CERTIFICATE----- -MIIEAzCCAuugAwIBAgIQVID5oHPtPwBMyonY43HmSjANBgkqhkiG9w0BAQUFADB1MQswCQYDVQQG -EwJFRTEiMCAGA1UECgwZQVMgU2VydGlmaXRzZWVyaW1pc2tlc2t1czEoMCYGA1UEAwwfRUUgQ2Vy -dGlmaWNhdGlvbiBDZW50cmUgUm9vdCBDQTEYMBYGCSqGSIb3DQEJARYJcGtpQHNrLmVlMCIYDzIw -MTAxMDMwMTAxMDMwWhgPMjAzMDEyMTcyMzU5NTlaMHUxCzAJBgNVBAYTAkVFMSIwIAYDVQQKDBlB -UyBTZXJ0aWZpdHNlZXJpbWlza2Vza3VzMSgwJgYDVQQDDB9FRSBDZXJ0aWZpY2F0aW9uIENlbnRy -ZSBSb290IENBMRgwFgYJKoZIhvcNAQkBFglwa2lAc2suZWUwggEiMA0GCSqGSIb3DQEBAQUAA4IB -DwAwggEKAoIBAQDIIMDs4MVLqwd4lfNE7vsLDP90jmG7sWLqI9iroWUyeuuOF0+W2Ap7kaJjbMeM -TC55v6kF/GlclY1i+blw7cNRfdCT5mzrMEvhvH2/UpvObntl8jixwKIy72KyaOBhU8E2lf/slLo2 -rpwcpzIP5Xy0xm90/XsY6KxX7QYgSzIwWFv9zajmofxwvI6Sc9uXp3whrj3B9UiHbCe9nyV0gVWw -93X2PaRka9ZP585ArQ/dMtO8ihJTmMmJ+xAdTX7Nfh9WDSFwhfYggx/2uh8Ej+p3iDXE/+pOoYtN -P2MbRMNE1CV2yreN1x5KZmTNXMWcg+HCCIia7E6j8T4cLNlsHaFLAgMBAAGjgYowgYcwDwYDVR0T -AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFBLyWj7qVhy/zQas8fElyalL1BSZ -MEUGA1UdJQQ+MDwGCCsGAQUFBwMCBggrBgEFBQcDAQYIKwYBBQUHAwMGCCsGAQUFBwMEBggrBgEF -BQcDCAYIKwYBBQUHAwkwDQYJKoZIhvcNAQEFBQADggEBAHv25MANqhlHt01Xo/6tu7Fq1Q+e2+Rj -xY6hUFaTlrg4wCQiZrxTFGGVv9DHKpY5P30osxBAIWrEr7BSdxjhlthWXePdNl4dp1BUoMUq5KqM -lIpPnTX/dqQGE5Gion0ARD9V04I8GtVbvFZMIi5GQ4okQC3zErg7cBqklrkar4dBGmoYDQZPxz5u -uSlNDUmJEYcyW+ZLBMjkXOZ0c5RdFpgTlf7727FE5TpwrDdr5rMzcijJs1eg9gIWiAYLtqZLICjU -3j2LrTcFU3T+bsy8QxdxXvnFzBqpYe73dgzzcvRyrc9yAjYHR8/vGVCJYMzpJJUPwssd8m92kMfM -dcGWxZ0= ------END CERTIFICATE----- diff --git a/libs/tornado/concurrent.py b/libs/tornado/concurrent.py index a9002b16..63b0a8c1 100755 --- a/libs/tornado/concurrent.py +++ b/libs/tornado/concurrent.py @@ -40,52 +40,132 @@ class ReturnValueIgnoredError(Exception): pass -class _DummyFuture(object): +class Future(object): + """Placeholder for an asynchronous result. + + A ``Future`` encapsulates the result of an asynchronous + operation. In synchronous applications ``Futures`` are used + to wait for the result from a thread or process pool; in + Tornado they are normally used with `.IOLoop.add_future` or by + yielding them in a `.gen.coroutine`. + + `tornado.concurrent.Future` is similar to + `concurrent.futures.Future`, but not thread-safe (and therefore + faster for use with single-threaded event loops). + + In addition to ``exception`` and ``set_exception``, methods ``exc_info`` + and ``set_exc_info`` are supported to capture tracebacks in Python 2. + The traceback is automatically available in Python 3, but in the + Python 2 futures backport this information is discarded. + This functionality was previously available in a separate class + ``TracebackFuture``, which is now a deprecated alias for this class. + + .. versionchanged:: 3.3 + `tornado.concurrent.Future` is always a thread-unsafe ``Future`` + with support for the ``exc_info`` methods. Previously it would + be an alias for the thread-safe `concurrent.futures.Future` + if that package was available and fall back to the thread-unsafe + implementation if it was not. 
+ + """ def __init__(self): self._done = False self._result = None self._exception = None + self._exc_info = None self._callbacks = [] def cancel(self): + """Cancel the operation, if possible. + + Tornado ``Futures`` do not support cancellation, so this method always + returns False. + """ return False def cancelled(self): + """Returns True if the operation has been cancelled. + + Tornado ``Futures`` do not support cancellation, so this method + always returns False. + """ return False def running(self): + """Returns True if this operation is currently running.""" return not self._done def done(self): + """Returns True if the future has finished running.""" return self._done def result(self, timeout=None): - self._check_done() - if self._exception: + """If the operation succeeded, return its result. If it failed, + re-raise its exception. + """ + if self._result is not None: + return self._result + if self._exc_info is not None: + raise_exc_info(self._exc_info) + elif self._exception is not None: raise self._exception + self._check_done() return self._result def exception(self, timeout=None): - self._check_done() - if self._exception: + """If the operation raised an exception, return the `Exception` + object. Otherwise returns None. + """ + if self._exception is not None: return self._exception else: + self._check_done() return None def add_done_callback(self, fn): + """Attaches the given callback to the `Future`. + + It will be invoked with the `Future` as its argument when the Future + has finished running and its result is available. In Tornado + consider using `.IOLoop.add_future` instead of calling + `add_done_callback` directly. + """ if self._done: fn(self) else: self._callbacks.append(fn) def set_result(self, result): + """Sets the result of a ``Future``. + + It is undefined to call any of the ``set`` methods more than once + on the same object. + """ self._result = result self._set_done() def set_exception(self, exception): + """Sets the exception of a ``Future.``""" self._exception = exception self._set_done() + def exc_info(self): + """Returns a tuple in the same format as `sys.exc_info` or None. + + .. versionadded:: 3.3 + """ + return self._exc_info + + def set_exc_info(self, exc_info): + """Sets the exception information of a ``Future.`` + + Preserves tracebacks on Python 2. + + .. versionadded:: 3.3 + """ + self._exc_info = exc_info + self.set_exception(exc_info[1]) + def _check_done(self): if not self._done: raise Exception("DummyFuture does not support blocking for results") @@ -97,38 +177,16 @@ class _DummyFuture(object): cb(self) self._callbacks = None +TracebackFuture = Future + if futures is None: - Future = _DummyFuture + FUTURES = Future else: - Future = futures.Future + FUTURES = (futures.Future, Future) -class TracebackFuture(Future): - """Subclass of `Future` which can store a traceback with - exceptions. - - The traceback is automatically available in Python 3, but in the - Python 2 futures backport this information is discarded. - """ - def __init__(self): - super(TracebackFuture, self).__init__() - self.__exc_info = None - - def exc_info(self): - return self.__exc_info - - def set_exc_info(self, exc_info): - """Traceback-aware replacement for - `~concurrent.futures.Future.set_exception`. 
- """ - self.__exc_info = exc_info - self.set_exception(exc_info[1]) - - def result(self, timeout=None): - if self.__exc_info is not None: - raise_exc_info(self.__exc_info) - else: - return super(TracebackFuture, self).result(timeout=timeout) +def is_future(x): + return isinstance(x, FUTURES) class DummyExecutor(object): @@ -254,10 +312,13 @@ def return_future(f): def chain_future(a, b): """Chain two futures together so that when one completes, so does the other. - The result (success or failure) of ``a`` will be copied to ``b``. + The result (success or failure) of ``a`` will be copied to ``b``, unless + ``b`` has already been completed or cancelled by the time ``a`` finishes. """ def copy(future): assert future is a + if b.done(): + return if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture) and a.exc_info() is not None): b.set_exc_info(a.exc_info()) diff --git a/libs/tornado/curl_httpclient.py b/libs/tornado/curl_httpclient.py index 0df7a7ee..fc7d7f26 100755 --- a/libs/tornado/curl_httpclient.py +++ b/libs/tornado/curl_httpclient.py @@ -268,6 +268,7 @@ class CurlAsyncHTTPClient(AsyncHTTPClient): info["callback"](HTTPResponse( request=info["request"], code=code, headers=info["headers"], buffer=buffer, effective_url=effective_url, error=error, + reason=info['headers'].get("X-Http-Reason", None), request_time=time.time() - info["curl_start_time"], time_info=time_info)) except Exception: @@ -470,7 +471,11 @@ def _curl_header_callback(headers, header_line): header_line = header_line.strip() if header_line.startswith("HTTP/"): headers.clear() - return + try: + (__, __, reason) = httputil.parse_response_start_line(header_line) + header_line = "X-Http-Reason: %s" % reason + except httputil.HTTPInputException: + return if not header_line: return headers.parse_line(header_line) diff --git a/libs/tornado/escape.py b/libs/tornado/escape.py index 95c0f24e..48fa673c 100755 --- a/libs/tornado/escape.py +++ b/libs/tornado/escape.py @@ -75,7 +75,7 @@ def xhtml_unescape(value): # The fact that json_encode wraps json.dumps is an implementation detail. -# Please see https://github.com/facebook/tornado/pull/706 +# Please see https://github.com/tornadoweb/tornado/pull/706 # before sending a pull request that adds **kwargs to this function. def json_encode(value): """JSON-encodes the given Python object.""" diff --git a/libs/tornado/gen.py b/libs/tornado/gen.py index aa931b45..4d1dc6e1 100755 --- a/libs/tornado/gen.py +++ b/libs/tornado/gen.py @@ -87,9 +87,9 @@ import itertools import sys import types -from tornado.concurrent import Future, TracebackFuture +from tornado.concurrent import Future, TracebackFuture, is_future, chain_future from tornado.ioloop import IOLoop -from tornado.stack_context import ExceptionStackContext, wrap +from tornado import stack_context class KeyReuseError(Exception): @@ -112,6 +112,10 @@ class ReturnValueIgnoredError(Exception): pass +class TimeoutError(Exception): + """Exception raised by ``with_timeout``.""" + + def engine(func): """Callback-oriented decorator for asynchronous generators. @@ -129,45 +133,20 @@ def engine(func): `~tornado.web.RequestHandler` :ref:`HTTP verb methods `, which use ``self.finish()`` in place of a callback argument. """ + func = _make_coroutine_wrapper(func, replace_callback=False) @functools.wraps(func) def wrapper(*args, **kwargs): - runner = None - - def handle_exception(typ, value, tb): - # if the function throws an exception before its first "yield" - # (or is not a generator at all), the Runner won't exist yet. 
- # However, in that case we haven't reached anything asynchronous - # yet, so we can just let the exception propagate. - if runner is not None: - return runner.handle_exception(typ, value, tb) - return False - with ExceptionStackContext(handle_exception) as deactivate: - try: - result = func(*args, **kwargs) - except (Return, StopIteration) as e: - result = getattr(e, 'value', None) - else: - if isinstance(result, types.GeneratorType): - def final_callback(value): - if value is not None: - raise ReturnValueIgnoredError( - "@gen.engine functions cannot return values: " - "%r" % (value,)) - assert value is None - deactivate() - runner = Runner(result, final_callback) - runner.run() - return - if result is not None: + future = func(*args, **kwargs) + def final_callback(future): + if future.result() is not None: raise ReturnValueIgnoredError( "@gen.engine functions cannot return values: %r" % - (result,)) - deactivate() - # no yield, so we're done + (future.result(),)) + future.add_done_callback(final_callback) return wrapper -def coroutine(func): +def coroutine(func, replace_callback=True): """Decorator for asynchronous generators. Any generator that yields objects from this module must be wrapped @@ -191,43 +170,56 @@ def coroutine(func): From the caller's perspective, ``@gen.coroutine`` is similar to the combination of ``@return_future`` and ``@gen.engine``. """ + return _make_coroutine_wrapper(func, replace_callback=True) + + +def _make_coroutine_wrapper(func, replace_callback): + """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. + + The two decorators differ in their treatment of the ``callback`` + argument, so we cannot simply implement ``@engine`` in terms of + ``@coroutine``. + """ @functools.wraps(func) def wrapper(*args, **kwargs): - runner = None future = TracebackFuture() - if 'callback' in kwargs: + if replace_callback and 'callback' in kwargs: callback = kwargs.pop('callback') IOLoop.current().add_future( future, lambda future: callback(future.result())) - def handle_exception(typ, value, tb): - try: - if runner is not None and runner.handle_exception(typ, value, tb): - return True - except Exception: - typ, value, tb = sys.exc_info() - future.set_exc_info((typ, value, tb)) - return True - with ExceptionStackContext(handle_exception) as deactivate: - try: - result = func(*args, **kwargs) - except (Return, StopIteration) as e: - result = getattr(e, 'value', None) - except Exception: - deactivate() - future.set_exc_info(sys.exc_info()) + try: + result = func(*args, **kwargs) + except (Return, StopIteration) as e: + result = getattr(e, 'value', None) + except Exception: + future.set_exc_info(sys.exc_info()) + return future + else: + if isinstance(result, types.GeneratorType): + # Inline the first iteration of Runner.run. This lets us + # avoid the cost of creating a Runner when the coroutine + # never actually yields, which in turn allows us to + # use "optional" coroutines in critical path code without + # performance penalty for the synchronous case. 
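The comment above can be made concrete with a hedged sketch (``fetch_from_db`` is hypothetical): a coroutine that returns before its first ``yield`` resolves its `.Future` synchronously and never creates a ``Runner``::

    @gen.coroutine
    def cached_fetch(key, _cache={}):
        if key in _cache:
            raise gen.Return(_cache[key])  # fast path: no yield reached
        value = yield fetch_from_db(key)   # slow path: a Runner takes over
        _cache[key] = value
        raise gen.Return(value)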
+ try: + orig_stack_contexts = stack_context._state.contexts + yielded = next(result) + if stack_context._state.contexts is not orig_stack_contexts: + yielded = TracebackFuture() + yielded.set_exception( + stack_context.StackContextInconsistentError( + 'stack_context inconsistency (probably caused ' + 'by yield within a "with StackContext" block)')) + except (StopIteration, Return) as e: + future.set_result(getattr(e, 'value', None)) + except Exception: + future.set_exc_info(sys.exc_info()) + else: + Runner(result, future, yielded) return future - else: - if isinstance(result, types.GeneratorType): - def final_callback(value): - deactivate() - future.set_result(value) - runner = Runner(result, final_callback) - runner.run() - return future - deactivate() - future.set_result(result) + future.set_result(result) return future return wrapper @@ -348,7 +340,7 @@ class WaitAll(YieldPoint): return [self.runner.pop_result(key) for key in self.keys] -class Task(YieldPoint): +def Task(func, *args, **kwargs): """Runs a single asynchronous operation. Takes a function (and optional additional arguments) and runs it with @@ -362,25 +354,25 @@ class Task(YieldPoint): func(args, callback=(yield gen.Callback(key))) result = yield gen.Wait(key) + + .. versionchanged:: 3.3 + ``gen.Task`` is now a function that returns a `.Future`, instead of + a subclass of `YieldPoint`. It still behaves the same way when + yielded. """ - def __init__(self, func, *args, **kwargs): - assert "callback" not in kwargs - self.args = args - self.kwargs = kwargs - self.func = func - - def start(self, runner): - self.runner = runner - self.key = object() - runner.register_callback(self.key) - self.kwargs["callback"] = runner.result_callback(self.key) - self.func(*self.args, **self.kwargs) - - def is_ready(self): - return self.runner.is_ready(self.key) - - def get_result(self): - return self.runner.pop_result(self.key) + future = Future() + def handle_exception(typ, value, tb): + if future.done(): + return False + future.set_exc_info((typ, value, tb)) + return True + def set_result(result): + if future.done(): + return + future.set_result(result) + with stack_context.ExceptionStackContext(handle_exception): + func(*args, callback=_argument_adapter(set_result), **kwargs) + return future class YieldFuture(YieldPoint): @@ -414,10 +406,14 @@ class YieldFuture(YieldPoint): class Multi(YieldPoint): """Runs multiple asynchronous operations in parallel. - Takes a list of ``Tasks`` or other ``YieldPoints`` and returns a list of + Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of their responses. It is not necessary to call `Multi` explicitly, since the engine will do so automatically when the generator yields - a list of ``YieldPoints``. + a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``. + + Instead of a list, the argument may also be a dictionary whose values are + Futures, in which case a parallel dictionary is returned mapping the same + keys to their results. """ def __init__(self, children): self.keys = None @@ -426,7 +422,7 @@ class Multi(YieldPoint): children = children.values() self.children = [] for i in children: - if isinstance(i, Future): + if is_future(i): i = YieldFuture(i) self.children.append(i) assert all(isinstance(i, YieldPoint) for i in self.children) @@ -450,18 +446,127 @@ class Multi(YieldPoint): return list(result) -class _NullYieldPoint(YieldPoint): - def start(self, runner): - pass +def multi_future(children): + """Wait for multiple asynchronous futures in parallel. 
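For example (client and URLs hypothetical), yielding a dict of ``Futures`` resolves to a dict of results::

    responses = yield {'a': client.fetch(url_a), 'b': client.fetch(url_b)}
    body = responses['a'].body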
-    def is_ready(self):
-        return True
+    Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns
+    a new Future that resolves when all the other Futures are done.
+    If all the ``Futures`` succeeded, the returned Future's result is a list
+    of their results. If any failed, the returned Future raises the exception
+    of the first one to fail.
 
-    def get_result(self):
-        return None
+    Instead of a list, the argument may also be a dictionary whose values are
+    Futures, in which case a parallel dictionary is returned mapping the same
+    keys to their results.
+
+    It is not necessary to call `multi_future` explicitly, since the engine
+    will do so automatically when the generator yields a list of `Futures`.
+    This function is faster than the `Multi` `YieldPoint` because it does not
+    require the creation of a stack context.
+
+    .. versionadded:: 3.3
+    """
+    if isinstance(children, dict):
+        keys = list(children.keys())
+        children = children.values()
+    else:
+        keys = None
+    assert all(is_future(i) for i in children)
+    unfinished_children = set(children)
+
+    future = Future()
+    if not children:
+        future.set_result({} if keys is not None else [])
+    def callback(f):
+        unfinished_children.remove(f)
+        if not unfinished_children:
+            try:
+                result_list = [i.result() for i in children]
+            except Exception:
+                future.set_exc_info(sys.exc_info())
+            else:
+                if keys is not None:
+                    future.set_result(dict(zip(keys, result_list)))
+                else:
+                    future.set_result(result_list)
+    for f in children:
+        f.add_done_callback(callback)
+    return future
 
-_null_yield_point = _NullYieldPoint()
 
+def maybe_future(x):
+    """Converts ``x`` into a `.Future`.
+
+    If ``x`` is already a `.Future`, it is simply returned; otherwise
+    it is wrapped in a new `.Future`. This is suitable for use as
+    ``result = yield gen.maybe_future(f())`` when you don't know whether
+    ``f()`` returns a `.Future` or not.
+    """
+    if is_future(x):
+        return x
+    else:
+        fut = Future()
+        fut.set_result(x)
+        return fut
+
+
+def with_timeout(timeout, future, io_loop=None):
+    """Wraps a `.Future` in a timeout.
+
+    Raises `TimeoutError` if the input future does not complete before
+    ``timeout``, which may be specified in any form allowed by
+    `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time
+    relative to `.IOLoop.time`).
+
+    Currently only supports Futures, not other `YieldPoint` classes.
+
+    .. versionadded:: 3.3
+    """
+    # TODO: allow yield points in addition to futures?
+    # Tricky to do with stack_context semantics.
+    #
+    # It's tempting to optimize this by cancelling the input future on timeout
+    # instead of creating a new one, but A) we can't know if we are the only
+    # one waiting on the input future, so cancelling it might disrupt other
+    # callers and B) concurrent futures can only be cancelled while they are
+    # in the queue, so cancellation cannot reliably bound our waiting time.
+    result = Future()
+    chain_future(future, result)
+    if io_loop is None:
+        io_loop = IOLoop.current()
+    timeout_handle = io_loop.add_timeout(
+        timeout,
+        lambda: result.set_exception(TimeoutError("Timeout")))
+    if isinstance(future, Future):
+        # We know this future will resolve on the IOLoop, so we don't
+        # need the extra thread-safety of IOLoop.add_future (and we also
+        # don't care about StackContext here).
+        future.add_done_callback(
+            lambda future: io_loop.remove_timeout(timeout_handle))
+    else:
+        # concurrent.futures.Futures may resolve on any thread, so we
+        # need to route them back to the IOLoop.
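Illustrative caller-side usage (``client.fetch`` stands in for any Future-returning call)::

    from datetime import timedelta
    try:
        response = yield gen.with_timeout(timedelta(seconds=5),
                                          client.fetch(url))
    except gen.TimeoutError:
        pass  # the fetch took longer than five seconds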
+ io_loop.add_future( + future, lambda future: io_loop.remove_timeout(timeout_handle)) + return result + + +_null_future = Future() +_null_future.set_result(None) + +moment = Future() +moment.__doc__ = \ + """A special object which may be yielded to allow the IOLoop to run for +one iteration. + +This is not needed in normal use but it can be helpful in long-running +coroutines that are likely to yield Futures that are ready instantly. + +Usage: ``yield gen.moment`` + +.. versionadded:: 3.3 +""" +moment.set_result(None) class Runner(object): @@ -469,35 +574,55 @@ class Runner(object): Maintains information about pending callbacks and their results. - ``final_callback`` is run after the generator exits. + The results of the generator are stored in ``result_future`` (a + `.TracebackFuture`) """ - def __init__(self, gen, final_callback): + def __init__(self, gen, result_future, first_yielded): self.gen = gen - self.final_callback = final_callback - self.yield_point = _null_yield_point - self.pending_callbacks = set() - self.results = {} + self.result_future = result_future + self.future = _null_future + self.yield_point = None + self.pending_callbacks = None + self.results = None self.running = False self.finished = False - self.exc_info = None self.had_exception = False + self.io_loop = IOLoop.current() + # For efficiency, we do not create a stack context until we + # reach a YieldPoint (stack contexts are required for the historical + # semantics of YieldPoints, but not for Futures). When we have + # done so, this field will be set and must be called at the end + # of the coroutine. + self.stack_context_deactivate = None + if self.handle_yield(first_yielded): + self.run() def register_callback(self, key): """Adds ``key`` to the list of callbacks.""" + if self.pending_callbacks is None: + # Lazily initialize the old-style YieldPoint data structures. 
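This lazy path is only exercised by old-style ``YieldPoint`` code, such as the classic ``Callback``/``Wait`` pair (sketch; ``func`` is hypothetical)::

    func(args, callback=(yield gen.Callback('key')))
    result = yield gen.Wait('key')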
+ self.pending_callbacks = set() + self.results = {} if key in self.pending_callbacks: raise KeyReuseError("key %r is already pending" % (key,)) self.pending_callbacks.add(key) def is_ready(self, key): """Returns true if a result is available for ``key``.""" - if key not in self.pending_callbacks: + if self.pending_callbacks is None or key not in self.pending_callbacks: raise UnknownKeyError("key %r is not pending" % (key,)) return key in self.results def set_result(self, key, result): """Sets the result for ``key`` and attempts to resume the generator.""" self.results[key] = result - self.run() + if self.yield_point is not None and self.yield_point.is_ready(): + try: + self.future.set_result(self.yield_point.get_result()) + except: + self.future.set_exc_info(sys.exc_info()) + self.yield_point = None + self.run() def pop_result(self, key): """Returns the result for ``key`` and unregisters it.""" @@ -513,25 +638,27 @@ class Runner(object): try: self.running = True while True: - if self.exc_info is None: - try: - if not self.yield_point.is_ready(): - return - next = self.yield_point.get_result() - self.yield_point = None - except Exception: - self.exc_info = sys.exc_info() + future = self.future + if not future.done(): + return + self.future = None try: - if self.exc_info is not None: + orig_stack_contexts = stack_context._state.contexts + try: + value = future.result() + except Exception: self.had_exception = True - exc_info = self.exc_info - self.exc_info = None - yielded = self.gen.throw(*exc_info) + yielded = self.gen.throw(*sys.exc_info()) else: - yielded = self.gen.send(next) + yielded = self.gen.send(value) + if stack_context._state.contexts is not orig_stack_contexts: + self.gen.throw( + stack_context.StackContextInconsistentError( + 'stack_context inconsistency (probably caused ' + 'by yield within a "with StackContext" block)')) except (StopIteration, Return) as e: self.finished = True - self.yield_point = _null_yield_point + self.future = _null_future if self.pending_callbacks and not self.had_exception: # If we ran cleanly without waiting on all callbacks # raise an error (really more of a warning). 
If we @@ -540,46 +667,105 @@ class Runner(object): raise LeakedCallbackError( "finished without waiting for callbacks %r" % self.pending_callbacks) - self.final_callback(getattr(e, 'value', None)) - self.final_callback = None + self.result_future.set_result(getattr(e, 'value', None)) + self.result_future = None + self._deactivate_stack_context() return except Exception: self.finished = True - self.yield_point = _null_yield_point - raise - if isinstance(yielded, (list, dict)): - yielded = Multi(yielded) - elif isinstance(yielded, Future): - yielded = YieldFuture(yielded) - if isinstance(yielded, YieldPoint): - self.yield_point = yielded - try: - self.yield_point.start(self) - except Exception: - self.exc_info = sys.exc_info() - else: - self.exc_info = (BadYieldError( - "yielded unknown object %r" % (yielded,)),) + self.future = _null_future + self.result_future.set_exc_info(sys.exc_info()) + self.result_future = None + self._deactivate_stack_context() + return + if not self.handle_yield(yielded): + return finally: self.running = False - def result_callback(self, key): - def inner(*args, **kwargs): - if kwargs or len(args) > 1: - result = Arguments(args, kwargs) - elif args: - result = args[0] + def handle_yield(self, yielded): + if isinstance(yielded, list): + if all(is_future(f) for f in yielded): + yielded = multi_future(yielded) else: - result = None - self.set_result(key, result) - return wrap(inner) + yielded = Multi(yielded) + elif isinstance(yielded, dict): + if all(is_future(f) for f in yielded.values()): + yielded = multi_future(yielded) + else: + yielded = Multi(yielded) + + if isinstance(yielded, YieldPoint): + self.future = TracebackFuture() + def start_yield_point(): + try: + yielded.start(self) + if yielded.is_ready(): + self.future.set_result( + yielded.get_result()) + else: + self.yield_point = yielded + except Exception: + self.future = TracebackFuture() + self.future.set_exc_info(sys.exc_info()) + if self.stack_context_deactivate is None: + # Start a stack context if this is the first + # YieldPoint we've seen. + with stack_context.ExceptionStackContext( + self.handle_exception) as deactivate: + self.stack_context_deactivate = deactivate + def cb(): + start_yield_point() + self.run() + self.io_loop.add_callback(cb) + return False + else: + start_yield_point() + elif is_future(yielded): + self.future = yielded + if not self.future.done() or self.future is moment: + self.io_loop.add_future( + self.future, lambda f: self.run()) + return False + else: + self.future = TracebackFuture() + self.future.set_exception(BadYieldError( + "yielded unknown object %r" % (yielded,))) + return True + + def result_callback(self, key): + return stack_context.wrap(_argument_adapter( + functools.partial(self.set_result, key))) def handle_exception(self, typ, value, tb): if not self.running and not self.finished: - self.exc_info = (typ, value, tb) + self.future = TracebackFuture() + self.future.set_exc_info((typ, value, tb)) self.run() return True else: return False + def _deactivate_stack_context(self): + if self.stack_context_deactivate is not None: + self.stack_context_deactivate() + self.stack_context_deactivate = None + Arguments = collections.namedtuple('Arguments', ['args', 'kwargs']) + + +def _argument_adapter(callback): + """Returns a function that when invoked runs ``callback`` with one arg. + + If the function returned by this function is called with exactly + one argument, that argument is passed to ``callback``. 
Otherwise + the args tuple and kwargs dict are wrapped in an `Arguments` object. + """ + def wrapper(*args, **kwargs): + if kwargs or len(args) > 1: + callback(Arguments(args, kwargs)) + elif args: + callback(args[0]) + else: + callback(None) + return wrapper diff --git a/libs/tornado/http1connection.py b/libs/tornado/http1connection.py new file mode 100644 index 00000000..edaa5d9e --- /dev/null +++ b/libs/tornado/http1connection.py @@ -0,0 +1,650 @@ +#!/usr/bin/env python +# +# Copyright 2014 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Client and server implementations of HTTP/1.x. + +.. versionadded:: 3.3 +""" + +from __future__ import absolute_import, division, print_function, with_statement + +from tornado.concurrent import Future +from tornado.escape import native_str, utf8 +from tornado import gen +from tornado import httputil +from tornado import iostream +from tornado.log import gen_log, app_log +from tornado import stack_context +from tornado.util import GzipDecompressor + + +class _QuietException(Exception): + def __init__(self): + pass + +class _ExceptionLoggingContext(object): + """Used with the ``with`` statement when calling delegate methods to + log any exceptions with the given logger. Any exceptions caught are + converted to _QuietException + """ + def __init__(self, logger): + self.logger = logger + + def __enter__(self): + pass + + def __exit__(self, typ, value, tb): + if value is not None: + self.logger.error("Uncaught exception", exc_info=(typ, value, tb)) + raise _QuietException + +class HTTP1ConnectionParameters(object): + """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`. + """ + def __init__(self, no_keep_alive=False, chunk_size=None, + max_header_size=None, header_timeout=None, max_body_size=None, + body_timeout=None, use_gzip=False): + """ + :arg bool no_keep_alive: If true, always close the connection after + one request. + :arg int chunk_size: how much data to read into memory at once + :arg int max_header_size: maximum amount of data for HTTP headers + :arg float header_timeout: how long to wait for all headers (seconds) + :arg int max_body_size: maximum amount of data for body + :arg float body_timeout: how long to wait while reading body (seconds) + :arg bool use_gzip: if true, decode incoming ``Content-Encoding: gzip`` + """ + self.no_keep_alive = no_keep_alive + self.chunk_size = chunk_size or 65536 + self.max_header_size = max_header_size or 65536 + self.header_timeout = header_timeout + self.max_body_size = max_body_size + self.body_timeout = body_timeout + self.use_gzip = use_gzip + + +class HTTP1Connection(httputil.HTTPConnection): + """Implements the HTTP/1.x protocol. + + This class can be on its own for clients, or via `HTTP1ServerConnection` + for servers. 
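A hedged client-side sketch (``stream``, ``start_line``, ``headers`` and ``delegate`` prepared elsewhere)::

    conn = HTTP1Connection(stream, is_client=True)
    yield conn.write_headers(start_line, headers)
    conn.finish()
    yield conn.read_response(delegate)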
+ """ + def __init__(self, stream, is_client, params=None, context=None): + """ + :arg stream: an `.IOStream` + :arg bool is_client: client or server + :arg params: a `.HTTP1ConnectionParameters` instance or ``None`` + :arg context: an opaque application-defined object that can be accessed + as ``connection.context``. + """ + self.is_client = is_client + self.stream = stream + if params is None: + params = HTTP1ConnectionParameters() + self.params = params + self.context = context + self.no_keep_alive = params.no_keep_alive + # The body limits can be altered by the delegate, so save them + # here instead of just referencing self.params later. + self._max_body_size = (self.params.max_body_size or + self.stream.max_buffer_size) + self._body_timeout = self.params.body_timeout + # _write_finished is set to True when finish() has been called, + # i.e. there will be no more data sent. Data may still be in the + # stream's write buffer. + self._write_finished = False + # True when we have read the entire incoming body. + self._read_finished = False + # _finish_future resolves when all data has been written and flushed + # to the IOStream. + self._finish_future = Future() + # If true, the connection should be closed after this request + # (after the response has been written in the server side, + # and after it has been read in the client) + self._disconnect_on_finish = False + self._clear_callbacks() + # Save the start lines after we read or write them; they + # affect later processing (e.g. 304 responses and HEAD methods + # have content-length but no bodies) + self._request_start_line = None + self._response_start_line = None + self._request_headers = None + # True if we are writing output with chunked encoding. + self._chunking_output = None + # While reading a body with a content-length, this is the + # amount left to read. + self._expected_content_remaining = None + # A Future for our outgoing writes, returned by IOStream.write. + self._pending_write = None + + def read_response(self, delegate): + """Read a single HTTP response. + + Typical client-mode usage is to write a request using `write_headers`, + `write`, and `finish`, and then call ``read_response``. + + :arg delegate: a `.HTTPMessageDelegate` + + Returns a `.Future` that resolves to None after the full response has + been read. 
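A minimal delegate sketch for collecting the response (method names as defined by `.HTTPMessageDelegate`; the buffering strategy is illustrative)::

    class CollectingDelegate(httputil.HTTPMessageDelegate):
        def headers_received(self, start_line, headers):
            self.code, self.chunks = start_line.code, []

        def data_received(self, chunk):
            self.chunks.append(chunk)

        def finish(self):
            self.body = b''.join(self.chunks)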
+ """ + if self.params.use_gzip: + delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) + return self._read_message(delegate) + + @gen.coroutine + def _read_message(self, delegate): + need_delegate_close = False + try: + header_future = self.stream.read_until_regex( + b"\r?\n\r?\n", + max_bytes=self.params.max_header_size) + if self.params.header_timeout is None: + header_data = yield header_future + else: + try: + header_data = yield gen.with_timeout( + self.stream.io_loop.time() + self.params.header_timeout, + header_future, + io_loop=self.stream.io_loop) + except gen.TimeoutError: + self.close() + raise gen.Return(False) + start_line, headers = self._parse_headers(header_data) + if self.is_client: + start_line = httputil.parse_response_start_line(start_line) + self._response_start_line = start_line + else: + start_line = httputil.parse_request_start_line(start_line) + self._request_start_line = start_line + self._request_headers = headers + + self._disconnect_on_finish = not self._can_keep_alive( + start_line, headers) + need_delegate_close = True + with _ExceptionLoggingContext(app_log): + header_future = delegate.headers_received(start_line, headers) + if header_future is not None: + yield header_future + if self.stream is None: + # We've been detached. + need_delegate_close = False + raise gen.Return(False) + skip_body = False + if self.is_client: + if (self._request_start_line is not None and + self._request_start_line.method == 'HEAD'): + skip_body = True + code = start_line.code + if code == 304: + skip_body = True + if code >= 100 and code < 200: + # TODO: client delegates will get headers_received twice + # in the case of a 100-continue. Document or change? + yield self._read_message(delegate) + else: + if (headers.get("Expect") == "100-continue" and + not self._write_finished): + self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") + if not skip_body: + body_future = self._read_body(headers, delegate) + if body_future is not None: + if self._body_timeout is None: + yield body_future + else: + try: + yield gen.with_timeout( + self.stream.io_loop.time() + self._body_timeout, + body_future, self.stream.io_loop) + except gen.TimeoutError: + gen_log.info("Timeout reading body from %s", + self.context) + self.stream.close() + raise gen.Return(False) + self._read_finished = True + if not self._write_finished or self.is_client: + need_delegate_close = False + with _ExceptionLoggingContext(app_log): + delegate.finish() + # If we're waiting for the application to produce an asynchronous + # response, and we're not detached, register a close callback + # on the stream (we didn't need one while we were reading) + if (not self._finish_future.done() and + self.stream is not None and + not self.stream.closed()): + self.stream.set_close_callback(self._on_connection_close) + yield self._finish_future + if self.is_client and self._disconnect_on_finish: + self.close() + if self.stream is None: + raise gen.Return(False) + except httputil.HTTPInputException as e: + gen_log.info("Malformed HTTP message from %s: %s", + self.context, e) + self.close() + raise gen.Return(False) + finally: + if need_delegate_close: + with _ExceptionLoggingContext(app_log): + delegate.on_connection_close() + self._clear_callbacks() + raise gen.Return(True) + + def _clear_callbacks(self): + """Clears the callback attributes. + + This allows the request handler to be garbage collected more + quickly in CPython by breaking up reference cycles. 
+ """ + self._write_callback = None + self._write_future = None + self._close_callback = None + if self.stream is not None: + self.stream.set_close_callback(None) + + def set_close_callback(self, callback): + """Sets a callback that will be run when the connection is closed. + + .. deprecated:: 3.3 + Use `.HTTPMessageDelegate.on_connection_close` instead. + """ + self._close_callback = stack_context.wrap(callback) + + def _on_connection_close(self): + # Note that this callback is only registered on the IOStream + # when we have finished reading the request and are waiting for + # the application to produce its response. + if self._close_callback is not None: + callback = self._close_callback + self._close_callback = None + callback() + if not self._finish_future.done(): + self._finish_future.set_result(None) + self._clear_callbacks() + + def close(self): + if self.stream is not None: + self.stream.close() + self._clear_callbacks() + if not self._finish_future.done(): + self._finish_future.set_result(None) + + def detach(self): + """Take control of the underlying stream. + + Returns the underlying `.IOStream` object and stops all further + HTTP processing. May only be called during + `.HTTPMessageDelegate.headers_received`. Intended for implementing + protocols like websockets that tunnel over an HTTP handshake. + """ + self._clear_callbacks() + stream = self.stream + self.stream = None + return stream + + def set_body_timeout(self, timeout): + """Sets the body timeout for a single request. + + Overrides the value from `.HTTP1ConnectionParameters`. + """ + self._body_timeout = timeout + + def set_max_body_size(self, max_body_size): + """Sets the body size limit for a single request. + + Overrides the value from `.HTTP1ConnectionParameters`. + """ + self._max_body_size = max_body_size + + def write_headers(self, start_line, headers, chunk=None, callback=None): + """Implements `.HTTPConnection.write_headers`.""" + if self.is_client: + self._request_start_line = start_line + # Client requests with a non-empty body must have either a + # Content-Length or a Transfer-Encoding. + self._chunking_output = ( + start_line.method in ('POST', 'PUT', 'PATCH') and + 'Content-Length' not in headers and + 'Transfer-Encoding' not in headers) + else: + self._response_start_line = start_line + self._chunking_output = ( + # TODO: should this use + # self._request_start_line.version or + # start_line.version? + self._request_start_line.version == 'HTTP/1.1' and + # 304 responses have no body (not even a zero-length body), and so + # should not have either Content-Length or Transfer-Encoding. + # headers. + start_line.code != 304 and + # No need to chunk the output if a Content-Length is specified. + 'Content-Length' not in headers and + # Applications are discouraged from touching Transfer-Encoding, + # but if they do, leave it alone. + 'Transfer-Encoding' not in headers) + # If a 1.0 client asked for keep-alive, add the header. 
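For reference, a worked example of the chunked framing selected above: `_format_chunk` below emits a single ``b"hello"`` chunk as::

    b"5\r\nhello\r\n"

and `finish` later terminates the body with ``b"0\r\n\r\n"``.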
+        if (self._request_start_line.version == 'HTTP/1.0' and
+                (self._request_headers.get('Connection', '').lower()
+                 == 'keep-alive')):
+            headers['Connection'] = 'Keep-Alive'
+        if self._chunking_output:
+            headers['Transfer-Encoding'] = 'chunked'
+        if (not self.is_client and
+            (self._request_start_line.method == 'HEAD' or
+             start_line.code == 304)):
+            self._expected_content_remaining = 0
+        elif 'Content-Length' in headers:
+            self._expected_content_remaining = int(headers['Content-Length'])
+        else:
+            self._expected_content_remaining = None
+        lines = [utf8("%s %s %s" % start_line)]
+        lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()])
+        for line in lines:
+            if b'\n' in line:
+                raise ValueError('Newline in header: ' + repr(line))
+        future = None
+        if self.stream.closed():
+            future = self._write_future = Future()
+            future.set_exception(iostream.StreamClosedError())
+        else:
+            if callback is not None:
+                self._write_callback = stack_context.wrap(callback)
+            else:
+                future = self._write_future = Future()
+            data = b"\r\n".join(lines) + b"\r\n\r\n"
+            if chunk:
+                data += self._format_chunk(chunk)
+            self._pending_write = self.stream.write(data)
+            self._pending_write.add_done_callback(self._on_write_complete)
+        return future
+
+    def _format_chunk(self, chunk):
+        if self._expected_content_remaining is not None:
+            self._expected_content_remaining -= len(chunk)
+            if self._expected_content_remaining < 0:
+                # Close the stream now to stop further framing errors.
+                self.stream.close()
+                raise httputil.HTTPOutputException(
+                    "Tried to write more data than Content-Length")
+        if self._chunking_output and chunk:
+            # Don't write out empty chunks because that means END-OF-STREAM
+            # with chunked encoding
+            return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n"
+        else:
+            return chunk
+
+    def write(self, chunk, callback=None):
+        """Implements `.HTTPConnection.write`.
+
+        For backwards compatibility it is allowed, but deprecated, to
+        skip `write_headers` and instead call `write()` with a
+        pre-encoded header block.
+        """
+        future = None
+        if self.stream.closed():
+            future = self._write_future = Future()
+            self._write_future.set_exception(iostream.StreamClosedError())
+        else:
+            if callback is not None:
+                self._write_callback = stack_context.wrap(callback)
+            else:
+                future = self._write_future = Future()
+            self._pending_write = self.stream.write(self._format_chunk(chunk))
+            self._pending_write.add_done_callback(self._on_write_complete)
+        return future
+
+    def finish(self):
+        """Implements `.HTTPConnection.finish`."""
+        if (self._expected_content_remaining is not None and
+                self._expected_content_remaining != 0 and
+                not self.stream.closed()):
+            self.stream.close()
+            raise httputil.HTTPOutputException(
+                "Tried to write %d bytes less than Content-Length" %
+                self._expected_content_remaining)
+        if self._chunking_output:
+            if not self.stream.closed():
+                self._pending_write = self.stream.write(b"0\r\n\r\n")
+                self._pending_write.add_done_callback(self._on_write_complete)
+        self._write_finished = True
+        # If the app finished the request while we're still reading,
+        # divert any remaining data away from the delegate and
+        # close the connection when we're done sending our response.
+        # Closing the connection is the only way to avoid reading the
+        # whole input body.
+        if not self._read_finished:
+            self._disconnect_on_finish = True
+            # No more data is coming, so instruct TCP to send any remaining
+            # data immediately instead of waiting for a full packet or ack.
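Caller-side flow control sketch: both `write_headers` and `write` return a `.Future` that resolves once the data has been accepted by the `.IOStream`, so a producer can pace itself::

    yield conn.write_headers(start_line, headers)
    yield conn.write(chunk)
    conn.finish()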
+            self.stream.set_nodelay(True)
+        if self._pending_write is None:
+            self._finish_request(None)
+        else:
+            self._pending_write.add_done_callback(self._finish_request)
+
+    def _on_write_complete(self, future):
+        if self._write_callback is not None:
+            callback = self._write_callback
+            self._write_callback = None
+            self.stream.io_loop.add_callback(callback)
+        if self._write_future is not None:
+            future = self._write_future
+            self._write_future = None
+            future.set_result(None)
+
+    def _can_keep_alive(self, start_line, headers):
+        if self.params.no_keep_alive:
+            return False
+        connection_header = headers.get("Connection")
+        if connection_header is not None:
+            connection_header = connection_header.lower()
+        if start_line.version == "HTTP/1.1":
+            return connection_header != "close"
+        elif ("Content-Length" in headers
+                or start_line.method in ("HEAD", "GET")):
+            return connection_header == "keep-alive"
+        return False
+
+    def _finish_request(self, future):
+        self._clear_callbacks()
+        if not self.is_client and self._disconnect_on_finish:
+            self.close()
+            return
+        # Turn Nagle's algorithm back on, leaving the stream in its
+        # default state for the next request.
+        self.stream.set_nodelay(False)
+        if not self._finish_future.done():
+            self._finish_future.set_result(None)
+
+    def _parse_headers(self, data):
+        data = native_str(data.decode('latin1'))
+        eol = data.find("\r\n")
+        start_line = data[:eol]
+        try:
+            headers = httputil.HTTPHeaders.parse(data[eol:])
+        except ValueError:
+            # probably from split() if there was no ':' in the line
+            raise httputil.HTTPInputException("Malformed HTTP headers: %r" %
+                                              data[eol:100])
+        return start_line, headers
+
+    def _read_body(self, headers, delegate):
+        content_length = headers.get("Content-Length")
+        if content_length:
+            content_length = int(content_length)
+            if content_length > self._max_body_size:
+                raise httputil.HTTPInputException("Content-Length too long")
+            return self._read_fixed_body(content_length, delegate)
+        if headers.get("Transfer-Encoding") == "chunked":
+            return self._read_chunked_body(delegate)
+        if self.is_client:
+            return self._read_body_until_close(delegate)
+        return None
+
+    @gen.coroutine
+    def _read_fixed_body(self, content_length, delegate):
+        while content_length > 0:
+            body = yield self.stream.read_bytes(
+                min(self.params.chunk_size, content_length), partial=True)
+            content_length -= len(body)
+            if not self._write_finished or self.is_client:
+                with _ExceptionLoggingContext(app_log):
+                    yield gen.maybe_future(delegate.data_received(body))
+
+    @gen.coroutine
+    def _read_chunked_body(self, delegate):
+        # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1
+        total_size = 0
+        while True:
+            chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64)
+            chunk_len = int(chunk_len.strip(), 16)
+            if chunk_len == 0:
+                return
+            total_size += chunk_len
+            if total_size > self._max_body_size:
+                raise httputil.HTTPInputException("chunked body too large")
+            bytes_to_read = chunk_len
+            while bytes_to_read:
+                chunk = yield self.stream.read_bytes(
+                    min(bytes_to_read, self.params.chunk_size), partial=True)
+                bytes_to_read -= len(chunk)
+                if not self._write_finished or self.is_client:
+                    with _ExceptionLoggingContext(app_log):
+                        yield gen.maybe_future(delegate.data_received(chunk))
+            # chunk ends with \r\n
+            crlf = yield self.stream.read_bytes(2)
+            assert crlf == b"\r\n"
+
+    @gen.coroutine
+    def _read_body_until_close(self, delegate):
+        body = yield self.stream.read_until_close()
+        if not self._write_finished or self.is_client:
+            with
_ExceptionLoggingContext(app_log): + delegate.data_received(body) + + +class _GzipMessageDelegate(httputil.HTTPMessageDelegate): + """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. + """ + def __init__(self, delegate, chunk_size): + self._delegate = delegate + self._chunk_size = chunk_size + self._decompressor = None + + def headers_received(self, start_line, headers): + if headers.get("Content-Encoding") == "gzip": + self._decompressor = GzipDecompressor() + # Downstream delegates will only see uncompressed data, + # so rename the content-encoding header. + # (but note that curl_httpclient doesn't do this). + headers.add("X-Consumed-Content-Encoding", + headers["Content-Encoding"]) + del headers["Content-Encoding"] + return self._delegate.headers_received(start_line, headers) + + @gen.coroutine + def data_received(self, chunk): + if self._decompressor: + compressed_data = chunk + while compressed_data: + decompressed = self._decompressor.decompress( + compressed_data, self._chunk_size) + if decompressed: + yield gen.maybe_future( + self._delegate.data_received(decompressed)) + compressed_data = self._decompressor.unconsumed_tail + else: + yield gen.maybe_future(self._delegate.data_received(chunk)) + + def finish(self): + if self._decompressor is not None: + tail = self._decompressor.flush() + if tail: + # I believe the tail will always be empty (i.e. + # decompress will return all it can). The purpose + # of the flush call is to detect errors such + # as truncated input. But in case it ever returns + # anything, treat it as an extra chunk + self._delegate.data_received(tail) + return self._delegate.finish() + + +class HTTP1ServerConnection(object): + """An HTTP/1.x server.""" + def __init__(self, stream, params=None, context=None): + """ + :arg stream: an `.IOStream` + :arg params: a `.HTTP1ConnectionParameters` or None + :arg context: an opaque application-defined object that is accessible + as ``connection.context`` + """ + self.stream = stream + if params is None: + params = HTTP1ConnectionParameters() + self.params = params + self.context = context + self._serving_future = None + + @gen.coroutine + def close(self): + """Closes the connection. + + Returns a `.Future` that resolves after the serving loop has exited. + """ + self.stream.close() + # Block until the serving loop is done, but ignore any exceptions + # (start_serving is already responsible for logging them). + try: + yield self._serving_future + except Exception: + pass + + def start_serving(self, delegate): + """Starts serving requests on this connection. + + :arg delegate: a `.HTTPServerConnectionDelegate` + """ + assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) + self._serving_future = self._server_request_loop(delegate) + # Register the future on the IOLoop so its errors get logged. + self.stream.io_loop.add_future(self._serving_future, + lambda f: f.result()) + + @gen.coroutine + def _server_request_loop(self, delegate): + try: + while True: + conn = HTTP1Connection(self.stream, False, + self.params, self.context) + request_delegate = delegate.start_request(self, conn) + try: + ret = yield conn.read_response(request_delegate) + except (iostream.StreamClosedError, + iostream.UnsatisfiableReadError): + return + except _QuietException: + # This exception was already logged. 
+ conn.close() + return + except Exception: + gen_log.error("Uncaught exception", exc_info=True) + conn.close() + return + if not ret: + return + yield gen.moment + finally: + delegate.on_close(self) diff --git a/libs/tornado/httpclient.py b/libs/tornado/httpclient.py index 9b42d401..94a4593a 100755 --- a/libs/tornado/httpclient.py +++ b/libs/tornado/httpclient.py @@ -25,6 +25,11 @@ to switch to ``curl_httpclient`` for reasons such as the following: Note that if you are using ``curl_httpclient``, it is highly recommended that you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum supported version is 7.18.2, and the recommended version is 7.21.1 or newer. +It is highly recommended that your ``libcurl`` installation is built with +asynchronous DNS resolver (threaded or c-ares), otherwise you may encounter +various problems with request timeouts (for more information, see +http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS +and comments in curl_httpclient.py). """ from __future__ import absolute_import, division, print_function, with_statement @@ -34,7 +39,7 @@ import time import weakref from tornado.concurrent import TracebackFuture -from tornado.escape import utf8 +from tornado.escape import utf8, native_str from tornado import httputil, stack_context from tornado.ioloop import IOLoop from tornado.util import Configurable @@ -166,7 +171,7 @@ class AsyncHTTPClient(Configurable): kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an - `HTTPResponse`. The ``Future`` wil raise an `HTTPError` if + `HTTPResponse`. The ``Future`` will raise an `HTTPError` if the request returned a non-200 response code. If a ``callback`` is given, it will be invoked with the `HTTPResponse`. @@ -259,14 +264,27 @@ class HTTPRequest(object): proxy_password=None, allow_nonstandard_methods=None, validate_cert=None, ca_certs=None, allow_ipv6=None, - client_key=None, client_cert=None): + client_key=None, client_cert=None, body_producer=None, + expect_100_continue=False): r"""All parameters except ``url`` are optional. :arg string url: URL to fetch :arg string method: HTTP method, e.g. "GET" or "POST" :arg headers: Additional HTTP headers to pass on the request - :arg body: HTTP body to pass on the request :type headers: `~tornado.httputil.HTTPHeaders` or `dict` + :arg body: HTTP request body as a string (byte or unicode; if unicode + the utf-8 encoding will be used) + :arg body_producer: Callable used for lazy/asynchronous request bodies. + It is called with one argument, a ``write`` function, and should + return a `.Future`. It should call the write function with new + data as it becomes available. The write function returns a + `.Future` which can be used for flow control. + Only one of ``body`` and ``body_producer`` may + be specified. ``body_producer`` is not supported on + ``curl_httpclient``. When using ``body_producer`` it is recommended + to pass a ``Content-Length`` in the headers as otherwise chunked + encoding will be used, and many servers do not support chunked + encoding on requests. New in Tornado 3.3 :arg string auth_username: Username for HTTP authentication :arg string auth_password: Password for HTTP authentication :arg string auth_mode: Authentication mode; default is "basic". @@ -319,6 +337,11 @@ class HTTPRequest(object): note below when used with ``curl_httpclient``. :arg string client_cert: Filename for client SSL certificate, if any. See note below when used with ``curl_httpclient``. 
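A hedged ``body_producer`` sketch (chunk values illustrative; the explicit ``Content-Length`` avoids chunked request encoding)::

    @gen.coroutine
    def producer(write):
        for chunk in [b"part1", b"part2"]:
            yield write(chunk)

    request = HTTPRequest(url, method="POST", body_producer=producer,
                          headers={"Content-Length": "10"})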
+ :arg bool expect_100_continue: If true, send the + ``Expect: 100-continue`` header and wait for a continue response + before sending the request body. Only supported with + simple_httpclient. + .. note:: @@ -334,6 +357,9 @@ class HTTPRequest(object): .. versionadded:: 3.1 The ``auth_mode`` argument. + + .. versionadded:: 3.3 + The ``body_producer`` and ``expect_100_continue`` arguments. """ # Note that some of these attributes go through property setters # defined below. @@ -348,6 +374,7 @@ class HTTPRequest(object): self.url = url self.method = method self.body = body + self.body_producer = body_producer self.auth_username = auth_username self.auth_password = auth_password self.auth_mode = auth_mode @@ -367,6 +394,7 @@ class HTTPRequest(object): self.allow_ipv6 = allow_ipv6 self.client_key = client_key self.client_cert = client_cert + self.expect_100_continue = expect_100_continue self.start_time = time.time() @property @@ -388,6 +416,14 @@ class HTTPRequest(object): def body(self, value): self._body = utf8(value) + @property + def body_producer(self): + return self._body_producer + + @body_producer.setter + def body_producer(self, value): + self._body_producer = stack_context.wrap(value) + @property def streaming_callback(self): return self._streaming_callback @@ -423,8 +459,6 @@ class HTTPResponse(object): * code: numeric HTTP status code, e.g. 200 or 404 * reason: human-readable reason phrase describing the status code - (with curl_httpclient, this is a default value rather than the - server's actual response) * headers: `tornado.httputil.HTTPHeaders` object @@ -466,7 +500,8 @@ class HTTPResponse(object): self.effective_url = effective_url if error is None: if self.code < 200 or self.code >= 300: - self.error = HTTPError(self.code, response=self) + self.error = HTTPError(self.code, message=self.reason, + response=self) else: self.error = None else: @@ -556,7 +591,7 @@ def main(): if options.print_headers: print(response.headers) if options.print_body: - print(response.body) + print(native_str(response.body)) client.close() if __name__ == "__main__": diff --git a/libs/tornado/httpserver.py b/libs/tornado/httpserver.py index 34e7b768..469374e1 100755 --- a/libs/tornado/httpserver.py +++ b/libs/tornado/httpserver.py @@ -20,70 +20,55 @@ Typical applications have little direct interaction with the `HTTPServer` class except to start a server at the beginning of the process (and even that is often done indirectly via `tornado.web.Application.listen`). -This module also defines the `HTTPRequest` class which is exposed via -`tornado.web.RequestHandler.request`. +.. versionchanged:: 3.3 + + The ``HTTPRequest`` class that used to live in this module has been moved + to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. 
""" from __future__ import absolute_import, division, print_function, with_statement import socket -import ssl -import time -import copy -from tornado.escape import native_str, parse_qs_bytes +from tornado.escape import native_str +from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters +from tornado import gen from tornado import httputil from tornado import iostream -from tornado.log import gen_log from tornado import netutil from tornado.tcpserver import TCPServer -from tornado import stack_context -from tornado.util import bytes_type - -try: - import Cookie # py2 -except ImportError: - import http.cookies as Cookie # py3 -class HTTPServer(TCPServer): +class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate): r"""A non-blocking, single-threaded HTTP server. - A server is defined by a request callback that takes an HTTPRequest - instance as an argument and writes a valid HTTP response with - `HTTPRequest.write`. `HTTPRequest.finish` finishes the request (but does - not necessarily close the connection in the case of HTTP/1.1 keep-alive - requests). A simple example server that echoes back the URI you - requested:: + A server is defined by either a request callback that takes a + `.HTTPServerRequest` as an argument or a `.HTTPServerConnectionDelegate` + instance. + + A simple example server that echoes back the URI you requested:: import tornado.httpserver import tornado.ioloop def handle_request(request): message = "You requested %s\n" % request.uri - request.write("HTTP/1.1 200 OK\r\nContent-Length: %d\r\n\r\n%s" % ( - len(message), message)) - request.finish() + request.connection.write_headers( + httputil.ResponseStartLine('HTTP/1.1', 200, 'OK'), + {"Content-Length": str(len(message))}) + request.connection.write(message) + request.connection.finish() http_server = tornado.httpserver.HTTPServer(handle_request) http_server.listen(8888) tornado.ioloop.IOLoop.instance().start() - `HTTPServer` is a very basic connection handler. It parses the request - headers and body, but the request callback is responsible for producing - the response exactly as it will appear on the wire. This affords - maximum flexibility for applications to implement whatever parts - of HTTP responses are required. + Applications should use the methods of `.HTTPConnection` to write + their response. `HTTPServer` supports keep-alive connections by default (automatically for HTTP/1.1, or for HTTP/1.0 when the client - requests ``Connection: keep-alive``). This means that the request - callback must generate a properly-framed response, using either - the ``Content-Length`` header or ``Transfer-Encoding: chunked``. - Applications that are unable to frame their responses properly - should instead return a ``Connection: close`` header in each - response and pass ``no_keep_alive=True`` to the `HTTPServer` - constructor. + requests ``Connection: keep-alive``). If ``xheaders`` is ``True``, we support the ``X-Real-Ip``/``X-Forwarded-For`` and @@ -143,407 +128,169 @@ class HTTPServer(TCPServer): servers if you want to create your listening sockets in some way other than `tornado.netutil.bind_sockets`. + .. versionchanged:: 3.3 + Added ``gzip``, ``chunk_size``, ``max_header_size``, + ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` + arguments. Added support for `.HTTPServerConnectionDelegate` + instances as ``request_callback``. 
""" def __init__(self, request_callback, no_keep_alive=False, io_loop=None, - xheaders=False, ssl_options=None, protocol=None, **kwargs): + xheaders=False, ssl_options=None, protocol=None, gzip=False, + chunk_size=None, max_header_size=None, + idle_connection_timeout=None, body_timeout=None, + max_body_size=None, max_buffer_size=None): self.request_callback = request_callback self.no_keep_alive = no_keep_alive self.xheaders = xheaders self.protocol = protocol + self.conn_params = HTTP1ConnectionParameters( + use_gzip=gzip, + chunk_size=chunk_size, + max_header_size=max_header_size, + header_timeout=idle_connection_timeout or 3600, + max_body_size=max_body_size, + body_timeout=body_timeout) TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, - **kwargs) + max_buffer_size=max_buffer_size, + read_chunk_size=chunk_size) + self._connections = set() + + @gen.coroutine + def close_all_connections(self): + while self._connections: + # Peek at an arbitrary element of the set + conn = next(iter(self._connections)) + yield conn.close() def handle_stream(self, stream, address): - HTTPConnection(stream, address, self.request_callback, - self.no_keep_alive, self.xheaders, self.protocol) + context = _HTTPRequestContext(stream, address, + self.protocol) + conn = HTTP1ServerConnection( + stream, self.conn_params, context) + self._connections.add(conn) + conn.start_serving(self) + + def start_request(self, server_conn, request_conn): + return _ServerRequestAdapter(self, request_conn) + + def on_close(self, server_conn): + self._connections.remove(server_conn) -class _BadRequestException(Exception): - """Exception class for malformed HTTP requests.""" - pass - - -class HTTPConnection(object): - """Handles a connection to an HTTP client, executing HTTP requests. - - We parse HTTP headers and bodies, and execute the request callback - until the HTTP conection is closed. - """ - def __init__(self, stream, address, request_callback, no_keep_alive=False, - xheaders=False, protocol=None): - self.stream = stream +class _HTTPRequestContext(object): + def __init__(self, stream, address, protocol): self.address = address + self.protocol = protocol # Save the socket's address family now so we know how to # interpret self.address even after the stream is closed # and its socket attribute replaced with None. - self.address_family = stream.socket.family - self.request_callback = request_callback - self.no_keep_alive = no_keep_alive - self.xheaders = xheaders - self.protocol = protocol - self._clear_request_state() - # Save stack context here, outside of any request. This keeps - # contexts from one request from leaking into the next. - self._header_callback = stack_context.wrap(self._on_headers) - self.stream.set_close_callback(self._on_connection_close) - self.stream.read_until(b"\r\n\r\n", self._header_callback) - - def _clear_request_state(self): - """Clears the per-request state. - - This is run in between requests to allow the previous handler - to be garbage collected (and prevent spurious close callbacks), - and when the connection is closed (to break up cycles and - facilitate garbage collection in cpython). - """ - self._request = None - self._request_finished = False - self._write_callback = None - self._close_callback = None - - def set_close_callback(self, callback): - """Sets a callback that will be run when the connection is closed. 
- - Use this instead of accessing - `HTTPConnection.stream.set_close_callback - <.BaseIOStream.set_close_callback>` directly (which was the - recommended approach prior to Tornado 3.0). - """ - self._close_callback = stack_context.wrap(callback) - - def _on_connection_close(self): - if self._close_callback is not None: - callback = self._close_callback - self._close_callback = None - callback() - # Delete any unfinished callbacks to break up reference cycles. - self._header_callback = None - self._clear_request_state() - - def close(self): - self.stream.close() - # Remove this reference to self, which would otherwise cause a - # cycle and delay garbage collection of this connection. - self._header_callback = None - self._clear_request_state() - - def write(self, chunk, callback=None): - """Writes a chunk of output to the stream.""" - if not self.stream.closed(): - self._write_callback = stack_context.wrap(callback) - self.stream.write(chunk, self._on_write_complete) - - def finish(self): - """Finishes the request.""" - self._request_finished = True - # No more data is coming, so instruct TCP to send any remaining - # data immediately instead of waiting for a full packet or ack. - self.stream.set_nodelay(True) - if not self.stream.writing(): - self._finish_request() - - def _on_write_complete(self): - if self._write_callback is not None: - callback = self._write_callback - self._write_callback = None - callback() - # _on_write_complete is enqueued on the IOLoop whenever the - # IOStream's write buffer becomes empty, but it's possible for - # another callback that runs on the IOLoop before it to - # simultaneously write more data and finish the request. If - # there is still data in the IOStream, a future - # _on_write_complete will be responsible for calling - # _finish_request. - if self._request_finished and not self.stream.writing(): - self._finish_request() - - def _finish_request(self): - if self.no_keep_alive or self._request is None: - disconnect = True + if stream.socket is not None: + self.address_family = stream.socket.family else: - connection_header = self._request.headers.get("Connection") - if connection_header is not None: - connection_header = connection_header.lower() - if self._request.supports_http_1_1(): - disconnect = connection_header == "close" - elif ("Content-Length" in self._request.headers - or self._request.method in ("HEAD", "GET")): - disconnect = connection_header != "keep-alive" - else: - disconnect = True - self._clear_request_state() - if disconnect: - self.close() - return - try: - # Use a try/except instead of checking stream.closed() - # directly, because in some cases the stream doesn't discover - # that it's closed until you try to read from it. - self.stream.read_until(b"\r\n\r\n", self._header_callback) - - # Turn Nagle's algorithm back on, leaving the stream in its - # default state for the next request. 
- self.stream.set_nodelay(False) - except iostream.StreamClosedError: - self.close() - - def _on_headers(self, data): - try: - data = native_str(data.decode('latin1')) - eol = data.find("\r\n") - start_line = data[:eol] - try: - method, uri, version = start_line.split(" ") - except ValueError: - raise _BadRequestException("Malformed HTTP request line") - if not version.startswith("HTTP/"): - raise _BadRequestException("Malformed HTTP version in HTTP Request-Line") - try: - headers = httputil.HTTPHeaders.parse(data[eol:]) - except ValueError: - # Probably from split() if there was no ':' in the line - raise _BadRequestException("Malformed HTTP headers") - - # HTTPRequest wants an IP, not a full socket address - if self.address_family in (socket.AF_INET, socket.AF_INET6): - remote_ip = self.address[0] - else: - # Unix (or other) socket; fake the remote address - remote_ip = '0.0.0.0' - - self._request = HTTPRequest( - connection=self, method=method, uri=uri, version=version, - headers=headers, remote_ip=remote_ip, protocol=self.protocol) - - content_length = headers.get("Content-Length") - if content_length: - content_length = int(content_length) - if content_length > self.stream.max_buffer_size: - raise _BadRequestException("Content-Length too long") - if headers.get("Expect") == "100-continue": - self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") - self.stream.read_bytes(content_length, self._on_request_body) - return - - self.request_callback(self._request) - except _BadRequestException as e: - gen_log.info("Malformed HTTP request from %r: %s", - self.address, e) - self.close() - return - - def _on_request_body(self, data): - self._request.body = data - if self._request.method in ("POST", "PATCH", "PUT"): - httputil.parse_body_arguments( - self._request.headers.get("Content-Type", ""), data, - self._request.body_arguments, self._request.files) - - for k, v in self._request.body_arguments.items(): - self._request.arguments.setdefault(k, []).extend(v) - self.request_callback(self._request) - - -class HTTPRequest(object): - """A single HTTP request. - - All attributes are type `str` unless otherwise noted. - - .. attribute:: method - - HTTP request method, e.g. "GET" or "POST" - - .. attribute:: uri - - The requested uri. - - .. attribute:: path - - The path portion of `uri` - - .. attribute:: query - - The query portion of `uri` - - .. attribute:: version - - HTTP version specified in request, e.g. "HTTP/1.1" - - .. attribute:: headers - - `.HTTPHeaders` dictionary-like object for request headers. Acts like - a case-insensitive dictionary with additional methods for repeated - headers. - - .. attribute:: body - - Request body, if present, as a byte string. - - .. attribute:: remote_ip - - Client's IP address as a string. If ``HTTPServer.xheaders`` is set, - will pass along the real IP address provided by a load balancer - in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. - - .. versionchanged:: 3.1 - The list format of ``X-Forwarded-For`` is now supported. - - .. attribute:: protocol - - The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` - is set, will pass along the protocol used by a load balancer if - reported via an ``X-Scheme`` header. - - .. attribute:: host - - The requested hostname, usually taken from the ``Host`` header. - - .. attribute:: arguments - - GET/POST arguments are available in the arguments property, which - maps arguments names to lists of values (to support multiple values - for individual names). 
Names are of type `str`, while arguments - are byte strings. Note that this is different from - `.RequestHandler.get_argument`, which returns argument values as - unicode strings. - - .. attribute:: query_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the query string. - - .. versionadded:: 3.2 - - .. attribute:: body_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the request body. - - .. versionadded:: 3.2 - - .. attribute:: files - - File uploads are available in the files property, which maps file - names to lists of `.HTTPFile`. - - .. attribute:: connection - - An HTTP request is attached to a single HTTP connection, which can - be accessed through the "connection" attribute. Since connections - are typically kept open in HTTP/1.1, multiple requests can be handled - sequentially on a single connection. - """ - def __init__(self, method, uri, version="HTTP/1.0", headers=None, - body=None, remote_ip=None, protocol=None, host=None, - files=None, connection=None): - self.method = method - self.uri = uri - self.version = version - self.headers = headers or httputil.HTTPHeaders() - self.body = body or "" - - # set remote IP and protocol - self.remote_ip = remote_ip + self.address_family = None + # In HTTPServerRequest we want an IP, not a full socket address. + if (self.address_family in (socket.AF_INET, socket.AF_INET6) and + address is not None): + self.remote_ip = address[0] + else: + # Unix (or other) socket; fake the remote address. + self.remote_ip = '0.0.0.0' if protocol: self.protocol = protocol - elif connection and isinstance(connection.stream, - iostream.SSLIOStream): + elif isinstance(stream, iostream.SSLIOStream): self.protocol = "https" else: self.protocol = "http" + self._orig_remote_ip = self.remote_ip + self._orig_protocol = self.protocol - # xheaders can override the defaults - if connection and connection.xheaders: - # Squid uses X-Forwarded-For, others use X-Real-Ip - ip = self.headers.get("X-Forwarded-For", self.remote_ip) - ip = ip.split(',')[-1].strip() - ip = self.headers.get( - "X-Real-Ip", ip) - if netutil.is_valid_ip(ip): - self.remote_ip = ip - # AWS uses X-Forwarded-Proto - proto = self.headers.get( - "X-Scheme", self.headers.get("X-Forwarded-Proto", self.protocol)) - if proto in ("http", "https"): - self.protocol = proto + def __str__(self): + if self.address_family in (socket.AF_INET, socket.AF_INET6): + return self.remote_ip + elif isinstance(self.address, bytes): + # Python 3 with the -bb option warns about str(bytes), + # so convert it explicitly. + # Unix socket addresses are str on mac but bytes on linux. + return native_str(self.address) + else: + return str(self.address) - self.host = host or self.headers.get("Host") or "127.0.0.1" - self.files = files or {} + def _apply_xheaders(self, headers): + """Rewrite the ``remote_ip`` and ``protocol`` fields.""" + # Squid uses X-Forwarded-For, others use X-Real-Ip + ip = headers.get("X-Forwarded-For", self.remote_ip) + ip = ip.split(',')[-1].strip() + ip = headers.get("X-Real-Ip", ip) + if netutil.is_valid_ip(ip): + self.remote_ip = ip + # AWS uses X-Forwarded-Proto + proto_header = headers.get( + "X-Scheme", headers.get("X-Forwarded-Proto", + self.protocol)) + if proto_header in ("http", "https"): + self.protocol = proto_header + + def _unapply_xheaders(self): + """Undo changes from `_apply_xheaders`. + + Xheaders are per-request so they should not leak to the next + request on the same connection. 
+ """ + self.remote_ip = self._orig_remote_ip + self.protocol = self._orig_protocol + + +class _ServerRequestAdapter(httputil.HTTPMessageDelegate): + """Adapts the `HTTPMessageDelegate` interface to the interface expected + by our clients. + """ + def __init__(self, server, connection): + self.server = server self.connection = connection - self._start_time = time.time() - self._finish_time = None + self.request = None + if isinstance(server.request_callback, + httputil.HTTPServerConnectionDelegate): + self.delegate = server.request_callback.start_request(connection) + self._chunks = None + else: + self.delegate = None + self._chunks = [] - self.path, sep, self.query = uri.partition('?') - self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) - self.query_arguments = copy.deepcopy(self.arguments) - self.body_arguments = {} + def headers_received(self, start_line, headers): + if self.server.xheaders: + self.connection.context._apply_xheaders(headers) + if self.delegate is None: + self.request = httputil.HTTPServerRequest( + connection=self.connection, start_line=start_line, + headers=headers) + else: + return self.delegate.headers_received(start_line, headers) - def supports_http_1_1(self): - """Returns True if this request supports HTTP/1.1 semantics""" - return self.version == "HTTP/1.1" - - @property - def cookies(self): - """A dictionary of Cookie.Morsel objects.""" - if not hasattr(self, "_cookies"): - self._cookies = Cookie.SimpleCookie() - if "Cookie" in self.headers: - try: - self._cookies.load( - native_str(self.headers["Cookie"])) - except Exception: - self._cookies = {} - return self._cookies - - def write(self, chunk, callback=None): - """Writes the given chunk to the response stream.""" - assert isinstance(chunk, bytes_type) - self.connection.write(chunk, callback=callback) + def data_received(self, chunk): + if self.delegate is None: + self._chunks.append(chunk) + else: + return self.delegate.data_received(chunk) def finish(self): - """Finishes this HTTP request on the open connection.""" - self.connection.finish() - self._finish_time = time.time() - - def full_url(self): - """Reconstructs the full URL for this request.""" - return self.protocol + "://" + self.host + self.uri - - def request_time(self): - """Returns the amount of time it took for this request to execute.""" - if self._finish_time is None: - return time.time() - self._start_time + if self.delegate is None: + self.request.body = b''.join(self._chunks) + self.request._parse_body() + self.server.request_callback(self.request) else: - return self._finish_time - self._start_time + self.delegate.finish() + self._cleanup() - def get_ssl_certificate(self, binary_form=False): - """Returns the client's SSL certificate, if any. + def on_connection_close(self): + if self.delegate is None: + self._chunks = None + else: + self.delegate.on_connection_close() + self._cleanup() - To use client certificates, the HTTPServer must have been constructed - with cert_reqs set in ssl_options, e.g.:: + def _cleanup(self): + if self.server.xheaders: + self.connection.context._unapply_xheaders() - server = HTTPServer(app, - ssl_options=dict( - certfile="foo.crt", - keyfile="foo.key", - cert_reqs=ssl.CERT_REQUIRED, - ca_certs="cacert.crt")) - By default, the return value is a dictionary (or None, if no - client certificate is present). If ``binary_form`` is true, a - DER-encoded form of the certificate is returned instead. See - SSLSocket.getpeercert() in the standard library for more - details. 
- http://docs.python.org/library/ssl.html#sslsocket-objects - """ - try: - return self.connection.stream.socket.getpeercert( - binary_form=binary_form) - except ssl.SSLError: - return None - - def __repr__(self): - attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") - args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) - return "%s(%s, headers=%s)" % ( - self.__class__.__name__, args, dict(self.headers)) +HTTPRequest = httputil.HTTPServerRequest diff --git a/libs/tornado/httputil.py b/libs/tornado/httputil.py index 2575bc56..6e110d90 100755 --- a/libs/tornado/httputil.py +++ b/libs/tornado/httputil.py @@ -14,20 +14,31 @@ # License for the specific language governing permissions and limitations # under the License. -"""HTTP utility code shared by clients and servers.""" +"""HTTP utility code shared by clients and servers. + +This module also defines the `HTTPServerRequest` class which is exposed +via `tornado.web.RequestHandler.request`. +""" from __future__ import absolute_import, division, print_function, with_statement import calendar import collections +import copy import datetime import email.utils import numbers +import re import time from tornado.escape import native_str, parse_qs_bytes, utf8 from tornado.log import gen_log -from tornado.util import ObjectDict +from tornado.util import ObjectDict, bytes_type + +try: + import Cookie # py2 +except ImportError: + import http.cookies as Cookie # py3 try: from httplib import responses # py2 @@ -43,6 +54,13 @@ try: except ImportError: from urllib.parse import urlencode # py3 +try: + from ssl import SSLError +except ImportError: + # ssl is unavailable on app engine. + class SSLError(Exception): + pass + class _NormalizedHeaderCache(dict): """Dynamic cached mapping of header names to Http-Header-Case. @@ -212,6 +230,337 @@ class HTTPHeaders(dict): return HTTPHeaders(self) +class HTTPServerRequest(object): + """A single HTTP request. + + All attributes are type `str` unless otherwise noted. + + .. attribute:: method + + HTTP request method, e.g. "GET" or "POST" + + .. attribute:: uri + + The requested uri. + + .. attribute:: path + + The path portion of `uri` + + .. attribute:: query + + The query portion of `uri` + + .. attribute:: version + + HTTP version specified in request, e.g. "HTTP/1.1" + + .. attribute:: headers + + `.HTTPHeaders` dictionary-like object for request headers. Acts like + a case-insensitive dictionary with additional methods for repeated + headers. + + .. attribute:: body + + Request body, if present, as a byte string. + + .. attribute:: remote_ip + + Client's IP address as a string. If ``HTTPServer.xheaders`` is set, + will pass along the real IP address provided by a load balancer + in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. + + .. versionchanged:: 3.1 + The list format of ``X-Forwarded-For`` is now supported. + + .. attribute:: protocol + + The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` + is set, will pass along the protocol used by a load balancer if + reported via an ``X-Scheme`` header. + + .. attribute:: host + + The requested hostname, usually taken from the ``Host`` header. + + .. attribute:: arguments + + GET/POST arguments are available in the arguments property, which + maps arguments names to lists of values (to support multiple values + for individual names). Names are of type `str`, while arguments + are byte strings. Note that this is different from + `.RequestHandler.get_argument`, which returns argument values as + unicode strings. 
+ + .. attribute:: query_arguments + + Same format as ``arguments``, but contains only arguments extracted + from the query string. + + .. versionadded:: 3.2 + + .. attribute:: body_arguments + + Same format as ``arguments``, but contains only arguments extracted + from the request body. + + .. versionadded:: 3.2 + + .. attribute:: files + + File uploads are available in the files property, which maps file + names to lists of `.HTTPFile`. + + .. attribute:: connection + + An HTTP request is attached to a single HTTP connection, which can + be accessed through the "connection" attribute. Since connections + are typically kept open in HTTP/1.1, multiple requests can be handled + sequentially on a single connection. + + .. versionchanged:: 3.3 + Moved from ``tornado.httpserver.HTTPRequest``. + """ + def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, + body=None, host=None, files=None, connection=None, + start_line=None): + if start_line is not None: + method, uri, version = start_line + self.method = method + self.uri = uri + self.version = version + self.headers = headers or HTTPHeaders() + self.body = body or "" + + # set remote IP and protocol + context = getattr(connection, 'context', None) + self.remote_ip = getattr(context, 'remote_ip', None) + self.protocol = getattr(context, 'protocol', "http") + + self.host = host or self.headers.get("Host") or "127.0.0.1" + self.files = files or {} + self.connection = connection + self._start_time = time.time() + self._finish_time = None + + self.path, sep, self.query = uri.partition('?') + self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) + self.query_arguments = copy.deepcopy(self.arguments) + self.body_arguments = {} + + def supports_http_1_1(self): + """Returns True if this request supports HTTP/1.1 semantics. + + .. deprecated:: 3.3 + Applications are less likely to need this information with the + introduction of `.HTTPConnection`. If you still need it, access + the ``version`` attribute directly. + """ + return self.version == "HTTP/1.1" + + @property + def cookies(self): + """A dictionary of Cookie.Morsel objects.""" + if not hasattr(self, "_cookies"): + self._cookies = Cookie.SimpleCookie() + if "Cookie" in self.headers: + try: + self._cookies.load( + native_str(self.headers["Cookie"])) + except Exception: + self._cookies = {} + return self._cookies + + def write(self, chunk, callback=None): + """Writes the given chunk to the response stream. + + .. deprecated:: 3.3 + Use ``request.connection`` and the `.HTTPConnection` methods + to write the response. + """ + assert isinstance(chunk, bytes_type) + self.connection.write(chunk, callback=callback) + + def finish(self): + """Finishes this HTTP request on the open connection. + + .. deprecated:: 3.3 + Use ``request.connection`` and the `.HTTPConnection` methods + to write the response. + """ + self.connection.finish() + self._finish_time = time.time() + + def full_url(self): + """Reconstructs the full URL for this request.""" + return self.protocol + "://" + self.host + self.uri + + def request_time(self): + """Returns the amount of time it took for this request to execute.""" + if self._finish_time is None: + return time.time() - self._start_time + else: + return self._finish_time - self._start_time + + def get_ssl_certificate(self, binary_form=False): + """Returns the client's SSL certificate, if any. 
+ + To use client certificates, the HTTPServer must have been constructed + with cert_reqs set in ssl_options, e.g.:: + + server = HTTPServer(app, + ssl_options=dict( + certfile="foo.crt", + keyfile="foo.key", + cert_reqs=ssl.CERT_REQUIRED, + ca_certs="cacert.crt")) + + By default, the return value is a dictionary (or None, if no + client certificate is present). If ``binary_form`` is true, a + DER-encoded form of the certificate is returned instead. See + SSLSocket.getpeercert() in the standard library for more + details. + http://docs.python.org/library/ssl.html#sslsocket-objects + """ + try: + return self.connection.stream.socket.getpeercert( + binary_form=binary_form) + except SSLError: + return None + + def _parse_body(self): + parse_body_arguments( + self.headers.get("Content-Type", ""), self.body, + self.body_arguments, self.files, + self.headers) + + for k, v in self.body_arguments.items(): + self.arguments.setdefault(k, []).extend(v) + + def __repr__(self): + attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") + args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) + return "%s(%s, headers=%s)" % ( + self.__class__.__name__, args, dict(self.headers)) + + +class HTTPInputException(Exception): + """Exception class for malformed HTTP requests or responses + from remote sources. + + .. versionadded:: 3.3 + """ + pass + + +class HTTPOutputException(Exception): + """Exception class for errors in HTTP output. + + .. versionadded:: 3.3 + """ + pass + + +class HTTPServerConnectionDelegate(object): + """Implement this interface to handle requests from `.HTTPServer`. + + .. versionadded:: 3.3 + """ + def start_request(self, server_conn, request_conn): + """This method is called by the server when a new request has started. + + :arg server_conn: is an opaque object representing the long-lived + (e.g. tcp-level) connection. + :arg request_conn: is a `.HTTPConnection` object for a single + request/response exchange. + + This method should return a `.HTTPMessageDelegate`. + """ + raise NotImplementedError() + + def on_close(self, server_conn): + """This method is called when a connection has been closed. + + :arg server_conn: is a server connection that has previously been + passed to ``start_request``. + """ + pass + + +class HTTPMessageDelegate(object): + """Implement this interface to handle an HTTP request or response. + + .. versionadded:: 3.3 + """ + def headers_received(self, start_line, headers): + """Called when the HTTP headers have been received and parsed. + + :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` + depending on whether this is a client or server message. + :arg headers: a `.HTTPHeaders` instance. + + Some `.HTTPConnection` methods can only be called during + ``headers_received``. + + May return a `.Future`; if it does the body will not be read + until it is done. + """ + pass + + def data_received(self, chunk): + """Called when a chunk of data has been received. + + May return a `.Future` for flow control. + """ + pass + + def finish(self): + """Called after the last chunk of data has been received.""" + pass + + def on_connection_close(self): + """Called if the connection is closed without finishing the request. + + If ``headers_received`` is called, either ``finish`` or + ``on_connection_close`` will be called, but not both. + """ + pass + + +class HTTPConnection(object): + """Applications use this interface to write their responses. + + .. 
versionadded:: 3.3 + """ + def write_headers(self, start_line, headers, chunk=None, callback=None): + """Write an HTTP header block. + + :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`. + :arg headers: a `.HTTPHeaders` instance. + :arg chunk: the first (optional) chunk of data. This is an optimization + so that small responses can be written in the same call as their + headers. + :arg callback: a callback to be run when the write is complete. + + Returns a `.Future` if no callback is given. + """ + raise NotImplementedError() + + def write(self, chunk, callback=None): + """Writes a chunk of body data. + + The callback will be run when the write is complete. If no callback + is given, returns a Future. + """ + raise NotImplementedError() + + def finish(self): + """Indicates that the last body data has been written. + """ + raise NotImplementedError() + + def url_concat(url, args): """Concatenate url and argument dictionary regardless of whether url has existing query parameters. @@ -310,7 +659,7 @@ def _int_or_none(val): return int(val) -def parse_body_arguments(content_type, body, arguments, files): +def parse_body_arguments(content_type, body, arguments, files, headers=None): """Parses a form request body. Supports ``application/x-www-form-urlencoded`` and @@ -319,6 +668,10 @@ def parse_body_arguments(content_type, body, arguments, files): and ``files`` parameters are dictionaries that will be updated with the parsed contents. """ + if headers and 'Content-Encoding' in headers: + gen_log.warning("Unsupported Content-Encoding: %s", + headers['Content-Encoding']) + return if content_type.startswith("application/x-www-form-urlencoded"): try: uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) @@ -405,6 +758,48 @@ def format_timestamp(ts): raise TypeError("unknown timestamp type: %r" % ts) return email.utils.formatdate(ts, usegmt=True) + +RequestStartLine = collections.namedtuple( + 'RequestStartLine', ['method', 'path', 'version']) + + +def parse_request_start_line(line): + """Returns a (method, path, version) tuple for an HTTP 1.x request line. + + The response is a `collections.namedtuple`. + + >>> parse_request_start_line("GET /foo HTTP/1.1") + RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') + """ + try: + method, path, version = line.split(" ") + except ValueError: + raise HTTPInputException("Malformed HTTP request line") + if not version.startswith("HTTP/"): + raise HTTPInputException( + "Malformed HTTP version in HTTP Request-Line: %r" % version) + return RequestStartLine(method, path, version) + + +ResponseStartLine = collections.namedtuple( + 'ResponseStartLine', ['version', 'code', 'reason']) + + +def parse_response_start_line(line): + """Returns a (version, code, reason) tuple for an HTTP 1.x response line. + + The response is a `collections.namedtuple`. + + >>> parse_response_start_line("HTTP/1.1 200 OK") + ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') + """ + line = native_str(line) + match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line) + if not match: + raise HTTPInputException("Error parsing response start line") + return ResponseStartLine(match.group(1), int(match.group(2)), + match.group(3)) + # _parseparam and _parse_header are copied and modified from python2.7's cgi.py # The original 2.7 version of this code did not correctly support some # combinations of semicolons and double quotes. 
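For orientation, here is a minimal sketch of how the delegate interfaces added above can be consumed. It uses only names defined in this hunk (`HTTPMessageDelegate`, `parse_request_start_line`); `EchoDelegate` is a hypothetical name, not part of the patch:

    # Hypothetical consumer of the new httputil interfaces (sketch only).
    from tornado import httputil

    class EchoDelegate(httputil.HTTPMessageDelegate):
        """Collects one message's body chunks as they arrive."""
        def __init__(self):
            self.chunks = []

        def headers_received(self, start_line, headers):
            # start_line is a RequestStartLine or ResponseStartLine namedtuple.
            self.start_line = start_line
            self.headers = headers

        def data_received(self, chunk):
            self.chunks.append(chunk)

        def finish(self):
            self.body = b''.join(self.chunks)

    # The start-line parsers are usable on their own:
    line = httputil.parse_request_start_line("GET /foo HTTP/1.1")
    assert line.method == "GET" and line.path == "/foo"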
diff --git a/libs/tornado/ioloop.py b/libs/tornado/ioloop.py index e7b84dd7..cd59bfee 100755 --- a/libs/tornado/ioloop.py +++ b/libs/tornado/ioloop.py @@ -32,6 +32,7 @@ import datetime import errno import functools import heapq +import itertools import logging import numbers import os @@ -41,10 +42,11 @@ import threading import time import traceback -from tornado.concurrent import Future, TracebackFuture +from tornado.concurrent import TracebackFuture, is_future from tornado.log import app_log, gen_log from tornado import stack_context from tornado.util import Configurable +from tornado.util import errno_from_exception try: import signal @@ -156,6 +158,15 @@ class IOLoop(Configurable): assert not IOLoop.initialized() IOLoop._instance = self + @staticmethod + def clear_instance(): + """Clear the global `IOLoop` instance. + + .. versionadded:: 3.3 + """ + if hasattr(IOLoop, "_instance"): + del IOLoop._instance + @staticmethod def current(): """Returns the current thread's `IOLoop`. @@ -244,21 +255,40 @@ class IOLoop(Configurable): raise NotImplementedError() def add_handler(self, fd, handler, events): - """Registers the given handler to receive the given events for fd. + """Registers the given handler to receive the given events for ``fd``. + + The ``fd`` argument may either be an integer file descriptor or + a file-like object with a ``fileno()`` method (and optionally a + ``close()`` method, which may be called when the `IOLoop` is shut + down). The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. + + .. versionchanged:: 3.3 + Added the ability to pass file-like objects in addition to + raw file descriptors. """ raise NotImplementedError() def update_handler(self, fd, events): - """Changes the events we listen for fd.""" + """Changes the events we listen for ``fd``. + + .. versionchanged:: 3.3 + Added the ability to pass file-like objects in addition to + raw file descriptors. + """ raise NotImplementedError() def remove_handler(self, fd): - """Stop listening for events on fd.""" + """Stop listening for events on ``fd``. + + .. versionchanged:: 3.3 + Added the ability to pass file-like objects in addition to + raw file descriptors. + """ raise NotImplementedError() def set_blocking_signal_threshold(self, seconds, action): @@ -372,7 +402,7 @@ class IOLoop(Configurable): future_cell[0] = TracebackFuture() future_cell[0].set_exc_info(sys.exc_info()) else: - if isinstance(result, Future): + if is_future(result): future_cell[0] = result else: future_cell[0] = TracebackFuture() @@ -463,7 +493,7 @@ class IOLoop(Configurable): The callback is invoked with one argument, the `.Future`. """ - assert isinstance(future, Future) + assert is_future(future) callback = stack_context.wrap(callback) future.add_done_callback( lambda future: self.add_callback(callback, future)) @@ -490,6 +520,47 @@ class IOLoop(Configurable): """ app_log.error("Exception in callback %r", callback, exc_info=True) + def split_fd(self, fd): + """Returns an (fd, obj) pair from an ``fd`` parameter. + + We accept both raw file descriptors and file-like objects as + input to `add_handler` and related methods. When a file-like + object is passed, we must retain the object itself so we can + close it correctly when the `IOLoop` shuts down, but the + poller interfaces favor file descriptors (they will accept + file-like objects and call ``fileno()`` for you, but they + always return the descriptor itself). 
+ + This method is provided for use by `IOLoop` subclasses and should + not generally be used by application code. + + .. versionadded:: 3.3 + """ + try: + return fd.fileno(), fd + except AttributeError: + return fd, fd + + def close_fd(self, fd): + """Utility method to close an ``fd``. + + If ``fd`` is a file-like object, we close it directly; otherwise + we use `os.close`. + + This method is provided for use by `IOLoop` subclasses (in + implementations of ``IOLoop.close(all_fds=True)`` and should + not generally be used by application code. + + .. versionadded:: 3.3 + """ + try: + try: + fd.close() + except AttributeError: + os.close(fd) + except OSError: + pass + class PollIOLoop(IOLoop): """Base class for IOLoops built around a select-like function. @@ -515,7 +586,8 @@ class PollIOLoop(IOLoop): self._closing = False self._thread_ident = None self._blocking_signal_threshold = None - + self._timeout_counter = itertools.count() + # Create a pipe that we send bogus data to when we want to wake # the I/O loop when it is idle self._waker = Waker() @@ -528,26 +600,24 @@ class PollIOLoop(IOLoop): self._closing = True self.remove_handler(self._waker.fileno()) if all_fds: - for fd in self._handlers.keys(): - try: - close_method = getattr(fd, 'close', None) - if close_method is not None: - close_method() - else: - os.close(fd) - except Exception: - gen_log.debug("error closing fd %s", fd, exc_info=True) + for fd, handler in self._handlers.values(): + self.close_fd(fd) self._waker.close() self._impl.close() + self._callbacks = None + self._timeouts = None def add_handler(self, fd, handler, events): - self._handlers[fd] = stack_context.wrap(handler) + fd, obj = self.split_fd(fd) + self._handlers[fd] = (obj, stack_context.wrap(handler)) self._impl.register(fd, events | self.ERROR) def update_handler(self, fd, events): + fd, obj = self.split_fd(fd) self._impl.modify(fd, events | self.ERROR) def remove_handler(self, fd): + fd, obj = self.split_fd(fd) self._handlers.pop(fd, None) self._events.pop(fd, None) try: @@ -566,6 +636,8 @@ class PollIOLoop(IOLoop): action if action is not None else signal.SIG_DFL) def start(self): + if self._running: + raise RuntimeError("IOLoop is already running") self._setup_logging() if self._stopped: self._stopped = False @@ -666,9 +738,7 @@ class PollIOLoop(IOLoop): # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') - if (getattr(e, 'errno', None) == errno.EINTR or - (isinstance(getattr(e, 'args', None), tuple) and - len(e.args) == 2 and e.args[0] == errno.EINTR)): + if errno_from_exception(e) == errno.EINTR: continue else: raise @@ -685,15 +755,17 @@ class PollIOLoop(IOLoop): while self._events: fd, events = self._events.popitem() try: - self._handlers[fd](fd, events) + fd_obj, handler_func = self._handlers[fd] + handler_func(fd_obj, events) except (OSError, IOError) as e: - if e.args[0] == errno.EPIPE: + if errno_from_exception(e) == errno.EPIPE: # Happens when the client closes the connection pass else: self.handle_callback_exception(self._handlers.get(fd)) except Exception: self.handle_callback_exception(self._handlers.get(fd)) + fd_obj = handler_func = None finally: # reset the stopped flag so another start/stop pair can be issued @@ -765,16 +837,21 @@ class _Timeout(object): """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks - __slots__ = ['deadline', 'callback'] + __slots__ = ['deadline', 'callback', 'tiebreaker'] def 
__init__(self, deadline, callback, io_loop): if isinstance(deadline, numbers.Real): self.deadline = deadline elif isinstance(deadline, datetime.timedelta): - self.deadline = io_loop.time() + _Timeout.timedelta_to_seconds(deadline) + now = io_loop.time() + try: + self.deadline = now + deadline.total_seconds() + except AttributeError: # py2.6 + self.deadline = now + _Timeout.timedelta_to_seconds(deadline) else: raise TypeError("Unsupported deadline %r" % deadline) self.callback = callback + self.tiebreaker = next(io_loop._timeout_counter) @staticmethod def timedelta_to_seconds(td): @@ -786,12 +863,12 @@ class _Timeout(object): # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other): - return ((self.deadline, id(self)) < - (other.deadline, id(other))) + return ((self.deadline, self.tiebreaker) < + (other.deadline, other.tiebreaker)) def __le__(self, other): - return ((self.deadline, id(self)) <= - (other.deadline, id(other))) + return ((self.deadline, self.tiebreaker) <= + (other.deadline, other.tiebreaker)) class PeriodicCallback(object): diff --git a/libs/tornado/iostream.py b/libs/tornado/iostream.py index 5d4d08ac..3874bf75 100755 --- a/libs/tornado/iostream.py +++ b/libs/tornado/iostream.py @@ -31,21 +31,27 @@ import errno import numbers import os import socket -import ssl import sys import re +from tornado.concurrent import TracebackFuture from tornado import ioloop from tornado.log import gen_log, app_log from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError from tornado import stack_context -from tornado.util import bytes_type +from tornado.util import bytes_type, errno_from_exception try: from tornado.platform.posix import _set_nonblocking except ImportError: _set_nonblocking = None +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine + ssl = None + # These errnos indicate that a non-blocking operation must be retried # at a later time. On most platforms they're the same value, but on # some they differ. @@ -66,12 +72,31 @@ class StreamClosedError(IOError): pass +class UnsatisfiableReadError(Exception): + """Exception raised when a read cannot be satisfied. + + Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` + argument. + """ + pass + + +class StreamBufferFullError(Exception): + """Exception raised by `IOStream` methods when the buffer is full. + """ + + class BaseIOStream(object): """A utility class to write to and read from a non-blocking file or socket. We support a non-blocking ``write()`` and a family of ``read_*()`` methods. - All of the methods take callbacks (since writing and reading are - non-blocking and asynchronous). + All of the methods take an optional ``callback`` argument and return a + `.Future` only if no callback is given. When the operation completes, + the callback will be run or the `.Future` will resolve with the data + read (or ``None`` for ``write()``). All outstanding ``Futures`` will + resolve with a `StreamClosedError` when the stream is closed; users + of the callback interface will be notified via + `.BaseIOStream.set_close_callback` instead. When a stream is closed due to an error, the IOStream's ``error`` attribute contains the exception object. @@ -80,24 +105,48 @@ class BaseIOStream(object): `read_from_fd`, and optionally `get_fd_error`. """ def __init__(self, io_loop=None, max_buffer_size=None, - read_chunk_size=4096): + read_chunk_size=None, max_write_buffer_size=None): + """`BaseIOStream` constructor. 
+ + :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`. + :arg max_buffer_size: Maximum amount of incoming data to buffer; + defaults to 100MB. + :arg read_chunk_size: Amount of data to read at one time from the + underlying transport; defaults to 64KB. + :arg max_write_buffer_size: Amount of outgoing data to buffer; + defaults to unlimited. + + .. versionchanged:: 3.3 + Add the ``max_write_buffer_size`` parameter. Changed default + ``read_chunk_size`` to 64KB. + """ self.io_loop = io_loop or ioloop.IOLoop.current() self.max_buffer_size = max_buffer_size or 104857600 - self.read_chunk_size = read_chunk_size + # A chunk size that is too close to max_buffer_size can cause + # spurious failures. + self.read_chunk_size = min(read_chunk_size or 65536, + self.max_buffer_size // 2) + self.max_write_buffer_size = max_write_buffer_size self.error = None self._read_buffer = collections.deque() self._write_buffer = collections.deque() self._read_buffer_size = 0 + self._write_buffer_size = 0 self._write_buffer_frozen = False self._read_delimiter = None self._read_regex = None + self._read_max_bytes = None self._read_bytes = None + self._read_partial = False self._read_until_close = False self._read_callback = None + self._read_future = None self._streaming_callback = None self._write_callback = None + self._write_future = None self._close_callback = None self._connect_callback = None + self._connect_future = None self._connecting = False self._state = None self._pending_callbacks = 0 @@ -142,98 +191,162 @@ class BaseIOStream(object): """ return None - def read_until_regex(self, regex, callback): - """Run ``callback`` when we read the given regex pattern. + def read_until_regex(self, regex, callback=None, max_bytes=None): + """Asynchronously read until we have matched the given regex. - The callback will get the data read (including the data that - matched the regex and anything that came before it) as an argument. + The result includes the data that matches the regex and anything + that came before it. If a callback is given, it will be run + with the data as an argument; if not, this method returns a + `.Future`. + + If ``max_bytes`` is not None, the connection will be closed + if more than ``max_bytes`` bytes have been read and the regex is + not satisfied. + + .. versionchanged:: 3.3 + Added the ``max_bytes`` argument. The ``callback`` argument is + now optional and a `.Future` will be returned if it is omitted. """ - self._set_read_callback(callback) + future = self._set_read_callback(callback) self._read_regex = re.compile(regex) - self._try_inline_read() + self._read_max_bytes = max_bytes + try: + self._try_inline_read() + except UnsatisfiableReadError as e: + # Handle this the same way as in _handle_events. + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=True) + return future + return future - def read_until(self, delimiter, callback): - """Run ``callback`` when we read the given delimiter. + def read_until(self, delimiter, callback=None, max_bytes=None): + """Asynchronously read until we have found the given delimiter. - The callback will get the data read (including the delimiter) - as an argument. + The result includes all the data read including the delimiter. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. + + If ``max_bytes`` is not None, the connection will be closed + if more than ``max_bytes`` bytes have been read and the delimiter + is not found. + + .. 
versionchanged:: 3.3 + Added the ``max_bytes`` argument. The ``callback`` argument is + now optional and a `.Future` will be returned if it is omitted. """ - self._set_read_callback(callback) + future = self._set_read_callback(callback) self._read_delimiter = delimiter - self._try_inline_read() + self._read_max_bytes = max_bytes + try: + self._try_inline_read() + except UnsatisfiableReadError as e: + # Handle this the same way as in _handle_events. + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=True) + return future + return future - def read_bytes(self, num_bytes, callback, streaming_callback=None): - """Run callback when we read the given number of bytes. + def read_bytes(self, num_bytes, callback=None, streaming_callback=None, + partial=False): + """Asynchronously read a number of bytes. If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the argument to the final - ``callback`` will be empty. Otherwise, the ``callback`` gets - the data as an argument. + of data as they become available, and the final result will be empty. + Otherwise, the result is all the data that was read. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. + + If ``partial`` is true, the callback is run as soon as we have + any bytes to return (but never more than ``num_bytes``) + + .. versionchanged:: 3.3 + Added the ``partial`` argument. The callback argument is now + optional and a `.Future` will be returned if it is omitted. """ - self._set_read_callback(callback) + future = self._set_read_callback(callback) assert isinstance(num_bytes, numbers.Integral) self._read_bytes = num_bytes + self._read_partial = partial self._streaming_callback = stack_context.wrap(streaming_callback) self._try_inline_read() + return future - def read_until_close(self, callback, streaming_callback=None): - """Reads all data from the socket until it is closed. + def read_until_close(self, callback=None, streaming_callback=None): + """Asynchronously reads all data from the socket until it is closed. If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the argument to the final - ``callback`` will be empty. Otherwise, the ``callback`` gets the - data as an argument. + of data as they become available, and the final result will be empty. + Otherwise, the result is all the data that was read. + If a callback is given, it will be run with the data as an argument; + if not, this method returns a `.Future`. - Subject to ``max_buffer_size`` limit from `IOStream` constructor if - a ``streaming_callback`` is not used. + .. versionchanged:: 3.3 + The callback argument is now optional and a `.Future` will + be returned if it is omitted. 
""" - self._set_read_callback(callback) + future = self._set_read_callback(callback) self._streaming_callback = stack_context.wrap(streaming_callback) if self.closed(): if self._streaming_callback is not None: - self._run_callback(self._streaming_callback, - self._consume(self._read_buffer_size)) - self._run_callback(self._read_callback, - self._consume(self._read_buffer_size)) - self._streaming_callback = None - self._read_callback = None - return + self._run_read_callback(self._read_buffer_size, True) + self._run_read_callback(self._read_buffer_size, False) + return future self._read_until_close = True - self._streaming_callback = stack_context.wrap(streaming_callback) self._try_inline_read() + return future def write(self, data, callback=None): - """Write the given data to this stream. + """Asynchronously write the given data to this stream. If ``callback`` is given, we call it when all of the buffered write data has been successfully written to the stream. If there was previously buffered write data and an old write callback, that callback is simply overwritten with this new callback. + + If no ``callback`` is given, this method returns a `.Future` that + resolves (with a result of ``None``) when the write has been + completed. If `write` is called again before that `.Future` has + resolved, the previous future will be orphaned and will never resolve. + + .. versionchanged:: 3.3 + Now returns a `.Future` if no callback is given. """ assert isinstance(data, bytes_type) self._check_closed() # We use bool(_write_buffer) as a proxy for write_buffer_size>0, # so never put empty strings in the buffer. if data: + if (self.max_write_buffer_size is not None and + self._write_buffer_size + len(data) > self.max_write_buffer_size): + raise StreamBufferFullError("Reached maximum read buffer size") # Break up large contiguous strings before inserting them in the # write buffer, so we don't have to recopy the entire thing # as we slice off pieces to send to the socket. WRITE_BUFFER_CHUNK_SIZE = 128 * 1024 - if len(data) > WRITE_BUFFER_CHUNK_SIZE: - for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE): - self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE]) - else: - self._write_buffer.append(data) - self._write_callback = stack_context.wrap(callback) + for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE): + self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE]) + self._write_buffer_size += len(data) + if callback is not None: + self._write_callback = stack_context.wrap(callback) + future = None + else: + future = self._write_future = TracebackFuture() if not self._connecting: self._handle_write() if self._write_buffer: self._add_io_state(self.io_loop.WRITE) self._maybe_add_error_listener() + return future def set_close_callback(self, callback): - """Call the given callback when the stream is closed.""" + """Call the given callback when the stream is closed. + + This is not necessary for applications that use the `.Future` + interface; all outstanding ``Futures`` will resolve with a + `StreamClosedError` when the stream is closed. + """ self._close_callback = stack_context.wrap(callback) + self._maybe_add_error_listener() def close(self, exc_info=False): """Close this stream. 
@@ -251,13 +364,9 @@ class BaseIOStream(object): if self._read_until_close: if (self._streaming_callback is not None and self._read_buffer_size): - self._run_callback(self._streaming_callback, - self._consume(self._read_buffer_size)) - callback = self._read_callback - self._read_callback = None + self._run_read_callback(self._read_buffer_size, True) self._read_until_close = False - self._run_callback(callback, - self._consume(self._read_buffer_size)) + self._run_read_callback(self._read_buffer_size, False) if self._state is not None: self.io_loop.remove_handler(self.fileno()) self._state = None @@ -269,6 +378,25 @@ class BaseIOStream(object): # If there are pending callbacks, don't run the close callback # until they're done (see _maybe_add_error_handler) if self.closed() and self._pending_callbacks == 0: + futures = [] + if self._read_future is not None: + futures.append(self._read_future) + self._read_future = None + if self._write_future is not None: + futures.append(self._write_future) + self._write_future = None + if self._connect_future is not None: + futures.append(self._connect_future) + self._connect_future = None + for future in futures: + if (isinstance(self.error, (socket.error, IOError)) and + errno_from_exception(self.error) in _ERRNO_CONNRESET): + # Treat connection resets as closed connections so + # clients only have to catch one kind of exception + # to avoid logging. + future.set_exception(StreamClosedError()) + else: + future.set_exception(self.error or StreamClosedError()) if self._close_callback is not None: cb = self._close_callback self._close_callback = None @@ -282,7 +410,7 @@ class BaseIOStream(object): def reading(self): """Returns true if we are currently reading from the stream.""" - return self._read_callback is not None + return self._read_callback is not None or self._read_future is not None def writing(self): """Returns true if we are currently writing to the stream.""" @@ -309,16 +437,22 @@ class BaseIOStream(object): def _handle_events(self, fd, events): if self.closed(): - gen_log.warning("Got events for closed stream %d", fd) + gen_log.warning("Got events for closed stream %s", fd) return try: + if self._connecting: + # Most IOLoops will report a write failed connect + # with the WRITE event, but SelectIOLoop reports a + # READ as well so we must check for connecting before + # either. + self._handle_connect() + if self.closed(): + return if events & self.io_loop.READ: self._handle_read() if self.closed(): return if events & self.io_loop.WRITE: - if self._connecting: - self._handle_connect() self._handle_write() if self.closed(): return @@ -334,13 +468,20 @@ class BaseIOStream(object): state |= self.io_loop.READ if self.writing(): state |= self.io_loop.WRITE - if state == self.io_loop.ERROR: + if state == self.io_loop.ERROR and self._read_buffer_size == 0: + # If the connection is idle, listen for reads too so + # we can tell if the connection is closed. If there is + # data in the read buffer we won't run the close callback + # yet anyway, so we don't need to listen in this case. 
state |= self.io_loop.READ if state != self._state: assert self._state is not None, \ "shouldn't happen: _handle_events without self._state" self._state = state self.io_loop.update_handler(self.fileno(), self._state) + except UnsatisfiableReadError as e: + gen_log.info("Unsatisfiable read, closing connection: %s" % e) + self.close(exc_info=True) except Exception: gen_log.error("Uncaught exception, closing connection.", exc_info=True) @@ -381,42 +522,108 @@ class BaseIOStream(object): self._pending_callbacks += 1 self.io_loop.add_callback(wrapper) + def _read_to_buffer_loop(self): + # This method is called from _handle_read and _try_inline_read. + try: + if self._read_bytes is not None: + target_bytes = self._read_bytes + elif self._read_max_bytes is not None: + target_bytes = self._read_max_bytes + elif self.reading(): + # For read_until without max_bytes, or + # read_until_close, read as much as we can before + # scanning for the delimiter. + target_bytes = None + else: + target_bytes = 0 + next_find_pos = 0 + # Pretend to have a pending callback so that an EOF in + # _read_to_buffer doesn't trigger an immediate close + # callback. At the end of this method we'll either + # establish a real pending callback via + # _read_from_buffer or run the close callback. + # + # We need two try statements here so that + # pending_callbacks is decremented before the `except` + # clause below (which calls `close` and does need to + # trigger the callback) + self._pending_callbacks += 1 + while not self.closed(): + # Read from the socket until we get EWOULDBLOCK or equivalent. + # SSL sockets do some internal buffering, and if the data is + # sitting in the SSL object's buffer select() and friends + # can't see it; the only way to find out if it's there is to + # try to read it. + if self._read_to_buffer() == 0: + break + + self._run_streaming_callback() + + # If we've read all the bytes we can use, break out of + # this loop. We can't just call read_from_buffer here + # because of subtle interactions with the + # pending_callback and error_listener mechanisms. + # + # If we've reached target_bytes, we know we're done. + if (target_bytes is not None and + self._read_buffer_size >= target_bytes): + break + + # Otherwise, we need to call the more expensive find_read_pos. + # It's inefficient to do this on every read, so instead + # do it on the first read and whenever the read buffer + # size has doubled. + if self._read_buffer_size >= next_find_pos: + pos = self._find_read_pos() + if pos is not None: + return pos + next_find_pos = self._read_buffer_size * 2 + return self._find_read_pos() + finally: + self._pending_callbacks -= 1 + def _handle_read(self): try: - try: - # Pretend to have a pending callback so that an EOF in - # _read_to_buffer doesn't trigger an immediate close - # callback. At the end of this method we'll either - # estabilsh a real pending callback via - # _read_from_buffer or run the close callback. - # - # We need two try statements here so that - # pending_callbacks is decremented before the `except` - # clause below (which calls `close` and does need to - # trigger the callback) - self._pending_callbacks += 1 - while not self.closed(): - # Read from the socket until we get EWOULDBLOCK or equivalent. - # SSL sockets do some internal buffering, and if the data is - # sitting in the SSL object's buffer select() and friends - # can't see it; the only way to find out if it's there is to - # try to read it. 
- if self._read_to_buffer() == 0: - break - finally: - self._pending_callbacks -= 1 + pos = self._read_to_buffer_loop() + except UnsatisfiableReadError: + raise except Exception: gen_log.warning("error on read", exc_info=True) self.close(exc_info=True) return - if self._read_from_buffer(): + if pos is not None: + self._read_from_buffer(pos) return else: self._maybe_run_close_callback() def _set_read_callback(self, callback): - assert not self._read_callback, "Already reading" - self._read_callback = stack_context.wrap(callback) + assert self._read_callback is None, "Already reading" + assert self._read_future is None, "Already reading" + if callback is not None: + self._read_callback = stack_context.wrap(callback) + else: + self._read_future = TracebackFuture() + return self._read_future + + def _run_read_callback(self, size, streaming): + if streaming: + callback = self._streaming_callback + else: + callback = self._read_callback + self._read_callback = self._streaming_callback = None + if self._read_future is not None: + assert callback is None + future = self._read_future + self._read_future = None + future.set_result(self._consume(size)) + if callback is not None: + assert self._read_future is None + self._run_callback(callback, self._consume(size)) + else: + # If we scheduled a callback, we will add the error listener + # afterwards. If we didn't, we have to do it now. + self._maybe_add_error_listener() def _try_inline_read(self): """Attempt to complete the current read operation from buffered data. @@ -426,18 +633,14 @@ class BaseIOStream(object): listening for reads on the socket. """ # See if we've already got the data from a previous read - if self._read_from_buffer(): + self._run_streaming_callback() + pos = self._find_read_pos() + if pos is not None: + self._read_from_buffer(pos) return self._check_closed() try: - try: - # See comments in _handle_read about incrementing _pending_callbacks - self._pending_callbacks += 1 - while not self.closed(): - if self._read_to_buffer() == 0: - break - finally: - self._pending_callbacks -= 1 + pos = self._read_to_buffer_loop() except Exception: # If there was an in _read_to_buffer, we called close() already, # but couldn't run the close callback because of _pending_callbacks. @@ -445,9 +648,15 @@ class BaseIOStream(object): # applicable. self._maybe_run_close_callback() raise - if self._read_from_buffer(): + if pos is not None: + self._read_from_buffer(pos) return - self._maybe_add_error_listener() + # We couldn't satisfy the read inline, so either close the stream + # or listen for new data. + if self.closed(): + self._maybe_run_close_callback() + else: + self._add_io_state(ioloop.IOLoop.READ) def _read_to_buffer(self): """Reads from the socket and appends the result to the read buffer. @@ -472,32 +681,42 @@ class BaseIOStream(object): return 0 self._read_buffer.append(chunk) self._read_buffer_size += len(chunk) - if self._read_buffer_size >= self.max_buffer_size: + if self._read_buffer_size > self.max_buffer_size: gen_log.error("Reached maximum read buffer size") self.close() - raise IOError("Reached maximum read buffer size") + raise StreamBufferFullError("Reached maximum read buffer size") return len(chunk) - def _read_from_buffer(self): - """Attempts to complete the currently-pending read from the buffer. - - Returns True if the read was completed. 
- """ + def _run_streaming_callback(self): if self._streaming_callback is not None and self._read_buffer_size: bytes_to_consume = self._read_buffer_size if self._read_bytes is not None: bytes_to_consume = min(self._read_bytes, bytes_to_consume) self._read_bytes -= bytes_to_consume - self._run_callback(self._streaming_callback, - self._consume(bytes_to_consume)) - if self._read_bytes is not None and self._read_buffer_size >= self._read_bytes: - num_bytes = self._read_bytes - callback = self._read_callback - self._read_callback = None - self._streaming_callback = None - self._read_bytes = None - self._run_callback(callback, self._consume(num_bytes)) - return True + self._run_read_callback(bytes_to_consume, True) + + def _read_from_buffer(self, pos): + """Attempts to complete the currently-pending read from the buffer. + + The argument is either a position in the read buffer or None, + as returned by _find_read_pos. + """ + self._read_bytes = self._read_delimiter = self._read_regex = None + self._read_partial = False + self._run_read_callback(pos, False) + + def _find_read_pos(self): + """Attempts to find a position in the read buffer that satisfies + the currently-pending read. + + Returns a position in the buffer if the current read can be satisfied, + or None if it cannot. + """ + if (self._read_bytes is not None and + (self._read_buffer_size >= self._read_bytes or + (self._read_partial and self._read_buffer_size > 0))): + num_bytes = min(self._read_bytes, self._read_buffer_size) + return num_bytes elif self._read_delimiter is not None: # Multi-byte delimiters (e.g. '\r\n') may straddle two # chunks in the read buffer, so we can't easily find them @@ -506,37 +725,40 @@ class BaseIOStream(object): # length) tend to be "line" oriented, the delimiter is likely # to be in the first few chunks. Merge the buffer gradually # since large merges are relatively expensive and get undone in - # consume(). + # _consume(). 
if self._read_buffer: while True: loc = self._read_buffer[0].find(self._read_delimiter) if loc != -1: - callback = self._read_callback delimiter_len = len(self._read_delimiter) - self._read_callback = None - self._streaming_callback = None - self._read_delimiter = None - self._run_callback(callback, - self._consume(loc + delimiter_len)) - return True + self._check_max_bytes(self._read_delimiter, + loc + delimiter_len) + return loc + delimiter_len if len(self._read_buffer) == 1: break _double_prefix(self._read_buffer) + self._check_max_bytes(self._read_delimiter, + len(self._read_buffer[0])) elif self._read_regex is not None: if self._read_buffer: while True: m = self._read_regex.search(self._read_buffer[0]) if m is not None: - callback = self._read_callback - self._read_callback = None - self._streaming_callback = None - self._read_regex = None - self._run_callback(callback, self._consume(m.end())) - return True + self._check_max_bytes(self._read_regex, m.end()) + return m.end() if len(self._read_buffer) == 1: break _double_prefix(self._read_buffer) - return False + self._check_max_bytes(self._read_regex, + len(self._read_buffer[0])) + return None + + def _check_max_bytes(self, delimiter, size): + if (self._read_max_bytes is not None and + size > self._read_max_bytes): + raise UnsatisfiableReadError( + "delimiter %r not found within %d bytes" % ( + delimiter, self._read_max_bytes)) def _handle_write(self): while self._write_buffer: @@ -563,6 +785,7 @@ class BaseIOStream(object): self._write_buffer_frozen = False _merge_prefix(self._write_buffer, num_bytes) self._write_buffer.popleft() + self._write_buffer_size -= num_bytes except (socket.error, IOError, OSError) as e: if e.args[0] in _ERRNO_WOULDBLOCK: self._write_buffer_frozen = True @@ -572,14 +795,19 @@ class BaseIOStream(object): # Broken pipe errors are usually caused by connection # reset, and its better to not log EPIPE errors to # minimize log spam - gen_log.warning("Write error on %d: %s", + gen_log.warning("Write error on %s: %s", self.fileno(), e) self.close(exc_info=True) return - if not self._write_buffer and self._write_callback: - callback = self._write_callback - self._write_callback = None - self._run_callback(callback) + if not self._write_buffer: + if self._write_callback: + callback = self._write_callback + self._write_callback = None + self._run_callback(callback) + if self._write_future: + future = self._write_future + self._write_future = None + future.set_result(None) def _consume(self, loc): if loc == 0: @@ -593,10 +821,19 @@ class BaseIOStream(object): raise StreamClosedError("Stream is closed") def _maybe_add_error_listener(self): - if self._state is None and self._pending_callbacks == 0: + # This method is part of an optimization: to detect a connection that + # is closed when we're not actively reading or writing, we must listen + # for read events. However, it is inefficient to do this when the + # connection is first established because we are going to read or write + # immediately anyway. Instead, we insert checks at various times to + # see if the connection is idle and add the read listener then. 
+ if self._pending_callbacks != 0: + return + if self._state is None or self._state == ioloop.IOLoop.ERROR: + if self.closed(): + self._maybe_run_close_callback() - else: + elif (self._read_buffer_size == 0 and + self._close_callback is not None): + self._add_io_state(ioloop.IOLoop.READ) def _add_io_state(self, state): @@ -680,7 +917,7 @@ class IOStream(BaseIOStream): super(IOStream, self).__init__(*args, **kwargs) def fileno(self): - return self.socket.fileno() + return self.socket def close_fd(self): self.socket.close() @@ -714,7 +951,9 @@ class IOStream(BaseIOStream): not previously connected. The address parameter is in the same format as for `socket.connect <socket.socket.connect>`, i.e. a ``(host, port)`` tuple. If ``callback`` is specified, - it will be called when the connection is completed. + it will be called with no arguments when the connection is + completed; if not, this method returns a `.Future` (whose result + after a successful connection will be the stream itself). If specified, the ``server_hostname`` parameter will be used in SSL connections for certificate validation (if requested in @@ -726,6 +965,9 @@ which case the data will be written as soon as the connection is ready. Calling `IOStream` read methods before the socket is connected works on some platforms but is non-portable. + + .. versionchanged:: 3.3 + If no callback is given, returns a `.Future`. """ self._connecting = True try: @@ -738,14 +980,83 @@ # returned immediately when attempting to connect to # localhost, so handle them the same way as an error # reported later in _handle_connect. - if (e.args[0] != errno.EINPROGRESS and - e.args[0] not in _ERRNO_WOULDBLOCK): - gen_log.warning("Connect error on fd %d: %s", + if (errno_from_exception(e) != errno.EINPROGRESS and + errno_from_exception(e) not in _ERRNO_WOULDBLOCK): + gen_log.warning("Connect error on fd %s: %s", self.socket.fileno(), e) self.close(exc_info=True) return - self._connect_callback = stack_context.wrap(callback) + if callback is not None: + self._connect_callback = stack_context.wrap(callback) + future = None + else: + future = self._connect_future = TracebackFuture() self._add_io_state(self.io_loop.WRITE) + return future + + def start_tls(self, server_side, ssl_options=None, server_hostname=None): + """Convert this `IOStream` to an `SSLIOStream`. + + This enables protocols that begin in clear-text mode and + switch to SSL after some initial negotiation (such as the + ``STARTTLS`` extension to SMTP and IMAP). + + This method cannot be used if there are outstanding reads + or writes on the stream, or if there is any data in the + IOStream's buffer (data in the operating system's socket + buffer is allowed). This means it must generally be used + immediately after reading or writing the last clear-text + data. It can also be used immediately after connecting, + before any reads or writes. + + The ``ssl_options`` argument may be either a dictionary + of options or an `ssl.SSLContext`. If a ``server_hostname`` + is given, it will be used for certificate verification + (as configured in the ``ssl_options``). + + This method returns a `.Future` whose result is the new + `SSLIOStream`. After this method has been called, + any other operation on the original stream is undefined. + + If a close callback is defined on this stream, it will be + transferred to the new stream. + + .. 
versionadded:: 3.3 + """ + if (self._read_callback or self._read_future or + self._write_callback or self._write_future or + self._connect_callback or self._connect_future or + self._pending_callbacks or self._closed or + self._read_buffer or self._write_buffer): + raise ValueError("IOStream is not idle; cannot convert to SSL") + if ssl_options is None: + ssl_options = {} + + socket = self.socket + self.io_loop.remove_handler(socket) + self.socket = None + socket = ssl_wrap_socket(socket, ssl_options, server_side=server_side, + do_handshake_on_connect=False) + orig_close_callback = self._close_callback + self._close_callback = None + + future = TracebackFuture() + ssl_stream = SSLIOStream(socket, ssl_options=ssl_options, + io_loop=self.io_loop) + # Wrap the original close callback so we can fail our Future as well. + # If we had an "unwrap" counterpart to this method we would need + # to restore the original callback after our Future resolves + # so that repeated wrap/unwrap calls don't build up layers. + def close_callback(): + if not future.done(): + future.set_exception(ssl_stream.error or StreamClosedError()) + if orig_close_callback is not None: + orig_close_callback() + ssl_stream.set_close_callback(close_callback) + ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream) + ssl_stream.max_buffer_size = self.max_buffer_size + ssl_stream.read_chunk_size = self.read_chunk_size + return future def _handle_connect(self): err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) @@ -755,14 +1066,19 @@ class IOStream(BaseIOStream): # an error state before the socket becomes writable, so # in that case a connection failure would be handled by the # error path in _handle_events instead of here. - gen_log.warning("Connect error on fd %d: %s", - self.socket.fileno(), errno.errorcode[err]) + if self._connect_future is None: + gen_log.warning("Connect error on fd %s: %s", + self.socket.fileno(), errno.errorcode[err]) self.close() return if self._connect_callback is not None: callback = self._connect_callback self._connect_callback = None self._run_callback(callback) + if self._connect_future is not None: + future = self._connect_future + self._connect_future = None + future.set_result(self) self._connecting = False def set_nodelay(self, value): @@ -841,7 +1157,7 @@ class SSLIOStream(IOStream): peer = self.socket.getpeername() except Exception: peer = '(not connected)' - gen_log.warning("SSL Error on %d %s: %s", + gen_log.warning("SSL Error on %s %s: %s", self.socket.fileno(), peer, err) return self.close(exc_info=True) raise @@ -907,19 +1223,33 @@ class SSLIOStream(IOStream): # has completed. self._ssl_connect_callback = stack_context.wrap(callback) self._server_hostname = server_hostname - super(SSLIOStream, self).connect(address, callback=None) + # Note: Since we don't pass our callback argument along to + # super.connect(), this will always return a Future. + # This is harmless, but a bit less efficient than it could be. + return super(SSLIOStream, self).connect(address, callback=None) def _handle_connect(self): + # Call the superclass method to check for errors. + super(SSLIOStream, self)._handle_connect() + if self.closed(): + return # When the connection is complete, wrap the socket for SSL # traffic. 
Note that we do this by overriding _handle_connect # instead of by passing a callback to super().connect because # user callbacks are enqueued asynchronously on the IOLoop, # but since _handle_events calls _handle_connect immediately # followed by _handle_write we need this to be synchronous. + # + # The IOLoop will get confused if we swap out self.socket while the + # fd is registered, so remove it now and re-register after + # wrap_socket(). + self.io_loop.remove_handler(self.socket) + old_state = self._state + self._state = None self.socket = ssl_wrap_socket(self.socket, self._ssl_options, server_hostname=self._server_hostname, do_handshake_on_connect=False) - super(SSLIOStream, self)._handle_connect() + self._add_io_state(old_state) def read_from_fd(self): if self._ssl_accepting: @@ -978,9 +1308,9 @@ class PipeIOStream(BaseIOStream): try: chunk = os.read(self.fd, self.read_chunk_size) except (IOError, OSError) as e: - if e.args[0] in _ERRNO_WOULDBLOCK: + if errno_from_exception(e) in _ERRNO_WOULDBLOCK: return None - elif e.args[0] == errno.EBADF: + elif errno_from_exception(e) == errno.EBADF: # If the writing half of a pipe is closed, select will # report it as readable but reads will fail with EBADF. self.close(exc_info=True) diff --git a/libs/tornado/log.py b/libs/tornado/log.py index 36c3dd40..70664664 100755 --- a/libs/tornado/log.py +++ b/libs/tornado/log.py @@ -83,10 +83,10 @@ class LogFormatter(logging.Formatter): DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s' DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S' DEFAULT_COLORS = { - logging.DEBUG: 4, # Blue - logging.INFO: 2, # Green - logging.WARNING: 3, # Yellow - logging.ERROR: 1, # Red + logging.DEBUG: 4, # Blue + logging.INFO: 2, # Green + logging.WARNING: 3, # Yellow + logging.ERROR: 1, # Red } def __init__(self, color=True, fmt=DEFAULT_FORMAT, @@ -184,7 +184,7 @@ def enable_pretty_logging(options=None, logger=None): """ if options is None: from tornado.options import options - if options.logging == 'none': + if options.logging is None or options.logging.lower() == 'none': return if logger is None: logger = logging.getLogger() diff --git a/libs/tornado/netutil.py b/libs/tornado/netutil.py index 8ebe604d..a9e05d1e 100755 --- a/libs/tornado/netutil.py +++ b/libs/tornado/netutil.py @@ -20,18 +20,26 @@ from __future__ import absolute_import, division, print_function, with_statement import errno import os +import platform import socket -import ssl import stat from tornado.concurrent import dummy_executor, run_on_executor from tornado.ioloop import IOLoop from tornado.platform.auto import set_close_exec -from tornado.util import u, Configurable +from tornado.util import u, Configurable, errno_from_exception + +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine + ssl = None if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+ ssl_match_hostname = ssl.match_hostname SSLCertificateError = ssl.CertificateError +elif ssl is None: + ssl_match_hostname = SSLCertificateError = None else: import backports.ssl_match_hostname ssl_match_hostname = backports.ssl_match_hostname.match_hostname @@ -44,6 +52,11 @@ else: # thread now. u('foo').encode('idna') +# These errnos indicate that a non-blocking operation must be retried +# at a later time. On most platforms they're the same value, but on +# some they differ. 
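A recurring change throughout these hunks is replacing direct `e.args[0]` / `e.errno` pokes with `errno_from_exception`, which copes with exceptions created either way (the helper itself appears later in the `util.py` hunk). A small sketch of the retry pattern the `_ERRNO_WOULDBLOCK` constant defined just below is used for:

```python
import errno
import socket

from tornado.util import errno_from_exception

_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN)

def try_accept(sock):
    # Non-blocking accept: EWOULDBLOCK/EAGAIN just mean "retry on the
    # next read event"; anything else is a real error.
    try:
        return sock.accept()
    except socket.error as e:
        if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
            return None
        raise
```
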
+_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) + def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags=None): """Creates listening sockets bound to the given port and address. @@ -77,13 +90,23 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags family = socket.AF_INET if flags is None: flags = socket.AI_PASSIVE + bound_port = None for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags)): af, socktype, proto, canonname, sockaddr = res + if (platform.system() == 'Darwin' and address == 'localhost' and + af == socket.AF_INET6 and sockaddr[3] != 0): + # Mac OS X includes a link-local address fe80::1%lo0 in the + # getaddrinfo results for 'localhost'. However, the firewall + # doesn't understand that this is a local address and will + # prompt for access (often repeatedly, due to an apparent + # bug in its ability to remember granting access to an + # application). Skip these addresses. + continue try: sock = socket.socket(af, socktype, proto) except socket.error as e: - if e.args[0] == errno.EAFNOSUPPORT: + if errno_from_exception(e) == errno.EAFNOSUPPORT: continue raise set_close_exec(sock.fileno()) @@ -100,8 +123,16 @@ def bind_sockets(port, address=None, family=socket.AF_UNSPEC, backlog=128, flags # Python 2.x on windows doesn't have IPPROTO_IPV6. if hasattr(socket, "IPPROTO_IPV6"): sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1) + + # automatic port allocation with port=None + # should bind on the same port on IPv4 and IPv6 + host, requested_port = sockaddr[:2] + if requested_port == 0 and bound_port is not None: + sockaddr = tuple([host, bound_port] + list(sockaddr[2:])) + sock.setblocking(0) sock.bind(sockaddr) + bound_port = sock.getsockname()[1] sock.listen(backlog) sockets.append(sock) return sockets @@ -124,7 +155,7 @@ if hasattr(socket, 'AF_UNIX'): try: st = os.stat(file) except OSError as err: - if err.errno != errno.ENOENT: + if errno_from_exception(err) != errno.ENOENT: raise else: if stat.S_ISSOCK(st.st_mode): @@ -154,18 +185,18 @@ def add_accept_handler(sock, callback, io_loop=None): try: connection, address = sock.accept() except socket.error as e: - # EWOULDBLOCK and EAGAIN indicate we have accepted every + # _ERRNO_WOULDBLOCK indicate we have accepted every # connection that is available. - if e.args[0] in (errno.EWOULDBLOCK, errno.EAGAIN): + if errno_from_exception(e) in _ERRNO_WOULDBLOCK: return # ECONNABORTED indicates that there was a connection # but it was closed while still in the accept queue. # (observed on FreeBSD). - if e.args[0] == errno.ECONNABORTED: + if errno_from_exception(e) == errno.ECONNABORTED: continue raise callback(connection, address) - io_loop.add_handler(sock.fileno(), accept_handler, IOLoop.READ) + io_loop.add_handler(sock, accept_handler, IOLoop.READ) def is_valid_ip(ip): @@ -381,6 +412,10 @@ def ssl_options_to_context(ssl_options): context.load_verify_locations(ssl_options['ca_certs']) if 'ciphers' in ssl_options: context.set_ciphers(ssl_options['ciphers']) + if hasattr(ssl, 'OP_NO_COMPRESSION'): + # Disable TLS compression to avoid CRIME and related attacks. + # This constant wasn't added until python 3.3. 
+ context.options |= ssl.OP_NO_COMPRESSION return context diff --git a/libs/tornado/options.py b/libs/tornado/options.py index 1105c0e9..fa9c269e 100755 --- a/libs/tornado/options.py +++ b/libs/tornado/options.py @@ -56,6 +56,18 @@ We support `datetimes `, `timedeltas the top-level functions in this module (`define`, `parse_command_line`, etc) simply call methods on it. You may create additional `OptionParser` instances to define isolated sets of options, such as for subcommands. + +.. note:: + + By default, several options are defined that will configure the + standard `logging` module when `parse_command_line` or `parse_config_file` + are called. If you want Tornado to leave the logging configuration + alone so you can manage it yourself, either pass ``--logging=none`` + on the command line or do the following to disable it in code:: + + from tornado.options import options, parse_command_line + options.logging = None + parse_command_line() """ from __future__ import absolute_import, division, print_function, with_statement @@ -360,6 +372,8 @@ class _Mockable(object): class _Option(object): + UNSET = object() + def __init__(self, name, default=None, type=basestring_type, help=None, metavar=None, multiple=False, file_name=None, group_name=None, callback=None): @@ -374,10 +388,10 @@ class _Option(object): self.group_name = group_name self.callback = callback self.default = default - self._value = None + self._value = _Option.UNSET def value(self): - return self.default if self._value is None else self._value + return self.default if self._value is _Option.UNSET else self._value def parse(self, value): _parse = { diff --git a/libs/tornado/platform/asyncio.py b/libs/tornado/platform/asyncio.py index 162b3673..552476bc 100644 --- a/libs/tornado/platform/asyncio.py +++ b/libs/tornado/platform/asyncio.py @@ -10,21 +10,31 @@ unfinished callbacks on the event loop that fail when it resumes) """ from __future__ import absolute_import, division, print_function, with_statement -import asyncio import datetime import functools -import os -from tornado.ioloop import IOLoop +# _Timeout is used for its timedelta_to_seconds method for py26 compatibility. +from tornado.ioloop import IOLoop, _Timeout from tornado import stack_context +try: + # Import the real asyncio module for py33+ first. Older versions of the + # trollius backport also use this name. + import asyncio +except ImportError as e: + # Asyncio itself isn't available; see if trollius is (backport to py26+). + try: + import trollius as asyncio + except ImportError: + # Re-raise the original asyncio error, not the trollius one. 
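The import dance in the `platform/asyncio.py` hunk lets the same bridge run on the stdlib `asyncio` (Python 3.3+) or the `trollius` backport. Usage is unchanged either way; a sketch, assuming the `AsyncIOMainLoop` wrapper that upstream Tornado of this era defines elsewhere in the same module:

```python
# Run Tornado callbacks on asyncio's event loop instead of Tornado's own.
from tornado.ioloop import IOLoop
from tornado.platform.asyncio import AsyncIOMainLoop  # assumed, per upstream

AsyncIOMainLoop().install()      # asyncio's loop becomes IOLoop.instance()

def hello():
    print("running on asyncio")
    IOLoop.instance().stop()     # stops the shared asyncio loop

IOLoop.instance().add_callback(hello)
IOLoop.instance().start()        # delegates to asyncio_loop.run_forever()
```
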
+ raise e class BaseAsyncIOLoop(IOLoop): def initialize(self, asyncio_loop, close_loop=False): self.asyncio_loop = asyncio_loop self.close_loop = close_loop self.asyncio_loop.call_soon(self.make_current) - # Maps fd to handler function (as in IOLoop.add_handler) + # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) self.handlers = {} # Set of fds listening for reads/writes self.readers = set() @@ -34,19 +44,18 @@ class BaseAsyncIOLoop(IOLoop): def close(self, all_fds=False): self.closing = True for fd in list(self.handlers): + fileobj, handler_func = self.handlers[fd] self.remove_handler(fd) if all_fds: - try: - os.close(fd) - except OSError: - pass + self.close_fd(fileobj) if self.close_loop: self.asyncio_loop.close() def add_handler(self, fd, handler, events): + fd, fileobj = self.split_fd(fd) if fd in self.handlers: - raise ValueError("fd %d added twice" % fd) - self.handlers[fd] = stack_context.wrap(handler) + raise ValueError("fd %s added twice" % fd) + self.handlers[fd] = (fileobj, stack_context.wrap(handler)) if events & IOLoop.READ: self.asyncio_loop.add_reader( fd, self._handle_events, fd, IOLoop.READ) @@ -57,6 +66,7 @@ class BaseAsyncIOLoop(IOLoop): self.writers.add(fd) def update_handler(self, fd, events): + fd, fileobj = self.split_fd(fd) if events & IOLoop.READ: if fd not in self.readers: self.asyncio_loop.add_reader( @@ -77,6 +87,7 @@ class BaseAsyncIOLoop(IOLoop): self.writers.remove(fd) def remove_handler(self, fd): + fd, fileobj = self.split_fd(fd) if fd not in self.handlers: return if fd in self.readers: @@ -88,7 +99,8 @@ class BaseAsyncIOLoop(IOLoop): del self.handlers[fd] def _handle_events(self, fd, events): - self.handlers[fd](fd, events) + fileobj, handler_func = self.handlers[fd] + handler_func(fileobj, events) def start(self): self._setup_logging() @@ -107,7 +119,7 @@ class BaseAsyncIOLoop(IOLoop): if isinstance(deadline, (int, float)): delay = max(deadline - self.time(), 0) elif isinstance(deadline, datetime.timedelta): - delay = deadline.total_seconds() + delay = _Timeout.timedelta_to_seconds(deadline) else: raise TypeError("Unsupported deadline %r", deadline) return self.asyncio_loop.call_later(delay, self._run_callback, diff --git a/libs/tornado/platform/auto.py b/libs/tornado/platform/auto.py index e55725b3..ddfe06b4 100755 --- a/libs/tornado/platform/auto.py +++ b/libs/tornado/platform/auto.py @@ -30,6 +30,10 @@ import os if os.name == 'nt': from tornado.platform.common import Waker from tornado.platform.windows import set_close_exec +elif 'APPENGINE_RUNTIME' in os.environ: + from tornado.platform.common import Waker + def set_close_exec(fd): + pass else: from tornado.platform.posix import set_close_exec, Waker diff --git a/libs/tornado/platform/common.py b/libs/tornado/platform/common.py index d9c4cf9f..b409a903 100755 --- a/libs/tornado/platform/common.py +++ b/libs/tornado/platform/common.py @@ -15,7 +15,8 @@ class Waker(interface.Waker): and Jython. 
""" def __init__(self): - # Based on Zope async.py: http://svn.zope.org/zc.ngi/trunk/src/zc/ngi/async.py + # Based on Zope select_trigger.py: + # https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py self.writer = socket.socket() # Disable buffering -- pulling the trigger sends 1 byte, diff --git a/libs/tornado/platform/kqueue.py b/libs/tornado/platform/kqueue.py index ceff0a43..de8c046d 100755 --- a/libs/tornado/platform/kqueue.py +++ b/libs/tornado/platform/kqueue.py @@ -37,7 +37,7 @@ class _KQueue(object): def register(self, fd, events): if fd in self._active: - raise IOError("fd %d already registered" % fd) + raise IOError("fd %s already registered" % fd) self._control(fd, events, select.KQ_EV_ADD) self._active[fd] = events diff --git a/libs/tornado/platform/select.py b/libs/tornado/platform/select.py index 8bbb1f4f..9a879562 100755 --- a/libs/tornado/platform/select.py +++ b/libs/tornado/platform/select.py @@ -37,7 +37,7 @@ class _Select(object): def register(self, fd, events): if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds: - raise IOError("fd %d already registered" % fd) + raise IOError("fd %s already registered" % fd) if events & IOLoop.READ: self.read_fds.add(fd) if events & IOLoop.WRITE: diff --git a/libs/tornado/platform/twisted.py b/libs/tornado/platform/twisted.py index 0c8a3105..889aa3c4 100755 --- a/libs/tornado/platform/twisted.py +++ b/libs/tornado/platform/twisted.py @@ -91,6 +91,11 @@ from tornado.netutil import Resolver from tornado.stack_context import NullContext, wrap from tornado.ioloop import IOLoop +try: + long # py2 +except NameError: + long = int # py3 + @implementer(IDelayedCall) class TornadoDelayedCall(object): @@ -365,8 +370,9 @@ def install(io_loop=None): @implementer(IReadDescriptor, IWriteDescriptor) class _FD(object): - def __init__(self, fd, handler): + def __init__(self, fd, fileobj, handler): self.fd = fd + self.fileobj = fileobj self.handler = handler self.reading = False self.writing = False @@ -377,15 +383,15 @@ class _FD(object): def doRead(self): if not self.lost: - self.handler(self.fd, tornado.ioloop.IOLoop.READ) + self.handler(self.fileobj, tornado.ioloop.IOLoop.READ) def doWrite(self): if not self.lost: - self.handler(self.fd, tornado.ioloop.IOLoop.WRITE) + self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE) def connectionLost(self, reason): if not self.lost: - self.handler(self.fd, tornado.ioloop.IOLoop.ERROR) + self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR) self.lost = True def logPrefix(self): @@ -412,14 +418,19 @@ class TwistedIOLoop(tornado.ioloop.IOLoop): self.reactor.callWhenRunning(self.make_current) def close(self, all_fds=False): + fds = self.fds self.reactor.removeAll() for c in self.reactor.getDelayedCalls(): c.cancel() + if all_fds: + for fd in fds.values(): + self.close_fd(fd.fileobj) def add_handler(self, fd, handler, events): if fd in self.fds: - raise ValueError('fd %d added twice' % fd) - self.fds[fd] = _FD(fd, wrap(handler)) + raise ValueError('fd %s added twice' % fd) + fd, fileobj = self.split_fd(fd) + self.fds[fd] = _FD(fd, fileobj, wrap(handler)) if events & tornado.ioloop.IOLoop.READ: self.fds[fd].reading = True self.reactor.addReader(self.fds[fd]) @@ -428,6 +439,7 @@ class TwistedIOLoop(tornado.ioloop.IOLoop): self.reactor.addWriter(self.fds[fd]) def update_handler(self, fd, events): + fd, fileobj = self.split_fd(fd) if events & tornado.ioloop.IOLoop.READ: if not self.fds[fd].reading: self.fds[fd].reading = True @@ -446,6 +458,7 @@ class 
TwistedIOLoop(tornado.ioloop.IOLoop): self.reactor.removeWriter(self.fds[fd]) def remove_handler(self, fd): + fd, fileobj = self.split_fd(fd) if fd not in self.fds: return self.fds[fd].lost = True diff --git a/libs/tornado/process.py b/libs/tornado/process.py index 942c5c3f..0f38b856 100755 --- a/libs/tornado/process.py +++ b/libs/tornado/process.py @@ -21,7 +21,6 @@ the server into multiple processes and managing subprocesses. from __future__ import absolute_import, division, print_function, with_statement import errno -import multiprocessing import os import signal import subprocess @@ -35,6 +34,13 @@ from tornado.iostream import PipeIOStream from tornado.log import gen_log from tornado.platform.auto import set_close_exec from tornado import stack_context +from tornado.util import errno_from_exception + +try: + import multiprocessing +except ImportError: + # Multiprocessing is not availble on Google App Engine. + multiprocessing = None try: long # py2 @@ -44,6 +50,8 @@ except NameError: def cpu_count(): """Returns the number of processors on this machine.""" + if multiprocessing is None: + return 1 try: return multiprocessing.cpu_count() except NotImplementedError: @@ -136,7 +144,7 @@ def fork_processes(num_processes, max_restarts=100): try: pid, status = os.wait() except OSError as e: - if e.errno == errno.EINTR: + if errno_from_exception(e) == errno.EINTR: continue raise if pid not in children: @@ -283,7 +291,7 @@ class Subprocess(object): try: ret_pid, status = os.waitpid(pid, os.WNOHANG) except OSError as e: - if e.args[0] == errno.ECHILD: + if errno_from_exception(e) == errno.ECHILD: return if ret_pid == 0: return diff --git a/libs/tornado/simple_httpclient.py b/libs/tornado/simple_httpclient.py index 73bfee89..06d7ecfa 100755 --- a/libs/tornado/simple_httpclient.py +++ b/libs/tornado/simple_httpclient.py @@ -1,23 +1,23 @@ #!/usr/bin/env python from __future__ import absolute_import, division, print_function, with_statement -from tornado.escape import utf8, _unicode, native_str +from tornado.concurrent import is_future +from tornado.escape import utf8, _unicode from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy -from tornado.httputil import HTTPHeaders -from tornado.iostream import IOStream, SSLIOStream +from tornado import httputil +from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters +from tornado.iostream import StreamClosedError from tornado.netutil import Resolver, OverrideResolver from tornado.log import gen_log from tornado import stack_context -from tornado.util import GzipDecompressor +from tornado.tcpclient import TCPClient import base64 import collections import copy import functools -import os.path import re import socket -import ssl import sys try: @@ -30,7 +30,23 @@ try: except ImportError: import urllib.parse as urlparse # py3 -_DEFAULT_CA_CERTS = os.path.dirname(__file__) + '/ca-certificates.crt' +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine. 
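The `process.py` hunks make `tornado.process` degrade gracefully when `multiprocessing` is missing and move the child-reaping code onto `errno_from_exception` as well. A sketch of the `Subprocess` exit-callback path that the `waitpid`/`ECHILD` change services, assuming the standard `Subprocess` API of this Tornado line:

```python
from tornado.ioloop import IOLoop
from tornado.process import Subprocess

io_loop = IOLoop.current()
proc = Subprocess(["echo", "hello"], stdout=Subprocess.STREAM)

def on_exit(returncode):
    # Invoked via the SIGCHLD/waitpid machinery patched above.
    print("child exited with code %d" % returncode)
    io_loop.stop()

proc.set_exit_callback(on_exit)
io_loop.start()
```
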
+ ssl = None + +try: + import certifi +except ImportError: + certifi = None + + +def _default_ca_certs(): + if certifi is None: + raise Exception("The 'certifi' package is required to use https " + "in simple_httpclient") + return certifi.where() class SimpleAsyncHTTPClient(AsyncHTTPClient): @@ -47,7 +63,7 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): """ def initialize(self, io_loop, max_clients=10, hostname_mapping=None, max_buffer_size=104857600, - resolver=None, defaults=None): + resolver=None, defaults=None, max_header_size=None): """Creates a AsyncHTTPClient. Only a single AsyncHTTPClient instance exists per IOLoop @@ -74,6 +90,9 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): self.active = {} self.waiting = {} self.max_buffer_size = max_buffer_size + self.max_header_size = max_header_size + # TCPClient could create a Resolver for us, but we have to do it + # ourselves to support hostname_mapping. if resolver: self.resolver = resolver self.own_resolver = False @@ -83,11 +102,13 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): if hostname_mapping is not None: self.resolver = OverrideResolver(resolver=self.resolver, mapping=hostname_mapping) + self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop) def close(self): super(SimpleAsyncHTTPClient, self).close() if self.own_resolver: self.resolver.close() + self.tcp_client.close() def fetch_impl(self, request, callback): key = object() @@ -119,7 +140,8 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): def _handle_request(self, request, release_callback, final_callback): _HTTPConnection(self.io_loop, self, request, release_callback, - final_callback, self.max_buffer_size, self.resolver) + final_callback, self.max_buffer_size, self.tcp_client, + self.max_header_size) def _release_fetch(self, key): del self.active[key] @@ -142,11 +164,12 @@ class SimpleAsyncHTTPClient(AsyncHTTPClient): del self.waiting[key] -class _HTTPConnection(object): +class _HTTPConnection(httputil.HTTPMessageDelegate): _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) def __init__(self, io_loop, client, request, release_callback, - final_callback, max_buffer_size, resolver): + final_callback, max_buffer_size, tcp_client, + max_header_size): self.start_time = io_loop.time() self.io_loop = io_loop self.client = client @@ -154,13 +177,15 @@ class _HTTPConnection(object): self.release_callback = release_callback self.final_callback = final_callback self.max_buffer_size = max_buffer_size - self.resolver = resolver + self.tcp_client = tcp_client + self.max_header_size = max_header_size self.code = None self.headers = None - self.chunks = None + self.chunks = [] self._decompressor = None # Timeout handle returned by IOLoop.add_timeout self._timeout = None + self._sockaddr = None with stack_context.ExceptionStackContext(self._handle_exception): self.parsed = urlparse.urlsplit(_unicode(self.request.url)) if self.parsed.scheme not in ("http", "https"): @@ -183,42 +208,31 @@ class _HTTPConnection(object): host = host[1:-1] self.parsed_hostname = host # save final host for _on_connect - if request.allow_ipv6: - af = socket.AF_UNSPEC - else: - # We only try the first IP we get from getaddrinfo, - # so restrict to ipv4 by default. 
+ if request.allow_ipv6 is False: af = socket.AF_INET + else: + af = socket.AF_UNSPEC + + ssl_options = self._get_ssl_options(self.parsed.scheme) timeout = min(self.request.connect_timeout, self.request.request_timeout) if timeout: self._timeout = self.io_loop.add_timeout( self.start_time + timeout, stack_context.wrap(self._on_timeout)) - self.resolver.resolve(host, port, af, callback=self._on_resolve) + self.tcp_client.connect(host, port, af=af, + ssl_options=ssl_options, + callback=self._on_connect) - def _on_resolve(self, addrinfo): - if self.final_callback is None: - # final_callback is cleared if we've hit our timeout - return - self.stream = self._create_stream(addrinfo) - self.stream.set_close_callback(self._on_close) - # ipv6 addresses are broken (in self.parsed.hostname) until - # 2.7, here is correctly parsed value calculated in __init__ - sockaddr = addrinfo[0][1] - self.stream.connect(sockaddr, self._on_connect, - server_hostname=self.parsed_hostname) - - def _create_stream(self, addrinfo): - af = addrinfo[0][0] - if self.parsed.scheme == "https": + def _get_ssl_options(self, scheme): + if scheme == "https": ssl_options = {} if self.request.validate_cert: ssl_options["cert_reqs"] = ssl.CERT_REQUIRED if self.request.ca_certs is not None: ssl_options["ca_certs"] = self.request.ca_certs else: - ssl_options["ca_certs"] = _DEFAULT_CA_CERTS + ssl_options["ca_certs"] = _default_ca_certs() if self.request.client_key is not None: ssl_options["keyfile"] = self.request.client_key if self.request.client_cert is not None: @@ -236,21 +250,16 @@ class _HTTPConnection(object): # but nearly all servers support both SSLv3 and TLSv1: # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html if sys.version_info >= (2, 7): - ssl_options["ciphers"] = "DEFAULT:!SSLv2" + # In addition to disabling SSLv2, we also exclude certain + # classes of insecure ciphers. + ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES" else: # This is really only necessary for pre-1.0 versions # of openssl, but python 2.6 doesn't expose version # information. ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1 - - return SSLIOStream(socket.socket(af), - io_loop=self.io_loop, - ssl_options=ssl_options, - max_buffer_size=self.max_buffer_size) - else: - return IOStream(socket.socket(af), - io_loop=self.io_loop, - max_buffer_size=self.max_buffer_size) + return ssl_options + return None def _on_timeout(self): self._timeout = None @@ -262,7 +271,13 @@ class _HTTPConnection(object): self.io_loop.remove_timeout(self._timeout) self._timeout = None - def _on_connect(self): + def _on_connect(self, stream): + if self.final_callback is None: + # final_callback is cleared if we've hit our timeout. 
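`_get_ssl_options` now derives everything from the request object, with `certifi` supplying default CA certificates when none are given. Roughly how the request attributes map onto that options dict (the paths here are hypothetical):

```python
from tornado.httpclient import AsyncHTTPClient, HTTPRequest

req = HTTPRequest(
    "https://example.com/",
    validate_cert=True,              # -> ssl_options["cert_reqs"] = ssl.CERT_REQUIRED
    ca_certs="/etc/ssl/my-ca.pem",   # omit to fall back to certifi.where()
    client_key="/etc/ssl/my.key",    # -> ssl_options["keyfile"]
    client_cert="/etc/ssl/my.crt",   # -> ssl_options["certfile"]
)

def on_response(response):
    print(response.code)

AsyncHTTPClient().fetch(req, callback=on_response)
```
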
+ stream.close() + return + self.stream = stream + self.stream.set_close_callback(self._on_close) self._remove_timeout() if self.final_callback is None: return @@ -302,16 +317,22 @@ class _HTTPConnection(object): self.request.headers["User-Agent"] = self.request.user_agent if not self.request.allow_nonstandard_methods: if self.request.method in ("POST", "PATCH", "PUT"): - if self.request.body is None: + if (self.request.body is None and + self.request.body_producer is None): raise AssertionError( 'Body must not be empty for "%s" request' % self.request.method) else: - if self.request.body is not None: + if (self.request.body is not None or + self.request.body_producer is not None): raise AssertionError( 'Body must be empty for "%s" request' % self.request.method) + if self.request.expect_100_continue: + self.request.headers["Expect"] = "100-continue" if self.request.body is not None: + # When body_producer is used the caller is responsible for + # setting Content-Length (or else chunked encoding will be used). self.request.headers["Content-Length"] = str(len( self.request.body)) if (self.request.method == "POST" and @@ -320,20 +341,47 @@ class _HTTPConnection(object): if self.request.use_gzip: self.request.headers["Accept-Encoding"] = "gzip" req_path = ((self.parsed.path or '/') + - (('?' + self.parsed.query) if self.parsed.query else '')) - request_lines = [utf8("%s %s HTTP/1.1" % (self.request.method, - req_path))] - for k, v in self.request.headers.get_all(): - line = utf8(k) + b": " + utf8(v) - if b'\n' in line: - raise ValueError('Newline in header: ' + repr(line)) - request_lines.append(line) - request_str = b"\r\n".join(request_lines) + b"\r\n\r\n" - if self.request.body is not None: - request_str += self.request.body + (('?' + self.parsed.query) if self.parsed.query else '')) self.stream.set_nodelay(True) - self.stream.write(request_str) - self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers) + self.connection = HTTP1Connection( + self.stream, True, + HTTP1ConnectionParameters( + no_keep_alive=True, + max_header_size=self.max_header_size, + use_gzip=self.request.use_gzip), + self._sockaddr) + start_line = httputil.RequestStartLine(self.request.method, + req_path, 'HTTP/1.1') + self.connection.write_headers(start_line, self.request.headers) + if self.request.expect_100_continue: + self._read_response() + else: + self._write_body(True) + + def _write_body(self, start_read): + if self.request.body is not None: + self.connection.write(self.request.body) + self.connection.finish() + elif self.request.body_producer is not None: + fut = self.request.body_producer(self.connection.write) + if is_future(fut): + def on_body_written(fut): + fut.result() + self.connection.finish() + if start_read: + self._read_response() + self.io_loop.add_future(fut, on_body_written) + return + self.connection.finish() + if start_read: + self._read_response() + + def _read_response(self): + # Ensure that any exception raised in read_response ends up in our + # stack context. 
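The new `body_producer` hook replaces one-shot body strings for uploads: the producer is handed the connection's `write` function and may return a `Future`. Per the comment in the hunk above, the caller sets `Content-Length` itself or the body goes out chunked. A sketch, assuming `write()` returns a `Future` as in the Future-based iostream introduced by this diff:

```python
from tornado import gen
from tornado.httpclient import AsyncHTTPClient, HTTPRequest

@gen.coroutine
def produce(write):
    # Yielding each write provides flow control against a slow server.
    for chunk in [b"part-one,", b"part-two"]:
        yield write(chunk)

req = HTTPRequest("http://example.com/upload", method="POST",
                  body_producer=produce)   # no Content-Length -> chunked
AsyncHTTPClient().fetch(req, callback=lambda r: None)
```
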
+ self.io_loop.add_future( + self.connection.read_response(self), + lambda f: f.result()) def _release(self): if self.release_callback is not None: @@ -351,43 +399,39 @@ class _HTTPConnection(object): def _handle_exception(self, typ, value, tb): if self.final_callback: self._remove_timeout() + if isinstance(value, StreamClosedError): + value = HTTPError(599, "Stream closed") self._run_callback(HTTPResponse(self.request, 599, error=value, request_time=self.io_loop.time() - self.start_time, )) if hasattr(self, "stream"): + # TODO: this may cause a StreamClosedError to be raised + # by the connection's Future. Should we cancel the + # connection more gracefully? self.stream.close() return True else: # If our callback has already been called, we are probably # catching an exception that is not caused by us but rather # some child of our callback. Rather than drop it on the floor, - # pass it along. - return False + # pass it along, unless it's just the stream being closed. + return isinstance(value, StreamClosedError) def _on_close(self): if self.final_callback is not None: message = "Connection closed" if self.stream.error: - message = str(self.stream.error) + raise self.stream.error raise HTTPError(599, message) - def _handle_1xx(self, code): - self.stream.read_until_regex(b"\r?\n\r?\n", self._on_headers) - - def _on_headers(self, data): - data = native_str(data.decode("latin1")) - first_line, _, header_data = data.partition("\n") - match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line) - assert match - code = int(match.group(1)) - self.headers = HTTPHeaders.parse(header_data) - if 100 <= code < 200: - self._handle_1xx(code) + def headers_received(self, first_line, headers): + if self.request.expect_100_continue and first_line.code == 100: + self._write_body(False) return - else: - self.code = code - self.reason = match.group(2) + self.headers = headers + self.code = first_line.code + self.reason = first_line.reason if "Content-Length" in self.headers: if "," in self.headers["Content-Length"]: @@ -404,17 +448,12 @@ class _HTTPConnection(object): content_length = None if self.request.header_callback is not None: - # re-attach the newline we split on earlier - self.request.header_callback(first_line + _) + # Reassemble the start line. 
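Because `headers_received` reassembles the start line before invoking `header_callback`, the callback still sees the same shape it did under the old parser: the status line, one `Name: value\r\n` per header, then a bare `\r\n`. For instance:

```python
from tornado.httpclient import AsyncHTTPClient

def on_header_line(line):
    # Receives e.g. "HTTP/1.1 200 OK\r\n", then each header line,
    # then the terminating "\r\n".
    print(repr(line))

AsyncHTTPClient().fetch("http://example.com/",
                        header_callback=on_header_line,
                        callback=lambda resp: None)
```
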
+ self.request.header_callback('%s %s %s\r\n' % first_line) for k, v in self.headers.get_all(): self.request.header_callback("%s: %s\r\n" % (k, v)) self.request.header_callback('\r\n') - if self.request.method == "HEAD" or self.code == 304: - # HEAD requests and 304 responses never have content, even - # though they may have content-length headers - self._on_body(b"") - return if 100 <= self.code < 200 or self.code == 204: # These response codes never have bodies # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 @@ -422,21 +461,9 @@ class _HTTPConnection(object): content_length not in (None, 0)): raise ValueError("Response with code %d should not have body" % self.code) - self._on_body(b"") - return - if (self.request.use_gzip and - self.headers.get("Content-Encoding") == "gzip"): - self._decompressor = GzipDecompressor() - if self.headers.get("Transfer-Encoding") == "chunked": - self.chunks = [] - self.stream.read_until(b"\r\n", self._on_chunk_length) - elif content_length is not None: - self.stream.read_bytes(content_length, self._on_body) - else: - self.stream.read_until_close(self._on_body) - - def _on_body(self, data): + def finish(self): + data = b''.join(self.chunks) self._remove_timeout() original_request = getattr(self.request, "original_request", self.request) @@ -472,19 +499,12 @@ class _HTTPConnection(object): self.client.fetch(new_request, final_callback) self._on_end_request() return - if self._decompressor: - data = (self._decompressor.decompress(data) + - self._decompressor.flush()) if self.request.streaming_callback: - if self.chunks is None: - # if chunks is not None, we already called streaming_callback - # in _on_chunk_data - self.request.streaming_callback(data) buffer = BytesIO() else: buffer = BytesIO(data) # TODO: don't require one big string? response = HTTPResponse(original_request, - self.code, reason=self.reason, + self.code, reason=getattr(self, 'reason', None), headers=self.headers, request_time=self.io_loop.time() - self.start_time, buffer=buffer, @@ -495,40 +515,11 @@ class _HTTPConnection(object): def _on_end_request(self): self.stream.close() - def _on_chunk_length(self, data): - # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1 - length = int(data.strip(), 16) - if length == 0: - if self._decompressor is not None: - tail = self._decompressor.flush() - if tail: - # I believe the tail will always be empty (i.e. - # decompress will return all it can). The purpose - # of the flush call is to detect errors such - # as truncated input. 
But in case it ever returns - # anything, treat it as an extra chunk - if self.request.streaming_callback is not None: - self.request.streaming_callback(tail) - else: - self.chunks.append(tail) - # all the data has been decompressed, so we don't need to - # decompress again in _on_body - self._decompressor = None - self._on_body(b''.join(self.chunks)) - else: - self.stream.read_bytes(length + 2, # chunk ends with \r\n - self._on_chunk_data) - - def _on_chunk_data(self, data): - assert data[-2:] == b"\r\n" - chunk = data[:-2] - if self._decompressor: - chunk = self._decompressor.decompress(chunk) + def data_received(self, chunk): if self.request.streaming_callback is not None: self.request.streaming_callback(chunk) else: self.chunks.append(chunk) - self.stream.read_until(b"\r\n", self._on_chunk_length) if __name__ == "__main__": diff --git a/libs/tornado/speedups.c b/libs/tornado/speedups.c index 8a316c58..174a6129 100644 --- a/libs/tornado/speedups.c +++ b/libs/tornado/speedups.c @@ -1,21 +1,24 @@ +#define PY_SSIZE_T_CLEAN #include static PyObject* websocket_mask(PyObject* self, PyObject* args) { const char* mask; - int mask_len; + Py_ssize_t mask_len; const char* data; - int data_len; - int i; + Py_ssize_t data_len; + Py_ssize_t i; + PyObject* result; + char* buf; if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) { return NULL; } - PyObject* result = PyBytes_FromStringAndSize(NULL, data_len); + result = PyBytes_FromStringAndSize(NULL, data_len); if (!result) { return NULL; } - char* buf = PyBytes_AsString(result); + buf = PyBytes_AsString(result); for (i = 0; i < data_len; i++) { buf[i] = data[i] ^ mask[i % 4]; } diff --git a/libs/tornado/stack_context.py b/libs/tornado/stack_context.py index b1e82b0e..2e845ab2 100755 --- a/libs/tornado/stack_context.py +++ b/libs/tornado/stack_context.py @@ -266,6 +266,18 @@ def wrap(fn): # TODO: Any other better way to store contexts and update them in wrapped function? cap_contexts = [_state.contexts] + if not cap_contexts[0][0] and not cap_contexts[0][1]: + # Fast path when there are no active contexts. + def null_wrapper(*args, **kwargs): + try: + current_state = _state.contexts + _state.contexts = cap_contexts[0] + return fn(*args, **kwargs) + finally: + _state.contexts = current_state + null_wrapper._wrapped = True + return null_wrapper + def wrapped(*args, **kwargs): ret = None try: diff --git a/libs/tornado/tcpclient.py b/libs/tornado/tcpclient.py new file mode 100644 index 00000000..d49eb5cd --- /dev/null +++ b/libs/tornado/tcpclient.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python +# +# Copyright 2014 Facebook +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""A non-blocking TCP connection factory. 
+""" +from __future__ import absolute_import, division, print_function, with_statement + +import functools +import socket + +from tornado.concurrent import Future +from tornado.ioloop import IOLoop +from tornado.iostream import IOStream +from tornado import gen +from tornado.netutil import Resolver + +_INITIAL_CONNECT_TIMEOUT = 0.3 + + +class _Connector(object): + """A stateless implementation of the "Happy Eyeballs" algorithm. + + "Happy Eyeballs" is documented in RFC6555 as the recommended practice + for when both IPv4 and IPv6 addresses are available. + + In this implementation, we partition the addresses by family, and + make the first connection attempt to whichever address was + returned first by ``getaddrinfo``. If that connection fails or + times out, we begin a connection in parallel to the first address + of the other family. If there are additional failures we retry + with other addresses, keeping one connection attempt per family + in flight at a time. + + http://tools.ietf.org/html/rfc6555 + + """ + def __init__(self, addrinfo, io_loop, connect): + self.io_loop = io_loop + self.connect = connect + + self.future = Future() + self.timeout = None + self.last_error = None + self.remaining = len(addrinfo) + self.primary_addrs, self.secondary_addrs = self.split(addrinfo) + + @staticmethod + def split(addrinfo): + """Partition the ``addrinfo`` list by address family. + + Returns two lists. The first list contains the first entry from + ``addrinfo`` and all others with the same family, and the + second list contains all other addresses (normally one list will + be AF_INET and the other AF_INET6, although non-standard resolvers + may return additional families). + """ + primary = [] + secondary = [] + primary_af = addrinfo[0][0] + for af, addr in addrinfo: + if af == primary_af: + primary.append((af, addr)) + else: + secondary.append((af, addr)) + return primary, secondary + + def start(self, timeout=_INITIAL_CONNECT_TIMEOUT): + self.try_connect(iter(self.primary_addrs)) + self.set_timout(timeout) + return self.future + + def try_connect(self, addrs): + try: + af, addr = next(addrs) + except StopIteration: + # We've reached the end of our queue, but the other queue + # might still be working. Send a final error on the future + # only when both queues are finished. + if self.remaining == 0 and not self.future.done(): + self.future.set_exception(self.last_error or + IOError("connection failed")) + return + future = self.connect(af, addr) + future.add_done_callback(functools.partial(self.on_connect_done, + addrs, af, addr)) + + def on_connect_done(self, addrs, af, addr, future): + self.remaining -= 1 + try: + stream = future.result() + except Exception as e: + if self.future.done(): + return + # Error: try again (but remember what happened so we have an + # error to raise in the end) + self.last_error = e + self.try_connect(addrs) + if self.timeout is not None: + # If the first attempt failed, don't wait for the + # timeout to try an address from the secondary queue. + self.on_timeout() + return + self.clear_timeout() + if self.future.done(): + # This is a late arrival; just drop it. 
+ stream.close() + else: + self.future.set_result((af, addr, stream)) + + def set_timout(self, timeout): + self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, + self.on_timeout) + + def on_timeout(self): + self.timeout = None + self.try_connect(iter(self.secondary_addrs)) + + def clear_timeout(self): + if self.timeout is not None: + self.io_loop.remove_timeout(self.timeout) + + +class TCPClient(object): + """A non-blocking TCP connection factory. + """ + def __init__(self, resolver=None, io_loop=None): + self.io_loop = io_loop or IOLoop.current() + if resolver is not None: + self.resolver = resolver + self._own_resolver = False + else: + self.resolver = Resolver(io_loop=io_loop) + self._own_resolver = True + + def close(self): + if self._own_resolver: + self.resolver.close() + + @gen.coroutine + def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, + max_buffer_size=None): + """Connect to the given host and port. + + Asynchronously returns an `.IOStream` (or `.SSLIOStream` if + ``ssl_options`` is not None). + """ + addrinfo = yield self.resolver.resolve(host, port, af) + connector = _Connector( + addrinfo, self.io_loop, + functools.partial(self._create_stream, max_buffer_size)) + af, addr, stream = yield connector.start() + # TODO: For better performance we could cache the (af, addr) + # information here and re-use it on sbusequent connections to + # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2) + if ssl_options is not None: + stream = yield stream.start_tls(False, ssl_options=ssl_options, + server_hostname=host) + raise gen.Return(stream) + + def _create_stream(self, max_buffer_size, af, addr): + # Always connect in plaintext; we'll convert to ssl if necessary + # after one connection has completed. + stream = IOStream(socket.socket(af), + io_loop=self.io_loop, + max_buffer_size=max_buffer_size) + return stream.connect(addr) diff --git a/libs/tornado/tcpserver.py b/libs/tornado/tcpserver.py index c0773732..427acec5 100755 --- a/libs/tornado/tcpserver.py +++ b/libs/tornado/tcpserver.py @@ -20,13 +20,19 @@ from __future__ import absolute_import, division, print_function, with_statement import errno import os import socket -import ssl from tornado.log import app_log from tornado.ioloop import IOLoop from tornado.iostream import IOStream, SSLIOStream from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket from tornado import process +from tornado.util import errno_from_exception + +try: + import ssl +except ImportError: + # ssl is not available on Google App Engine. + ssl = None class TCPServer(object): @@ -81,13 +87,15 @@ class TCPServer(object): .. versionadded:: 3.1 The ``max_buffer_size`` argument. """ - def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None): + def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None, + read_chunk_size=None): self.io_loop = io_loop self.ssl_options = ssl_options self._sockets = {} # fd -> socket object self._pending_sockets = [] self._started = False self.max_buffer_size = max_buffer_size + self.read_chunk_size = None # Verify the SSL options. Otherwise we don't get errors until clients # connect. This doesn't verify that the keys are legitimate, but @@ -230,16 +238,20 @@ class TCPServer(object): # catch another error later on (AttributeError in # SSLIOStream._do_ssl_handshake). # To test this behavior, try nmap with the -sT flag. 
- # https://github.com/facebook/tornado/pull/750 - if err.args[0] in (errno.ECONNABORTED, errno.EINVAL): + # https://github.com/tornadoweb/tornado/pull/750 + if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): return connection.close() else: raise try: if self.ssl_options is not None: - stream = SSLIOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size) + stream = SSLIOStream(connection, io_loop=self.io_loop, + max_buffer_size=self.max_buffer_size, + read_chunk_size=self.read_chunk_size) else: - stream = IOStream(connection, io_loop=self.io_loop, max_buffer_size=self.max_buffer_size) + stream = IOStream(connection, io_loop=self.io_loop, + max_buffer_size=self.max_buffer_size, + read_chunk_size=self.read_chunk_size) self.handle_stream(stream, address) except Exception: app_log.error("Error in connection callback", exc_info=True) diff --git a/libs/tornado/template.py b/libs/tornado/template.py index db5a528d..4dcec5d5 100755 --- a/libs/tornado/template.py +++ b/libs/tornado/template.py @@ -180,7 +180,7 @@ with ``{# ... #}``. ``{% set *x* = *y* %}`` Sets a local variable. -``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}`` +``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}`` Same as the python ``try`` statement. ``{% while *condition* %}... {% end %}`` @@ -367,10 +367,9 @@ class Loader(BaseLoader): def _create_template(self, name): path = os.path.join(self.root, name) - f = open(path, "rb") - template = Template(f.read(), name=name, loader=self) - f.close() - return template + with open(path, "rb") as f: + template = Template(f.read(), name=name, loader=self) + return template class DictLoader(BaseLoader): @@ -785,7 +784,7 @@ def _parse(reader, template, in_block=None, in_loop=None): if allowed_parents is not None: if not in_block: raise ParseError("%s outside %s block" % - (operator, allowed_parents)) + (operator, allowed_parents)) if in_block not in allowed_parents: raise ParseError("%s block cannot be attached to %s block" % (operator, in_block)) body.chunks.append(_IntermediateControlBlock(contents, line)) diff --git a/libs/tornado/testing.py b/libs/tornado/testing.py index 8355dcfc..dc30e94f 100755 --- a/libs/tornado/testing.py +++ b/libs/tornado/testing.py @@ -17,7 +17,7 @@ try: from tornado.httpclient import AsyncHTTPClient from tornado.httpserver import HTTPServer from tornado.simple_httpclient import SimpleAsyncHTTPClient - from tornado.ioloop import IOLoop + from tornado.ioloop import IOLoop, TimeoutError from tornado import netutil except ImportError: # These modules are not importable on app engine. Parts of this module @@ -38,6 +38,7 @@ import re import signal import socket import sys +import types try: from cStringIO import StringIO # py2 @@ -48,10 +49,16 @@ except ImportError: # (either py27+ or unittest2) so tornado.test.util enforces # this requirement, but for other users of tornado.testing we want # to allow the older version if unitest2 is not available. -try: - import unittest2 as unittest -except ImportError: +if sys.version_info >= (3,): + # On python 3, mixing unittest2 and unittest (including doctest) + # doesn't seem to work, so always use unittest. import unittest +else: + # On python 2, prefer unittest2 when available. + try: + import unittest2 as unittest + except ImportError: + import unittest _next_port = 10000 @@ -95,6 +102,36 @@ def get_async_test_timeout(): return 5 +class _TestMethodWrapper(object): + """Wraps a test method to raise an error if it returns a value. 
+ + This is mainly used to detect undecorated generators (if a test + method yields it must use a decorator to consume the generator), + but will also detect other kinds of return values (these are not + necessarily errors, but we alert anyway since there is no good + reason to return a value from a test. + """ + def __init__(self, orig_method): + self.orig_method = orig_method + + def __call__(self): + result = self.orig_method() + if isinstance(result, types.GeneratorType): + raise TypeError("Generator test methods should be decorated with " + "tornado.testing.gen_test") + elif result is not None: + raise ValueError("Return value from test method ignored: %r" % + result) + + def __getattr__(self, name): + """Proxy all unknown attributes to the original method. + + This is important for some of the decorators in the `unittest` + module, such as `unittest.skipIf`. + """ + return getattr(self.orig_method, name) + + class AsyncTestCase(unittest.TestCase): """`~unittest.TestCase` subclass for testing `.IOLoop`-based asynchronous code. @@ -157,14 +194,20 @@ class AsyncTestCase(unittest.TestCase): self.assertIn("FriendFeed", response.body) self.stop() """ - def __init__(self, *args, **kwargs): - super(AsyncTestCase, self).__init__(*args, **kwargs) + def __init__(self, methodName='runTest', **kwargs): + super(AsyncTestCase, self).__init__(methodName, **kwargs) self.__stopped = False self.__running = False self.__failure = None self.__stop_args = None self.__timeout = None + # It's easy to forget the @gen_test decorator, but if you do + # the test will silently be ignored because nothing will consume + # the generator. Replace the test method with a wrapper that will + # make sure it's not an undecorated generator. + setattr(self, methodName, _TestMethodWrapper(getattr(self, methodName))) + def setUp(self): super(AsyncTestCase, self).setUp() self.io_loop = self.get_new_ioloop() @@ -352,6 +395,7 @@ class AsyncHTTPTestCase(AsyncTestCase): def tearDown(self): self.http_server.stop() + self.io_loop.run_sync(self.http_server.close_all_connections) if (not IOLoop.initialized() or self.http_client.io_loop is not IOLoop.instance()): self.http_client.close() @@ -414,18 +458,50 @@ def gen_test(func=None, timeout=None): .. versionadded:: 3.1 The ``timeout`` argument and ``ASYNC_TEST_TIMEOUT`` environment variable. + + .. versionchanged:: 3.3 + The wrapper now passes along ``*args, **kwargs`` so it can be used + on functions with arguments. """ if timeout is None: timeout = get_async_test_timeout() def wrap(f): - f = gen.coroutine(f) - + # Stack up several decorators to allow us to access the generator + # object itself. In the innermost wrapper, we capture the generator + # and save it in an attribute of self. Next, we run the wrapped + # function through @gen.coroutine. Finally, the coroutine is + # wrapped again to make it synchronous with run_sync. + # + # This is a good case study arguing for either some sort of + # extensibility in the gen decorators or cancellation support. 
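In practice the wrapper stack means `gen_test` now works on test methods that take arguments and still yields a useful traceback on timeout, while `_TestMethodWrapper` catches the classic mistake of writing a generator test without the decorator. Typical usage:

```python
from tornado import gen
from tornado.testing import AsyncTestCase, gen_test

class TimerTest(AsyncTestCase):
    @gen_test(timeout=10)   # keyword form; bare @gen_test also works
    def test_short_sleep(self):
        # Without @gen_test this generator would never run, and
        # _TestMethodWrapper would raise TypeError instead of silently passing.
        yield gen.Task(self.io_loop.add_timeout, self.io_loop.time() + 0.1)
```
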
@functools.wraps(f) - def wrapper(self): - return self.io_loop.run_sync( - functools.partial(f, self), timeout=timeout) - return wrapper + def pre_coroutine(self, *args, **kwargs): + result = f(self, *args, **kwargs) + if isinstance(result, types.GeneratorType): + self._test_generator = result + else: + self._test_generator = None + return result + + coro = gen.coroutine(pre_coroutine) + + @functools.wraps(coro) + def post_coroutine(self, *args, **kwargs): + try: + return self.io_loop.run_sync( + functools.partial(coro, self, *args, **kwargs), + timeout=timeout) + except TimeoutError as e: + # run_sync raises an error with an unhelpful traceback. + # If we throw it back into the generator the stack trace + # will be replaced by the point where the test is stopped. + self._test_generator.throw(e) + # In case the test contains an overly broad except clause, + # we may get back here. In this case re-raise the original + # exception, which is better than nothing. + raise + return post_coroutine if func is not None: # Used like: diff --git a/libs/tornado/util.py b/libs/tornado/util.py index a2fba779..49eea2c3 100755 --- a/libs/tornado/util.py +++ b/libs/tornado/util.py @@ -12,11 +12,19 @@ and `.Resolver`. from __future__ import absolute_import, division, print_function, with_statement +import array import inspect +import os import sys import zlib +try: + xrange # py2 +except NameError: + xrange = range # py3 + + class ObjectDict(dict): """Makes a dictionary behave like an object, with attribute-style access. """ @@ -33,7 +41,7 @@ class ObjectDict(dict): class GzipDecompressor(object): """Streaming gzip decompressor. - The interface is like that of `zlib.decompressobj` (without the + The interface is like that of `zlib.decompressobj` (without some of the optional arguments, but it understands gzip headers and checksums. """ def __init__(self): @@ -42,14 +50,24 @@ class GzipDecompressor(object): # This works on cpython and pypy, but not jython. self.decompressobj = zlib.decompressobj(16 + zlib.MAX_WBITS) - def decompress(self, value): + def decompress(self, value, max_length=None): """Decompress a chunk, returning newly-available data. Some data may be buffered for later processing; `flush` must be called when there is no more input data to ensure that all data was processed. + + If ``max_length`` is given, some input data may be left over + in ``unconsumed_tail``; you must retrieve this value and pass + it back to a future call to `decompress` if it is not empty. """ - return self.decompressobj.decompress(value) + return self.decompressobj.decompress(value, max_length) + + @property + def unconsumed_tail(self): + """Returns the unconsumed portion left over + """ + return self.decompressobj.unconsumed_tail def flush(self): """Return any remaining buffered data not yet returned by decompress. @@ -132,6 +150,24 @@ def exec_in(code, glob, loc=None): """) +def errno_from_exception(e): + """Provides the errno from an Exception object. + + There are cases that the errno attribute was not set so we pull + the errno out of the args but if someone instatiates an Exception + without any args you will get a tuple error. So this function + abstracts all that behavior to give you a safe way to get the + errno. + """ + + if hasattr(e, 'errno'): + return e.errno + elif e.args: + return e.args[0] + else: + return None + + class Configurable(object): """Base class for configurable interfaces. 
@@ -243,6 +279,16 @@ class ArgReplacer(object): # Not a positional parameter self.arg_pos = None + def get_old_value(self, args, kwargs, default=None): + """Returns the old value of the named argument without replacing it. + + Returns ``default`` if the argument is not present. + """ + if self.arg_pos is not None and len(args) > self.arg_pos: + return args[self.arg_pos] + else: + return kwargs.get(self.name, default) + def replace(self, new_value, args, kwargs): """Replace the named argument in ``args, kwargs`` with ``new_value``. @@ -265,6 +311,41 @@ class ArgReplacer(object): return old_value, args, kwargs +def _websocket_mask_python(mask, data): + """Websocket masking function. + + `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. + Returns a `bytes` object of the same length as `data` with the mask applied + as specified in section 5.3 of RFC 6455. + + This pure-python implementation may be replaced by an optimized version when available. + """ + mask = array.array("B", mask) + unmasked = array.array("B", data) + for i in xrange(len(data)): + unmasked[i] = unmasked[i] ^ mask[i % 4] + if hasattr(unmasked, 'tobytes'): + # tostring was deprecated in py32. It hasn't been removed, + # but since we turn on deprecation warnings in our tests + # we need to use the right one. + return unmasked.tobytes() + else: + return unmasked.tostring() + +if (os.environ.get('TORNADO_NO_EXTENSION') or + os.environ.get('TORNADO_EXTENSION') == '0'): + # These environment variables exist to make it easier to do performance + # comparisons; they are not guaranteed to remain supported in the future. + _websocket_mask = _websocket_mask_python +else: + try: + from tornado.speedups import websocket_mask as _websocket_mask + except ImportError: + if os.environ.get('TORNADO_EXTENSION') == '1': + raise + _websocket_mask = _websocket_mask_python + + def doctests(): import doctest return doctest.DocTestSuite() diff --git a/libs/tornado/web.py b/libs/tornado/web.py index b22b11fe..dd2b5ef5 100755 --- a/libs/tornado/web.py +++ b/libs/tornado/web.py @@ -72,17 +72,18 @@ import time import tornado import traceback import types -import uuid -from tornado.concurrent import Future +from tornado.concurrent import Future, is_future from tornado import escape +from tornado import gen from tornado import httputil +from tornado import iostream from tornado import locale from tornado.log import access_log, app_log, gen_log from tornado import stack_context from tornado import template from tornado.escape import utf8, _unicode -from tornado.util import bytes_type, import_object, ObjectDict, raise_exc_info, unicode_type +from tornado.util import bytes_type, import_object, ObjectDict, raise_exc_info, unicode_type, _websocket_mask try: from io import BytesIO # python 3 @@ -105,6 +106,39 @@ except ImportError: from urllib.parse import urlencode # py3 +MIN_SUPPORTED_SIGNED_VALUE_VERSION = 1 +"""The oldest signed value version supported by this version of Tornado. + +Signed values older than this version cannot be decoded. + +.. versionadded:: 3.2.1 +""" + +MAX_SUPPORTED_SIGNED_VALUE_VERSION = 2 +"""The newest signed value version supported by this version of Tornado. + +Signed values newer than this version cannot be decoded. + +.. versionadded:: 3.2.1 +""" + +DEFAULT_SIGNED_VALUE_VERSION = 2 +"""The signed value version produced by `.RequestHandler.create_signed_value`. + +May be overridden by passing a ``version`` keyword argument. + +.. 
versionadded:: 3.2.1
+"""
+
+DEFAULT_SIGNED_VALUE_MIN_VERSION = 1
+"""The oldest signed value accepted by `.RequestHandler.get_secure_cookie`.

+May be overridden by passing a ``min_version`` keyword argument.
+
+.. versionadded:: 3.2.1
+"""
+
+
 class RequestHandler(object):
     """Subclass this class and define `get()` or `post()` to make a handler.

@@ -128,6 +162,7 @@ class RequestHandler(object):
         self._finished = False
         self._auto_finish = True
         self._transforms = None  # will be set in _execute
+        self._prepared_future = None
         self.path_args = None
         self.path_kwargs = None
         self.ui = ObjectDict((n, self._ui_method(m)) for n, m in
@@ -141,10 +176,7 @@ class RequestHandler(object):
             application.ui_modules)
         self.ui["modules"] = self.ui["_tt_modules"]
         self.clear()
-        # Check since connection is not available in WSGI
-        if getattr(self.request, "connection", None):
-            self.request.connection.set_close_callback(
-                self.on_connection_close)
+        self.request.connection.set_close_callback(self.on_connection_close)
         self.initialize(**kwargs)

     def initialize(self):
@@ -235,7 +267,9 @@ class RequestHandler(object):
         may not be called promptly after the end user closes their
         connection.
         """
-        pass
+        if _has_stream_request_body(self.__class__):
+            if not self.request.body.done():
+                self.request.body.set_exception(iostream.StreamClosedError())

     def clear(self):
         """Resets all headers and content for this response."""
@@ -245,12 +279,6 @@ class RequestHandler(object):
             "Date": httputil.format_timestamp(time.time()),
         })
         self.set_default_headers()
-        if (not self.request.supports_http_1_1() and
-                getattr(self.request, 'connection', None) and
-                not self.request.connection.no_keep_alive):
-            conn_header = self.request.headers.get("Connection")
-            if conn_header and (conn_header.lower() == "keep-alive"):
-                self._headers["Connection"] = "Keep-Alive"
         self._write_buffer = []
         self._status_code = 200
         self._reason = httputil.responses[200]
@@ -455,7 +483,7 @@
     @property
     def cookies(self):
-        """An alias for `self.request.cookies <.httpserver.HTTPRequest.cookies>`."""
+        """An alias for `self.request.cookies <.httputil.HTTPServerRequest.cookies>`."""
         return self.request.cookies

     def get_cookie(self, name, default=None):
@@ -524,7 +552,8 @@
         for name in self.request.cookies:
             self.clear_cookie(name, path=path, domain=domain)

-    def set_secure_cookie(self, name, value, expires_days=30, **kwargs):
+    def set_secure_cookie(self, name, value, expires_days=30, version=None,
+                          **kwargs):
         """Signs and timestamps a cookie so it cannot be forged.

         You must specify the ``cookie_secret`` setting in your Application
@@ -539,32 +568,50 @@

         Secure cookies may contain arbitrary byte values, not just unicode
         strings (unlike regular cookies)
+
+        .. versionchanged:: 3.2.1
+
+           Added the ``version`` argument. Introduced cookie version 2
+           and made it the default.
         """
-        self.set_cookie(name, self.create_signed_value(name, value),
+        self.set_cookie(name, self.create_signed_value(name, value,
+                                                       version=version),
                         expires_days=expires_days, **kwargs)

-    def create_signed_value(self, name, value):
+    def create_signed_value(self, name, value, version=None):
         """Signs and timestamps a string so it cannot be forged.

         Normally used via set_secure_cookie, but provided as a separate
         method for non-cookie uses. To decode a value not stored
         as a cookie use the optional value argument to get_secure_cookie.
+
+        .. versionchanged:: 3.2.1
+
+           Added the ``version`` argument.
Introduced cookie version 2 + and made it the default. """ self.require_setting("cookie_secret", "secure cookies") return create_signed_value(self.application.settings["cookie_secret"], - name, value) + name, value, version=version) - def get_secure_cookie(self, name, value=None, max_age_days=31): + def get_secure_cookie(self, name, value=None, max_age_days=31, + min_version=None): """Returns the given signed cookie if it validates, or None. The decoded cookie value is returned as a byte string (unlike `get_cookie`). + + .. versionchanged:: 3.2.1 + + Added the ``min_version`` argument. Introduced cookie version 2; + both versions 1 and 2 are accepted by default. """ self.require_setting("cookie_secret", "secure cookies") if value is None: value = self.get_cookie(name) return decode_signed_value(self.application.settings["cookie_secret"], - name, value, max_age_days=max_age_days) + name, value, max_age_days=max_age_days, + min_version=min_version) def redirect(self, url, permanent=False, status=None): """Sends a redirect to the given (optionally relative) URL. @@ -598,12 +645,15 @@ class RequestHandler(object): Note that lists are not converted to JSON because of a potential cross-site security vulnerability. All JSON output should be wrapped in a dictionary. More details at - http://haacked.com/archive/2008/11/20/anatomy-of-a-subtle-json-vulnerability.aspx + http://haacked.com/archive/2009/06/25/json-hijacking.aspx/ and + https://github.com/facebook/tornado/issues/1009 """ if self._finished: raise RuntimeError("Cannot write() after finish(). May be caused " "by using async operations without the " "@asynchronous decorator.") + if not isinstance(chunk, (bytes_type, unicode_type, dict)): + raise TypeError("write() only accepts bytes, unicode, and dict objects") if isinstance(chunk, dict): chunk = escape.json_encode(chunk) self.set_header("Content-Type", "application/json; charset=UTF-8") @@ -769,14 +819,10 @@ class RequestHandler(object): Note that only one flush callback can be outstanding at a time; if another flush occurs before the previous flush's callback has been run, the previous callback will be discarded. - """ - if self.application._wsgi: - # WSGI applications cannot usefully support flush, so just make - # it a no-op (and run the callback immediately). - if callback is not None: - callback() - return + .. versionchanged:: 3.3 + Now returns a `.Future` if no callback is given. + """ chunk = b"".join(self._write_buffer) self._write_buffer = [] if not self._headers_written: @@ -785,19 +831,32 @@ class RequestHandler(object): self._status_code, self._headers, chunk = \ transform.transform_first_chunk( self._status_code, self._headers, chunk, include_footers) - headers = self._generate_headers() + # Ignore the chunk and only write the headers for HEAD requests + if self.request.method == "HEAD": + chunk = None + + # Finalize the cookie headers (which have been stored in a side + # object so an outgoing cookie could be overwritten before it + # is sent). 
+        if hasattr(self, "_new_cookie"):
+            for cookie in self._new_cookie.values():
+                self.add_header("Set-Cookie", cookie.OutputString(None))
+
+            start_line = httputil.ResponseStartLine(self.request.version,
+                                                    self._status_code,
+                                                    self._reason)
+            return self.request.connection.write_headers(
+                start_line, self._headers, chunk, callback=callback)
         else:
             for transform in self._transforms:
                 chunk = transform.transform_chunk(chunk, include_footers)
-            headers = b""
-
-        # Ignore the chunk and only write the headers for HEAD requests
-        if self.request.method == "HEAD":
-            if headers:
-                self.request.write(headers, callback=callback)
-            return
-
-        self.request.write(headers + chunk, callback=callback)
+            # Ignore the chunk and only write the headers for HEAD requests
+            if self.request.method != "HEAD":
+                return self.request.connection.write(chunk, callback=callback)
+            else:
+                future = Future()
+                future.set_result(None)
+                return future

     def finish(self, chunk=None):
         """Finishes this response, ending the HTTP request."""
@@ -833,10 +892,9 @@
         # are keepalive connections)
         self.request.connection.set_close_callback(None)

-        if not self.application._wsgi:
-            self.flush(include_footers=True)
-            self.request.finish()
-            self._log()
+        self.flush(include_footers=True)
+        self.request.finish()
+        self._log()
         self._finished = True
         self.on_finish()
         # Break up a reference cycle between this handler and the
@@ -1017,16 +1075,87 @@
         as a potential forgery.

         See http://en.wikipedia.org/wiki/Cross-site_request_forgery
+
+        .. versionchanged:: 3.2.2
+           The xsrf token will now have a random mask applied in every
+           request, which makes it safe to include the token in pages
+           that are compressed. See http://breachattack.com for more
+           information on the issue fixed by this change. Old (version 1)
+           cookies will be converted to version 2 when this method is called
+           unless the ``xsrf_cookie_version`` `Application` setting is
+           set to 1.
         """
         if not hasattr(self, "_xsrf_token"):
-            token = self.get_cookie("_xsrf")
-            if not token:
-                token = binascii.b2a_hex(uuid.uuid4().bytes)
+            version, token, timestamp = self._get_raw_xsrf_token()
+            output_version = self.settings.get("xsrf_cookie_version", 2)
+            if output_version == 1:
+                self._xsrf_token = binascii.b2a_hex(token)
+            elif output_version == 2:
+                mask = os.urandom(4)
+                self._xsrf_token = b"|".join([
+                    b"2",
+                    binascii.b2a_hex(mask),
+                    binascii.b2a_hex(_websocket_mask(mask, token)),
+                    utf8(str(int(timestamp)))])
+            else:
+                raise ValueError("unknown xsrf cookie version %d"
+                                 % output_version)
+            if version is None:
                 expires_days = 30 if self.current_user else None
-                self.set_cookie("_xsrf", token, expires_days=expires_days)
-            self._xsrf_token = token
+                self.set_cookie("_xsrf", self._xsrf_token,
+                                expires_days=expires_days)
         return self._xsrf_token

+    def _get_raw_xsrf_token(self):
+        """Read or generate the xsrf token in its raw form.
+
+        The raw_xsrf_token is a tuple containing:
+
+        * version: the version of the cookie from which this token was read,
+          or None if we generated a new token in this request.
+        * token: the raw token data; random (non-ascii) bytes.
+        * timestamp: the time this token was generated (will not be accurate
+          for version 1 cookies)
+        """
+        if not hasattr(self, '_raw_xsrf_token'):
+            cookie = self.get_cookie("_xsrf")
+            if cookie:
+                version, token, timestamp = self._decode_xsrf_token(cookie)
+            else:
+                version, token, timestamp = None, None, None
+            if token is None:
+                version = None
+                token = os.urandom(16)
+                timestamp = time.time()
+            self._raw_xsrf_token = (version, token, timestamp)
+        return self._raw_xsrf_token
+
+    def _decode_xsrf_token(self, cookie):
+        """Convert a cookie string into the tuple form returned by
+        _get_raw_xsrf_token.
+        """
+        m = _signed_value_version_re.match(utf8(cookie))
+        if m:
+            version = int(m.group(1))
+            if version == 2:
+                _, mask, masked_token, timestamp = cookie.split("|")
+                mask = binascii.a2b_hex(utf8(mask))
+                token = _websocket_mask(
+                    mask, binascii.a2b_hex(utf8(masked_token)))
+                timestamp = int(timestamp)
+                return version, token, timestamp
+            else:
+                # Treat unknown versions as not present instead of failing.
+                return None, None, None
+        elif len(cookie) == 32:
+            version = 1
+            token = binascii.a2b_hex(utf8(cookie))
+            # We don't have a usable timestamp in older versions.
+            timestamp = int(time.time())
+            return (version, token, timestamp)
+        else:
+            return None, None, None
+
     def check_xsrf_cookie(self):
         """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.

@@ -1047,13 +1176,19 @@
         information please see
         http://www.djangoproject.com/weblog/2011/feb/08/security/
         http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
+
+        .. versionchanged:: 3.2.2
+           Added support for cookie version 2. Both versions 1 and 2 are
+           supported.
         """
         token = (self.get_argument("_xsrf", None) or
                  self.request.headers.get("X-Xsrftoken") or
                  self.request.headers.get("X-Csrftoken"))
         if not token:
             raise HTTPError(403, "'_xsrf' argument missing from POST")
-        if self.xsrf_token != token:
+        _, token, _ = self._decode_xsrf_token(token)
+        _, expected_token, _ = self._get_raw_xsrf_token()
+        if not _time_independent_equals(utf8(token), utf8(expected_token)):
             raise HTTPError(403, "XSRF cookie does not match POST argument")

     def xsrf_form_html(self):
@@ -1194,6 +1329,7 @@
             self._handle_request_exception(value)
             return True

+    @gen.coroutine
     def _execute(self, transforms, *args, **kwargs):
         """Executes this request with the given output transforms."""
         self._transforms = transforms
@@ -1208,52 +1344,52 @@
             if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
                     self.application.settings.get("xsrf_cookies"):
                 self.check_xsrf_cookie()
-            self._when_complete(self.prepare(), self._execute_method)
-        except Exception as e:
-            self._handle_request_exception(e)

-    def _when_complete(self, result, callback):
-        try:
-            if result is None:
-                callback()
-            elif isinstance(result, Future):
-                if result.done():
-                    if result.result() is not None:
-                        raise ValueError('Expected None, got %r' % result.result())
-                    callback()
-                else:
-                    # Delayed import of IOLoop because it's not available
-                    # on app engine
-                    from tornado.ioloop import IOLoop
-                    IOLoop.current().add_future(
-                        result, functools.partial(self._when_complete,
-                                                  callback=callback))
-            else:
-                raise ValueError("Expected Future or None, got %r" % result)
-        except Exception as e:
-            self._handle_request_exception(e)
+            result = self.prepare()
+            if is_future(result):
+                result = yield result
+            if result is not None:
+                raise TypeError("Expected None, got %r" % result)
+            if
self._prepared_future is not None:
+                # Tell the Application we've finished with prepare()
+                # and are ready for the body to arrive.
+                self._prepared_future.set_result(None)
+            if self._finished:
+                return
+
+            if _has_stream_request_body(self.__class__):
+                # In streaming mode request.body is a Future that signals
+                # the body has been completely received. The Future has no
+                # result; the data has been passed to self.data_received
+                # instead.
+                try:
+                    yield self.request.body
+                except iostream.StreamClosedError:
+                    return

-    def _execute_method(self):
-        if not self._finished:
             method = getattr(self, self.request.method.lower())
-            self._when_complete(method(*self.path_args, **self.path_kwargs),
-                                self._execute_finish)
+            result = method(*self.path_args, **self.path_kwargs)
+            if is_future(result):
+                result = yield result
+            if result is not None:
+                raise TypeError("Expected None, got %r" % result)
+            if self._auto_finish and not self._finished:
+                self.finish()
+        except Exception as e:
+            self._handle_request_exception(e)
+            if (self._prepared_future is not None and
+                    not self._prepared_future.done()):
+                # In case we failed before setting _prepared_future, do it
+                # now (to unblock the HTTP server). Note that this is not
+                # in a finally block to avoid GC issues prior to Python 3.4.
+                self._prepared_future.set_result(None)

-    def _execute_finish(self):
-        if self._auto_finish and not self._finished:
-            self.finish()
+    def data_received(self, chunk):
+        """Implement this method to handle streamed request data.
-    def _generate_headers(self):
-        reason = self._reason
-        lines = [utf8(self.request.version + " " +
-                      str(self._status_code) +
-                      " " + reason)]
-        lines.extend([utf8(n) + b": " + utf8(v) for n, v in self._headers.get_all()])
-
-        if hasattr(self, "_new_cookie"):
-            for cookie in self._new_cookie.values():
-                lines.append(utf8("Set-Cookie: " + cookie.OutputString(None)))
-        return b"\r\n".join(lines) + b"\r\n\r\n"
+        Requires the `.stream_request_body` decorator.
+        """
+        raise NotImplementedError()

     def _log(self):
         """Logs the current request.
@@ -1367,8 +1503,6 @@ def asynchronous(method):
     from tornado.ioloop import IOLoop
     @functools.wraps(method)
     def wrapper(self, *args, **kwargs):
-        if self.application._wsgi:
-            raise Exception("@asynchronous is not supported for WSGI apps")
         self._auto_finish = False
         with stack_context.ExceptionStackContext(
                 self._stack_context_handle_exception):
@@ -1395,6 +1529,40 @@ def asynchronous(method):
     return wrapper


+def stream_request_body(cls):
+    """Apply to `RequestHandler` subclasses to enable streaming body support.
+
+    This decorator implies the following changes:
+
+    * `.HTTPServerRequest.body` is undefined, and body arguments will not
+      be included in `RequestHandler.get_argument`.
+    * `RequestHandler.prepare` is called when the request headers have been
+      read instead of after the entire body has been read.
+    * The subclass must define a method ``data_received(self, data):``, which
+      will be called zero or more times as data is available. Note that
+      if the request has an empty body, ``data_received`` may not be called.
+    * ``prepare`` and ``data_received`` may return Futures (such as via
+      ``@gen.coroutine``), in which case the next method will not be called
+      until those futures have completed.
+    * The regular HTTP method (``post``, ``put``, etc) will be called after
+      the entire body has been read.
+
+    There is a subtle interaction between ``data_received`` and asynchronous
+    ``prepare``: The first call to ``data_received`` may occur at any point
+    after the call to ``prepare`` has returned *or yielded*.
+    """
+    if not issubclass(cls, RequestHandler):
+        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
+    cls._stream_request_body = True
+    return cls
+
+
+def _has_stream_request_body(cls):
+    if not issubclass(cls, RequestHandler):
+        raise TypeError("expected subclass of RequestHandler, got %r" % cls)
+    return getattr(cls, '_stream_request_body', False)
+
+
 def removeslash(method):
     """Use this decorator to remove trailing slashes from the request path.

@@ -1439,7 +1607,7 @@ def addslash(method):
     return wrapper


-class Application(object):
+class Application(httputil.HTTPServerConnectionDelegate):
     """A collection of request handlers that make up a web application.

     Instances of this class are callable and can be passed directly to
@@ -1491,12 +1659,11 @@
     """
     def __init__(self, handlers=None, default_host="", transforms=None,
-                 wsgi=False, **settings):
+                 **settings):
         if transforms is None:
             self.transforms = []
             if settings.get("gzip"):
                 self.transforms.append(GZipContentEncoding)
-            self.transforms.append(ChunkedTransferEncoding)
         else:
             self.transforms = transforms
         self.handlers = []
@@ -1508,7 +1675,6 @@
             'Template': TemplateModule,
         }
         self.ui_methods = {}
-        self._wsgi = wsgi
         self._load_ui_modules(settings.get("ui_modules", {}))
         self._load_ui_methods(settings.get("ui_methods", {}))
         if self.settings.get("static_path"):
@@ -1534,7 +1700,7 @@
         self.settings.setdefault('serve_traceback', True)

         # Automatically reload modified modules
-        if self.settings.get('autoreload') and not wsgi:
+        if self.settings.get('autoreload'):
             from tornado import autoreload
             autoreload.start()

@@ -1634,64 +1800,15 @@
         except TypeError:
             pass

+    def start_request(self, connection):
+        # Modern HTTPServer interface
+        return _RequestDispatcher(self, connection)
+
     def __call__(self, request):
-        """Called by HTTPServer to execute the request."""
-        transforms = [t(request) for t in self.transforms]
-        handler = None
-        args = []
-        kwargs = {}
-        handlers = self._get_host_handlers(request)
-        if not handlers:
-            handler = RedirectHandler(
-                self, request, url="http://" + self.default_host + "/")
-        else:
-            for spec in handlers:
-                match = spec.regex.match(request.path)
-                if match:
-                    handler = spec.handler_class(self, request, **spec.kwargs)
-                    if spec.regex.groups:
-                        # None-safe wrapper around url_unescape to handle
-                        # unmatched optional groups correctly
-                        def unquote(s):
-                            if s is None:
-                                return s
-                            return escape.url_unescape(s, encoding=None,
-                                                       plus=False)
-                        # Pass matched groups to the handler. Since
-                        # match.groups() includes both named and unnamed groups,
-                        # we want to use either groups or groupdict but not both.
-                        # Note that args are passed as bytes so the handler can
-                        # decide what encoding to use.
- - if spec.regex.groupindex: - kwargs = dict( - (str(k), unquote(v)) - for (k, v) in match.groupdict().items()) - else: - args = [unquote(s) for s in match.groups()] - break - if not handler: - if self.settings.get('default_handler_class'): - handler_class = self.settings['default_handler_class'] - handler_args = self.settings.get( - 'default_handler_args', {}) - else: - handler_class = ErrorHandler - handler_args = dict(status_code=404) - handler = handler_class(self, request, **handler_args) - - # If template cache is disabled (usually in the debug mode), - # re-compile templates and reload static files on every - # request so you don't need to restart to see changes - if not self.settings.get("compiled_template_cache", True): - with RequestHandler._template_loader_lock: - for loader in RequestHandler._template_loaders.values(): - loader.reset() - if not self.settings.get('static_hash_cache', True): - StaticFileHandler.reset() - - handler._execute(transforms, *args, **kwargs) - return handler + # Legacy HTTPServer interface + dispatcher = _RequestDispatcher(self, None) + dispatcher.set_request(request) + return dispatcher.execute() def reverse_url(self, name, *args): """Returns a URL path for handler named ``name`` @@ -1728,6 +1845,113 @@ class Application(object): handler._request_summary(), request_time) +class _RequestDispatcher(httputil.HTTPMessageDelegate): + def __init__(self, application, connection): + self.application = application + self.connection = connection + self.request = None + self.chunks = [] + self.handler_class = None + self.handler_kwargs = None + self.path_args = [] + self.path_kwargs = {} + + def headers_received(self, start_line, headers): + self.set_request(httputil.HTTPServerRequest( + connection=self.connection, start_line=start_line, headers=headers)) + if self.stream_request_body: + self.request.body = Future() + return self.execute() + + def set_request(self, request): + self.request = request + self._find_handler() + self.stream_request_body = _has_stream_request_body(self.handler_class) + + def _find_handler(self): + # Identify the handler to use as soon as we have the request. + # Save url path arguments for later. + app = self.application + handlers = app._get_host_handlers(self.request) + if not handlers: + self.handler_class = RedirectHandler + self.handler_kwargs = dict(url="http://" + app.default_host + "/") + return + for spec in handlers: + match = spec.regex.match(self.request.path) + if match: + self.handler_class = spec.handler_class + self.handler_kwargs = spec.kwargs + if spec.regex.groups: + # Pass matched groups to the handler. Since + # match.groups() includes both named and + # unnamed groups, we want to use either groups + # or groupdict but not both. 
+ if spec.regex.groupindex: + self.path_kwargs = dict( + (str(k), _unquote_or_none(v)) + for (k, v) in match.groupdict().items()) + else: + self.path_args = [_unquote_or_none(s) + for s in match.groups()] + return + if app.settings.get('default_handler_class'): + self.handler_class = app.settings['default_handler_class'] + self.handler_kwargs = app.settings.get( + 'default_handler_args', {}) + else: + self.handler_class = ErrorHandler + self.handler_kwargs = dict(status_code=404) + + def data_received(self, data): + if self.stream_request_body: + return self.handler.data_received(data) + else: + self.chunks.append(data) + + def finish(self): + if self.stream_request_body: + self.request.body.set_result(None) + else: + self.request.body = b''.join(self.chunks) + self.request._parse_body() + self.execute() + + def on_connection_close(self): + if self.stream_request_body: + self.handler.on_connection_close() + else: + self.chunks = None + + def execute(self): + # If template cache is disabled (usually in the debug mode), + # re-compile templates and reload static files on every + # request so you don't need to restart to see changes + if not self.application.settings.get("compiled_template_cache", True): + with RequestHandler._template_loader_lock: + for loader in RequestHandler._template_loaders.values(): + loader.reset() + if not self.application.settings.get('static_hash_cache', True): + StaticFileHandler.reset() + + self.handler = self.handler_class(self.application, self.request, + **self.handler_kwargs) + transforms = [t(self.request) for t in self.application.transforms] + + if self.stream_request_body: + self.handler._prepared_future = Future() + # Note that if an exception escapes handler._execute it will be + # trapped in the Future it returns (which we are ignoring here). + # However, that shouldn't happen because _execute has a blanket + # except handler, and we cannot easily access the IOLoop here to + # call add_future. + self.handler._execute(transforms, *self.path_args, **self.path_kwargs) + # If we are streaming the request body, then execute() is finished + # when the handler has prepared to receive the body. If not, + # it doesn't matter when execute() finishes (so we return None) + return self.handler._prepared_future + + class HTTPError(Exception): """An exception that will turn into an HTTP error response. @@ -1886,8 +2110,9 @@ class StaticFileHandler(RequestHandler): cls._static_hashes = {} def head(self, path): - self.get(path, include_body=False) + return self.get(path, include_body=False) + @gen.coroutine def get(self, path, include_body=True): # Set up our path instance variables. self.path = self.parse_url_path(path) @@ -1912,9 +2137,9 @@ class StaticFileHandler(RequestHandler): # the request will be treated as if the header didn't exist. 
request_range = httputil._parse_request_range(range_header) + size = self.get_content_size() if request_range: start, end = request_range - size = self.get_content_size() if (start is not None and start >= size) or end == 0: # As per RFC 2616 14.35.1, a range is not satisfiable only: if # the first requested byte is equal to or greater than the @@ -1939,18 +2164,26 @@ class StaticFileHandler(RequestHandler): httputil._get_content_range(start, end, size)) else: start = end = None - content = self.get_content(self.absolute_path, start, end) - if isinstance(content, bytes_type): - content = [content] - content_length = 0 - for chunk in content: - if include_body: + + if start is not None and end is not None: + content_length = end - start + elif end is not None: + content_length = end + elif start is not None: + content_length = size - start + else: + content_length = size + self.set_header("Content-Length", content_length) + + if include_body: + content = self.get_content(self.absolute_path, start, end) + if isinstance(content, bytes_type): + content = [content] + for chunk in content: self.write(chunk) - else: - content_length += len(chunk) - if not include_body: + yield self.flush() + else: assert self.request.method == "HEAD" - self.set_header("Content-Length", content_length) def compute_etag(self): """Sets the ``Etag`` header based on static url version. @@ -2130,10 +2363,13 @@ class StaticFileHandler(RequestHandler): def get_content_size(self): """Retrieve the total size of the resource at the given path. - This method may be overridden by subclasses. It will only - be called if a partial result is requested from `get_content` + This method may be overridden by subclasses. .. versionadded:: 3.1 + + .. versionchanged:: 3.3 + This method is now always called, instead of only when + partial results are requested. """ stat_result = self._stat() return stat_result[stat.ST_SIZE] @@ -2255,7 +2491,7 @@ class FallbackHandler(RequestHandler): """A `RequestHandler` that wraps another HTTP server callback. The fallback is a callable object that accepts an - `~.httpserver.HTTPRequest`, such as an `Application` or + `~.httputil.HTTPServerRequest`, such as an `Application` or `tornado.wsgi.WSGIContainer`. This is most useful to use both Tornado ``RequestHandlers`` and WSGI in the same server. Typical usage:: @@ -2279,7 +2515,7 @@ class OutputTransform(object): """A transform modifies the result of an HTTP request (e.g., GZip encoding) A new transform instance is created for every request. See the - ChunkedTransferEncoding example below if you want to implement a + GZipContentEncoding example below if you want to implement a new Transform. """ def __init__(self, request): @@ -2296,16 +2532,24 @@ class GZipContentEncoding(OutputTransform): """Applies the gzip content encoding to the response. See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11 + + .. versionchanged:: 3.3 + Now compresses all mime types beginning with ``text/``, instead + of just a whitelist. (the whitelist is still used for certain + non-text mime types). """ - CONTENT_TYPES = set([ - "text/plain", "text/html", "text/css", "text/xml", "application/javascript", - "application/x-javascript", "application/xml", "application/atom+xml", - "text/javascript", "application/json", "application/xhtml+xml"]) + # Whitelist of compressible mime types (in addition to any types + # beginning with "text/"). 
+ CONTENT_TYPES = set(["application/javascript", "application/x-javascript", + "application/xml", "application/atom+xml", + "application/json", "application/xhtml+xml"]) MIN_LENGTH = 5 def __init__(self, request): - self._gzipping = request.supports_http_1_1() and \ - "gzip" in request.headers.get("Accept-Encoding", "") + self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "") + + def _compressible_type(self, ctype): + return ctype.startswith('text/') or ctype in self.CONTENT_TYPES def transform_first_chunk(self, status_code, headers, chunk, finishing): if 'Vary' in headers: @@ -2314,7 +2558,7 @@ class GZipContentEncoding(OutputTransform): headers['Vary'] = b'Accept-Encoding' if self._gzipping: ctype = _unicode(headers.get("Content-Type", "")).split(";")[0] - self._gzipping = (ctype in self.CONTENT_TYPES) and \ + self._gzipping = self._compressible_type(ctype) and \ (not finishing or len(chunk) >= self.MIN_LENGTH) and \ (finishing or "Content-Length" not in headers) and \ ("Content-Encoding" not in headers) @@ -2340,42 +2584,16 @@ class GZipContentEncoding(OutputTransform): return chunk -class ChunkedTransferEncoding(OutputTransform): - """Applies the chunked transfer encoding to the response. - - See http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.6.1 - """ - def __init__(self, request): - self._chunking = request.supports_http_1_1() - - def transform_first_chunk(self, status_code, headers, chunk, finishing): - # 304 responses have no body (not even a zero-length body), and so - # should not have either Content-Length or Transfer-Encoding headers. - if self._chunking and status_code != 304: - # No need to chunk the output if a Content-Length is specified - if "Content-Length" in headers or "Transfer-Encoding" in headers: - self._chunking = False - else: - headers["Transfer-Encoding"] = "chunked" - chunk = self.transform_chunk(chunk, finishing) - return status_code, headers, chunk - - def transform_chunk(self, block, finishing): - if self._chunking: - # Don't write out empty chunks because that means END-OF-STREAM - # with chunked encoding - if block: - block = utf8("%x" % len(block)) + b"\r\n" + block + b"\r\n" - if finishing: - block += b"0\r\n\r\n" - return block - - def authenticated(method): """Decorate methods with this to require that the user be logged in. If the user is not logged in, they will be redirected to the configured `login url `. + + If you configure a login url with a query parameter, Tornado will + assume you know what you're doing and use it as-is. If not, it + will add a `next` parameter so the login page knows where to send + you once you're logged in. 
""" @functools.wraps(method) def wrapper(self, *args, **kwargs): @@ -2640,29 +2858,103 @@ else: return result == 0 -def create_signed_value(secret, name, value): - timestamp = utf8(str(int(time.time()))) +def create_signed_value(secret, name, value, version=None, clock=None): + if version is None: + version = DEFAULT_SIGNED_VALUE_VERSION + if clock is None: + clock = time.time + timestamp = utf8(str(int(clock()))) value = base64.b64encode(utf8(value)) - signature = _create_signature(secret, name, value, timestamp) - value = b"|".join([value, timestamp, signature]) - return value + if version == 1: + signature = _create_signature_v1(secret, name, value, timestamp) + value = b"|".join([value, timestamp, signature]) + return value + elif version == 2: + # The v2 format consists of a version number and a series of + # length-prefixed fields "%d:%s", the last of which is a + # signature, all separated by pipes. All numbers are in + # decimal format with no leading zeros. The signature is an + # HMAC-SHA256 of the whole string up to that point, including + # the final pipe. + # + # The fields are: + # - format version (i.e. 2; no length prefix) + # - key version (currently 0; reserved for future key rotation features) + # - timestamp (integer seconds since epoch) + # - name (not encoded; assumed to be ~alphanumeric) + # - value (base64-encoded) + # - signature (hex-encoded; no length prefix) + def format_field(s): + return utf8("%d:" % len(s)) + utf8(s) + to_sign = b"|".join([ + b"2|1:0", + format_field(timestamp), + format_field(name), + format_field(value), + b'']) + signature = _create_signature_v2(secret, to_sign) + return to_sign + signature + else: + raise ValueError("Unsupported version %d" % version) + +# A leading version number in decimal with no leading zeros, followed by a pipe. +_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$") -def decode_signed_value(secret, name, value, max_age_days=31): +def decode_signed_value(secret, name, value, max_age_days=31, clock=None, min_version=None): + if clock is None: + clock = time.time + if min_version is None: + min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION + if min_version > 2: + raise ValueError("Unsupported min_version %d" % min_version) if not value: return None + + # Figure out what version this is. Version 1 did not include an + # explicit version field and started with arbitrary base64 data, + # which makes this tricky. + value = utf8(value) + m = _signed_value_version_re.match(value) + if m is None: + version = 1 + else: + try: + version = int(m.group(1)) + if version > 999: + # Certain payloads from the version-less v1 format may + # be parsed as valid integers. Due to base64 padding + # restrictions, this can only happen for numbers whose + # length is a multiple of 4, so we can treat all + # numbers up to 999 as versions, and for the rest we + # fall back to v1 format. 
+                version = 1
+        except ValueError:
+            version = 1
+
+    if version < min_version:
+        return None
+    if version == 1:
+        return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
+    elif version == 2:
+        return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
+    else:
+        return None
+
+
+def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
     parts = utf8(value).split(b"|")
     if len(parts) != 3:
         return None
-    signature = _create_signature(secret, name, parts[0], parts[1])
+    signature = _create_signature_v1(secret, name, parts[0], parts[1])
     if not _time_independent_equals(parts[2], signature):
         gen_log.warning("Invalid cookie signature %r", value)
         return None
     timestamp = int(parts[1])
-    if timestamp < time.time() - max_age_days * 86400:
+    if timestamp < clock() - max_age_days * 86400:
         gen_log.warning("Expired cookie %r", value)
         return None
-    if timestamp > time.time() + 31 * 86400:
+    if timestamp > clock() + 31 * 86400:
         # _cookie_signature does not hash a delimiter between the
         # parts of the cookie, so an attacker could transfer trailing
         # digits from the payload to the timestamp without altering the
@@ -2679,8 +2971,62 @@ def decode_signed_value(secret, name, value, max_age_days=31):
         return None


-def _create_signature(secret, *parts):
+def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
+    def _consume_field(s):
+        length, _, rest = s.partition(b':')
+        n = int(length)
+        field_value = rest[:n]
+        # In python 3, indexing bytes returns small integers; we must
+        # use a slice to get a byte string as in python 2.
+        if rest[n:n + 1] != b'|':
+            raise ValueError("malformed v2 signed value field")
+        rest = rest[n + 1:]
+        return field_value, rest
+    rest = value[2:]  # remove version number
+    try:
+        key_version, rest = _consume_field(rest)
+        timestamp, rest = _consume_field(rest)
+        name_field, rest = _consume_field(rest)
+        value_field, rest = _consume_field(rest)
+    except ValueError:
+        return None
+    passed_sig = rest
+    signed_string = value[:-len(passed_sig)]
+    expected_sig = _create_signature_v2(secret, signed_string)
+    if not _time_independent_equals(passed_sig, expected_sig):
+        return None
+    if name_field != utf8(name):
+        return None
+    timestamp = int(timestamp)
+    if timestamp < clock() - max_age_days * 86400:
+        # The signature has expired.
+        return None
+    try:
+        return base64.b64decode(value_field)
+    except Exception:
+        return None
+
+
+def _create_signature_v1(secret, *parts):
     hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
     for part in parts:
         hash.update(utf8(part))
     return utf8(hash.hexdigest())
+
+
+def _create_signature_v2(secret, s):
+    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
+    hash.update(utf8(s))
+    return utf8(hash.hexdigest())
+
+
+def _unquote_or_none(s):
+    """None-safe wrapper around url_unescape to handle unmatched optional
+    groups correctly.
+
+    Note that args are passed as bytes so the handler can decide what
+    encoding to use.
+    """
+    if s is None:
+        return s
+    return escape.url_unescape(s, encoding=None, plus=False)
diff --git a/libs/tornado/websocket.py b/libs/tornado/websocket.py
index 9bec9bba..c0065c79 100755
--- a/libs/tornado/websocket.py
+++ b/libs/tornado/websocket.py
@@ -20,7 +20,6 @@ communication between the browser and server.
from __future__ import absolute_import, division, print_function, with_statement
# Author: Jacob Kristhammar, 2010

-import array
 import base64
 import collections
 import functools
@@ -32,14 +31,19 @@ import tornado.escape
 import tornado.web

 from tornado.concurrent import TracebackFuture
-from tornado.escape import utf8, native_str
+from tornado.escape import utf8, native_str, to_unicode
 from tornado import httpclient, httputil
 from tornado.ioloop import IOLoop
 from tornado.iostream import StreamClosedError
 from tornado.log import gen_log, app_log
-from tornado.netutil import Resolver
 from tornado import simple_httpclient
-from tornado.util import bytes_type, unicode_type
+from tornado.tcpclient import TCPClient
+from tornado.util import bytes_type, unicode_type, _websocket_mask
+
+try:
+    from urllib.parse import urlparse  # py3
+except ImportError:
+    from urlparse import urlparse  # py2

 try:
     xrange  # py2
@@ -108,20 +112,17 @@ class WebSocketHandler(tornado.web.RequestHandler):
     def __init__(self, application, request, **kwargs):
         tornado.web.RequestHandler.__init__(self, application, request,
                                             **kwargs)
-        self.stream = request.connection.stream
         self.ws_connection = None
+        self.close_code = None
+        self.close_reason = None

-    def _execute(self, transforms, *args, **kwargs):
+    @tornado.web.asynchronous
+    def get(self, *args, **kwargs):
         self.open_args = args
         self.open_kwargs = kwargs

-        # Websocket only supports GET method
-        if self.request.method != 'GET':
-            self.stream.write(tornado.escape.utf8(
-                "HTTP/1.1 405 Method Not Allowed\r\n\r\n"
-            ))
-            self.stream.close()
-            return
+        self.stream = self.request.connection.detach()
+        self.stream.set_close_callback(self.on_connection_close)

         # Upgrade header should be present and should be equal to WebSocket
         if self.request.headers.get("Upgrade", "").lower() != 'websocket':
@@ -144,9 +145,26 @@ class WebSocketHandler(tornado.web.RequestHandler):
             self.stream.close()
             return

+        # Handle WebSocket Origin naming convention differences
         # The difference between version 8 and 13 is that in 8 the
         # client sends a "Sec-Websocket-Origin" header and in 13 it's
         # simply "Origin".
+        if "Origin" in self.request.headers:
+            origin = self.request.headers.get("Origin")
+        else:
+            origin = self.request.headers.get("Sec-Websocket-Origin", None)
+
+        # If there was an origin header, check to make sure it matches
+        # according to check_origin. When the origin is None, we assume it
+        # did not come from a browser and that it can be passed on.
+        if origin is not None and not self.check_origin(origin):
+            self.stream.write(tornado.escape.utf8(
+                "HTTP/1.1 403 Cross Origin Websockets Disabled\r\n\r\n"
+            ))
+            self.stream.close()
+            return
+
         if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
             self.ws_connection = WebSocketProtocol13(self)
             self.ws_connection.accept_connection()
@@ -160,6 +178,7 @@ class WebSocketHandler(tornado.web.RequestHandler):
                 "Sec-WebSocket-Version: 8\r\n\r\n"))
             self.stream.close()

     def write_message(self, message, binary=False):
         """Sends the given message to the client of this Web Socket.

@@ -220,18 +239,70 @@ class WebSocketHandler(tornado.web.RequestHandler):
         pass

     def on_close(self):
-        """Invoked when the WebSocket is closed."""
+        """Invoked when the WebSocket is closed.
+
+        If the connection was closed cleanly and a status code or reason
+        phrase was supplied, these values will be available as the attributes
+        ``self.close_code`` and ``self.close_reason``.
+
+        .. versionchanged:: 3.3
+
+           Added ``close_code`` and ``close_reason`` attributes.
+ """ pass - def close(self): + def close(self, code=None, reason=None): """Closes this Web Socket. Once the close handshake is successful the socket will be closed. + + ``code`` may be a numeric status code, taken from the values + defined in `RFC 6455 section 7.4.1 + `_. + ``reason`` may be a textual message about why the connection is + closing. These values are made available to the client, but are + not otherwise interpreted by the websocket protocol. + + The ``code`` and ``reason`` arguments are ignored in the "draft76" + protocol version. + + .. versionchanged:: 3.3 + + Added the ``code`` and ``reason`` arguments. """ if self.ws_connection: - self.ws_connection.close() + self.ws_connection.close(code, reason) self.ws_connection = None + def check_origin(self, origin): + """Override to enable support for allowing alternate origins. + + The ``origin`` argument is the value of the ``Origin`` HTTP + header, the url responsible for initiating this request. This + method is not called for clients that do not send this header; + such requests are always allowed (because all browsers that + implement WebSockets support this header, and non-browser + clients do not have the same cross-site security concerns). + + Should return True to accept the request or False to reject it. + By default, rejects all requests with an origin on a host other + than this one. + + This is a security protection against cross site scripting attacks on + browsers, since WebSockets are allowed to bypass the usual same-origin + policies and don't use CORS headers. + + .. versionadded:: 3.3 + """ + parsed_origin = urlparse(origin) + origin = parsed_origin.netloc + origin = origin.lower() + + host = self.request.headers.get("Host") + + # Check to see that origin matches host directly, including ports + return origin == host + def allow_draft76(self): """Override to enable support for the older "draft76" protocol. 
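The new ``check_origin`` hook shown above rejects any browser origin that does not exactly match the ``Host`` header. Handlers that must accept connections from known sibling domains can override it; a sketch with a hypothetical whitelist:

```python
import tornado.websocket

try:
    from urllib.parse import urlparse  # py3
except ImportError:
    from urlparse import urlparse  # py2

class EchoHandler(tornado.websocket.WebSocketHandler):
    # Hypothetical whitelist; the default only allows an exact Host match.
    ALLOWED_ORIGINS = {"example.com", "app.example.com"}

    def check_origin(self, origin):
        # `origin` is the Origin header value, e.g. "https://app.example.com".
        return urlparse(origin).netloc.lower() in self.ALLOWED_ORIGINS

    def on_message(self, message):
        self.write_message(message)  # echo back
```

Returning ``True`` unconditionally restores the old anything-goes behavior, at the cost of the cross-site protection this hook exists to provide.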
@@ -489,7 +560,7 @@ class WebSocketProtocol76(WebSocketProtocol): """Send ping frame.""" raise ValueError("Ping messages not supported by this version of websockets") - def close(self): + def close(self, code=None, reason=None): """Closes the WebSocket connection.""" if not self.server_terminated: if not self.stream.closed(): @@ -739,6 +810,10 @@ class WebSocketProtocol13(WebSocketProtocol): elif opcode == 0x8: # Close self.client_terminated = True + if len(data) >= 2: + self.handler.close_code = struct.unpack('>H', data[:2])[0] + if len(data) > 2: + self.handler.close_reason = to_unicode(data[2:]) self.close() elif opcode == 0x9: # Ping @@ -749,11 +824,19 @@ class WebSocketProtocol13(WebSocketProtocol): else: self._abort() - def close(self): + def close(self, code=None, reason=None): """Closes the WebSocket connection.""" if not self.server_terminated: if not self.stream.closed(): - self._write_frame(True, 0x8, b"") + if code is None and reason is not None: + code = 1000 # "normal closure" status code + if code is None: + close_data = b'' + else: + close_data = struct.pack('>H', code) + if reason is not None: + close_data += utf8(reason) + self._write_frame(True, 0x8, close_data) self.server_terminated = True if self.client_terminated: if self._waiting is not None: @@ -789,18 +872,25 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): 'Sec-WebSocket-Version': '13', }) - self.resolver = Resolver(io_loop=io_loop) + self.tcp_client = TCPClient(io_loop=io_loop) super(WebSocketClientConnection, self).__init__( io_loop, None, request, lambda: None, self._on_http_response, - 104857600, self.resolver) + 104857600, self.tcp_client, 65536) - def close(self): + def close(self, code=None, reason=None): """Closes the websocket connection. + ``code`` and ``reason`` are documented under + `WebSocketHandler.close`. + .. versionadded:: 3.2 + + .. versionchanged:: 3.3 + + Added the ``code`` and ``reason`` arguments. """ if self.protocol is not None: - self.protocol.close() + self.protocol.close(code, reason) self.protocol = None def _on_close(self): @@ -816,8 +906,12 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): self.connect_future.set_exception(WebSocketError( "Non-websocket response")) - def _handle_1xx(self, code): - assert code == 101 + def headers_received(self, start_line, headers): + if start_line.code != 101: + return super(WebSocketClientConnection, self).headers_received( + start_line, headers) + + self.headers = headers assert self.headers['Upgrade'].lower() == 'websocket' assert self.headers['Connection'].lower() == 'upgrade' accept = WebSocketProtocol13.compute_accept_value(self.key) @@ -830,6 +924,9 @@ class WebSocketClientConnection(simple_httpclient._HTTPConnection): self.io_loop.remove_timeout(self._timeout) self._timeout = None + self.stream = self.connection.detach() + self.stream.set_close_callback(self._on_close) + self.connect_future.set_result(self) def write_message(self, message, binary=False): @@ -890,35 +987,3 @@ def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None): if callback is not None: io_loop.add_future(conn.connect_future, callback) return conn.connect_future - - -def _websocket_mask_python(mask, data): - """Websocket masking function. - - `mask` is a `bytes` object of length 4; `data` is a `bytes` object of any length. - Returns a `bytes` object of the same length as `data` with the mask applied - as specified in section 5.3 of RFC 6455. 
-
-    This pure-python implementation may be replaced by an optimized version when available.
-    """
-    mask = array.array("B", mask)
-    unmasked = array.array("B", data)
-    for i in xrange(len(data)):
-        unmasked[i] = unmasked[i] ^ mask[i % 4]
-    if hasattr(unmasked, 'tobytes'):
-        # tostring was deprecated in py32. It hasn't been removed,
-        # but since we turn on deprecation warnings in our tests
-        # we need to use the right one.
-        return unmasked.tobytes()
-    else:
-        return unmasked.tostring()
-
-if os.environ.get('TORNADO_NO_EXTENSION'):
-    # This environment variable exists to make it easier to do performance comparisons;
-    # it's not guaranteed to remain supported in the future.
-    _websocket_mask = _websocket_mask_python
-else:
-    try:
-        from tornado.speedups import websocket_mask as _websocket_mask
-    except ImportError:
-        _websocket_mask = _websocket_mask_python
diff --git a/libs/tornado/wsgi.py b/libs/tornado/wsgi.py
index 615f2e1f..47a0590a 100755
--- a/libs/tornado/wsgi.py
+++ b/libs/tornado/wsgi.py
@@ -20,9 +20,9 @@ WSGI is the Python standard for web servers, and allows for interoperability
 between Tornado and other Python web frameworks and servers. This module
 provides WSGI support in two ways:

-* `WSGIApplication` is a version of `tornado.web.Application` that can run
-  inside a WSGI server. This is useful for running a Tornado app on another
-  HTTP server, such as Google App Engine. See the `WSGIApplication` class
+* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
+  interface. This is useful for running a Tornado app on another
+  HTTP server, such as Google App Engine. See the `WSGIAdapter` class
   documentation for limitations that apply.
 * `WSGIContainer` lets you run other WSGI applications and frameworks on the
   Tornado HTTP server. For example, with this class you can mix Django
@@ -32,15 +32,14 @@ provides WSGI support in two ways:
 from __future__ import absolute_import, division, print_function, with_statement

 import sys
-import time
-import copy

 import tornado
+from tornado.concurrent import Future
 from tornado import escape
 from tornado import httputil
 from tornado.log import access_log
 from tornado import web
-from tornado.escape import native_str, parse_qs_bytes
+from tornado.escape import native_str
 from tornado.util import bytes_type, unicode_type

 try:
@@ -48,11 +47,6 @@ try:
 except ImportError:
     from cStringIO import StringIO as BytesIO  # python 2

-try:
-    import Cookie  # py2
-except ImportError:
-    import http.cookies as Cookie  # py3
-
 try:
     import urllib.parse as urllib_parse  # py3
 except ImportError:
@@ -83,11 +77,84 @@ else:

 class WSGIApplication(web.Application):
     """A WSGI equivalent of `tornado.web.Application`.

-    `WSGIApplication` is very similar to `tornado.web.Application`,
-    except no asynchronous methods are supported (since WSGI does not
-    support non-blocking requests properly). If you call
-    ``self.flush()`` or other asynchronous methods in your request
-    handlers running in a `WSGIApplication`, we throw an exception.
+    .. deprecated:: 3.3
+
+       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
+    """
+    def __call__(self, environ, start_response):
+        return WSGIAdapter(self)(environ, start_response)
+
+
+# WSGI has no facilities for flow control, so just return an already-done
+# Future when the interface requires it.
+_dummy_future = Future() +_dummy_future.set_result(None) + + +class _WSGIConnection(httputil.HTTPConnection): + def __init__(self, method, start_response, context): + self.method = method + self.start_response = start_response + self.context = context + self._write_buffer = [] + self._finished = False + self._expected_content_remaining = None + self._error = None + + def set_close_callback(self, callback): + # WSGI has no facility for detecting a closed connection mid-request, + # so we can simply ignore the callback. + pass + + def write_headers(self, start_line, headers, chunk=None, callback=None): + if self.method == 'HEAD': + self._expected_content_remaining = 0 + elif 'Content-Length' in headers: + self._expected_content_remaining = int(headers['Content-Length']) + else: + self._expected_content_remaining = None + self.start_response( + '%s %s' % (start_line.code, start_line.reason), + [(native_str(k), native_str(v)) for (k, v) in headers.get_all()]) + if chunk is not None: + self.write(chunk, callback) + elif callback is not None: + callback() + return _dummy_future + + def write(self, chunk, callback=None): + if self._expected_content_remaining is not None: + self._expected_content_remaining -= len(chunk) + if self._expected_content_remaining < 0: + self._error = httputil.HTTPOutputException( + "Tried to write more data than Content-Length") + raise self._error + self._write_buffer.append(chunk) + if callback is not None: + callback() + return _dummy_future + + def finish(self): + if (self._expected_content_remaining is not None and + self._expected_content_remaining != 0): + self._error = httputil.HTTPOutputException( + "Tried to write %d bytes less than Content-Length" % + self._expected_content_remaining) + raise self._error + self._finished = True + + +class _WSGIRequestContext(object): + def __init__(self, remote_ip, protocol): + self.remote_ip = remote_ip + self.protocol = protocol + + def __str__(self): + return self.remote_ip + + +class WSGIAdapter(object): + """Converts a `tornado.web.Application` instance into a WSGI application. Example usage:: @@ -100,121 +167,83 @@ class WSGIApplication(web.Application): self.write("Hello, world") if __name__ == "__main__": - application = tornado.wsgi.WSGIApplication([ + application = tornado.web.Application([ (r"/", MainHandler), ]) - server = wsgiref.simple_server.make_server('', 8888, application) + wsgi_app = tornado.wsgi.WSGIAdapter(application) + server = wsgiref.simple_server.make_server('', 8888, wsgi_app) server.serve_forever() See the `appengine demo - `_ + `_ for an example of using this module to run a Tornado app on Google App Engine. - WSGI applications use the same `.RequestHandler` class, but not - ``@asynchronous`` methods or ``flush()``. This means that it is - not possible to use `.AsyncHTTPClient`, or the `tornado.auth` or - `tornado.websocket` modules. + In WSGI mode asynchronous methods are not supported. This means + that it is not possible to use `.AsyncHTTPClient`, or the + `tornado.auth` or `tornado.websocket` modules. + + .. 
versionadded:: 3.3 """ - def __init__(self, handlers=None, default_host="", **settings): - web.Application.__init__(self, handlers, default_host, transforms=[], - wsgi=True, **settings) + def __init__(self, application): + if isinstance(application, WSGIApplication): + self.application = lambda request: web.Application.__call__( + application, request) + else: + self.application = application def __call__(self, environ, start_response): - handler = web.Application.__call__(self, HTTPRequest(environ)) - assert handler._finished - reason = handler._reason - status = str(handler._status_code) + " " + reason - headers = list(handler._headers.get_all()) - if hasattr(handler, "_new_cookie"): - for cookie in handler._new_cookie.values(): - headers.append(("Set-Cookie", cookie.OutputString(None))) - start_response(status, - [(native_str(k), native_str(v)) for (k, v) in headers]) - return handler._write_buffer - - -class HTTPRequest(object): - """Mimics `tornado.httpserver.HTTPRequest` for WSGI applications.""" - def __init__(self, environ): - """Parses the given WSGI environment to construct the request.""" - self.method = environ["REQUEST_METHOD"] - self.path = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", ""))) - self.path += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", ""))) - self.uri = self.path - self.arguments = {} - self.query_arguments = {} - self.body_arguments = {} - self.query = environ.get("QUERY_STRING", "") - if self.query: - self.uri += "?" + self.query - self.arguments = parse_qs_bytes(native_str(self.query), - keep_blank_values=True) - self.query_arguments = copy.deepcopy(self.arguments) - self.version = "HTTP/1.1" - self.headers = httputil.HTTPHeaders() + method = environ["REQUEST_METHOD"] + uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", ""))) + uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", ""))) + if environ.get("QUERY_STRING"): + uri += "?" 
+ environ["QUERY_STRING"] + headers = httputil.HTTPHeaders() if environ.get("CONTENT_TYPE"): - self.headers["Content-Type"] = environ["CONTENT_TYPE"] + headers["Content-Type"] = environ["CONTENT_TYPE"] if environ.get("CONTENT_LENGTH"): - self.headers["Content-Length"] = environ["CONTENT_LENGTH"] + headers["Content-Length"] = environ["CONTENT_LENGTH"] for key in environ: if key.startswith("HTTP_"): - self.headers[key[5:].replace("_", "-")] = environ[key] - if self.headers.get("Content-Length"): - self.body = environ["wsgi.input"].read( - int(self.headers["Content-Length"])) + headers[key[5:].replace("_", "-")] = environ[key] + if headers.get("Content-Length"): + body = environ["wsgi.input"].read( + int(headers["Content-Length"])) else: - self.body = "" - self.protocol = environ["wsgi.url_scheme"] - self.remote_ip = environ.get("REMOTE_ADDR", "") + body = "" + protocol = environ["wsgi.url_scheme"] + remote_ip = environ.get("REMOTE_ADDR", "") if environ.get("HTTP_HOST"): - self.host = environ["HTTP_HOST"] + host = environ["HTTP_HOST"] else: - self.host = environ["SERVER_NAME"] - - # Parse request body - self.files = {} - httputil.parse_body_arguments(self.headers.get("Content-Type", ""), - self.body, self.body_arguments, self.files) - - for k, v in self.body_arguments.items(): - self.arguments.setdefault(k, []).extend(v) - - self._start_time = time.time() - self._finish_time = None - - def supports_http_1_1(self): - """Returns True if this request supports HTTP/1.1 semantics""" - return self.version == "HTTP/1.1" - - @property - def cookies(self): - """A dictionary of Cookie.Morsel objects.""" - if not hasattr(self, "_cookies"): - self._cookies = Cookie.SimpleCookie() - if "Cookie" in self.headers: - try: - self._cookies.load( - native_str(self.headers["Cookie"])) - except Exception: - self._cookies = None - return self._cookies - - def full_url(self): - """Reconstructs the full URL for this request.""" - return self.protocol + "://" + self.host + self.uri - - def request_time(self): - """Returns the amount of time it took for this request to execute.""" - if self._finish_time is None: - return time.time() - self._start_time - else: - return self._finish_time - self._start_time + host = environ["SERVER_NAME"] + connection = _WSGIConnection(method, start_response, + _WSGIRequestContext(remote_ip, protocol)) + request = httputil.HTTPServerRequest( + method, uri, "HTTP/1.1", headers=headers, body=body, + host=host, connection=connection) + request._parse_body() + self.application(request) + if connection._error: + raise connection._error + if not connection._finished: + raise Exception("request did not finish synchronously") + return connection._write_buffer class WSGIContainer(object): r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server. + .. warning:: + + WSGI is a *synchronous* interface, while Tornado's concurrency model + is based on single-threaded asynchronous execution. This means that + running a WSGI app with Tornado's `WSGIContainer` is *less scalable* + than running the same app in a multi-threaded WSGI server like + ``gunicorn`` or ``uwsgi``. Use `WSGIContainer` only when there are + benefits to combining Tornado and WSGI in the same process that + outweigh the reduced scalability. + Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to run it. For example:: @@ -281,7 +310,7 @@ class WSGIContainer(object): @staticmethod def environ(request): - """Converts a `tornado.httpserver.HTTPRequest` to a WSGI environment. 
+ """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment. """ hostport = request.host.split(":") if len(hostport) == 2: @@ -327,3 +356,6 @@ class WSGIContainer(object): summary = request.method + " " + request.uri + " (" + \ request.remote_ip + ")" log_method("%d %s %.2fms", status_code, summary, request_time) + + +HTTPRequest = httputil.HTTPServerRequest diff --git a/libs/unrar2/__init__.py b/libs/unrar2/__init__.py index b5c9a4d3..fe27cfe1 100644 --- a/libs/unrar2/__init__.py +++ b/libs/unrar2/__init__.py @@ -33,7 +33,7 @@ similar to the C interface provided by UnRAR. There is also a higher level interface which makes some common operations easier. """ -__version__ = '0.99.2' +__version__ = '0.99.3' try: WindowsError diff --git a/libs/unrar2/unix.py b/libs/unrar2/unix.py index 21f384cf..9ebab40d 100644 --- a/libs/unrar2/unix.py +++ b/libs/unrar2/unix.py @@ -33,6 +33,7 @@ from rar_exceptions import * class UnpackerNotInstalled(Exception): pass rar_executable_cached = None +rar_executable_version = None def call_unrar(params): "Calls rar/unrar command line executable, returns stdout pipe" @@ -59,10 +60,10 @@ def call_unrar(params): class RarFileImplementation(object): def init(self, password = None): + global rar_executable_version self.password = password - stdoutdata, stderrdata = self.call('v', []).communicate() for line in stderrdata.splitlines(): @@ -73,18 +74,42 @@ class RarFileImplementation(object): accum = [] source = iter(stdoutdata.splitlines()) line = '' - while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')): - if line.strip().endswith('is not RAR archive'): - raise InvalidRARArchive + while not (line.startswith('UNRAR')): line = source.next() - while not line.startswith('Pathname/Comment'): - accum.append(line.rstrip('\n')) + signature = line + # The code below is mighty flaky + # and will probably crash on localized versions of RAR + # but I see no safe way to rewrite it using a CLI tool + if signature.startswith("UNRAR 4"): + rar_executable_version = 4 + while not (line.startswith('Comment:') or line.startswith('Pathname/Comment')): + if line.strip().endswith('is not RAR archive'): + raise InvalidRARArchive + line = source.next() + while not line.startswith('Pathname/Comment'): + accum.append(line.rstrip('\n')) + line = source.next() + if len(accum): + accum[0] = accum[0][9:] # strip out "Comment:" part + self.comment = '\n'.join(accum[:-1]) + else: + self.comment = None + elif signature.startswith("UNRAR 5"): + rar_executable_version = 5 line = source.next() - if len(accum): - accum[0] = accum[0][9:] - self.comment = '\n'.join(accum[:-1]) + while not line.startswith('Archive:'): + if line.strip().endswith('is not RAR archive'): + raise InvalidRARArchive + accum.append(line.rstrip('\n')) + line = source.next() + if len(accum): + self.comment = '\n'.join(accum[:-1]).strip() + else: + self.comment = None else: - self.comment = None + raise UnpackerNotInstalled("Unsupported RAR version, expected 4.x or 5.x, found: " + + signature.split(" ")[1]) + def escaped_password(self): return '-' if self.password == None else self.password @@ -97,7 +122,8 @@ class RarFileImplementation(object): def infoiter(self): - stdoutdata, stderrdata = self.call('v', ['c-']).communicate() + command = "v" if rar_executable_version == 4 else "l" + stdoutdata, stderrdata = self.call(command, ['c-']).communicate() for line in stderrdata.splitlines(): if line.strip().startswith("Cannot open"): @@ -106,31 +132,48 @@ class RarFileImplementation(object): accum = [] 
         source = iter(stdoutdata.splitlines())
         line = ''
-        while not line.startswith('--------------'):
+        while not line.startswith('-----------'):
             if line.strip().endswith('is not RAR archive'):
                 raise InvalidRARArchive
-            if line.find("CRC failed") >= 0:
+            if line.startswith("CRC failed") or line.startswith("Checksum error"):
                 raise IncorrectRARPassword
             line = source.next()
         line = source.next()
         i = 0
         re_spaces = re.compile(r"\s+")
-        while not line.startswith('--------------'):
-            accum.append(line)
-            if len(accum) == 2:
+        if rar_executable_version == 4:
+            while not line.startswith('-----------'):
+                accum.append(line)
+                if len(accum) == 2:
+                    data = {}
+                    data['index'] = i
+                    # asterisks mark password-encrypted files
+                    data['filename'] = accum[0].strip().lstrip("*")
+                    fields = re_spaces.split(accum[1].strip())
+                    data['size'] = int(fields[0])
+                    attr = fields[5]
+                    data['isdir'] = 'd' in attr.lower()
+                    data['datetime'] = time.strptime(fields[3] + " " + fields[4], '%d-%m-%y %H:%M')
+                    data['comment'] = None
+                    yield data
+                    accum = []
+                    i += 1
+                line = source.next()
+        elif rar_executable_version == 5:
+            while not line.startswith('-----------'):
+                fields = line.strip().lstrip("*").split()
                 data = {}
                 data['index'] = i
-                data['filename'] = accum[0].strip()
-                info = re_spaces.split(accum[1].strip())
-                data['size'] = int(info[0])
-                attr = info[5]
+                data['filename'] = " ".join(fields[4:])
+                data['size'] = int(fields[1])
+                attr = fields[0]
                 data['isdir'] = 'd' in attr.lower()
-                data['datetime'] = time.strptime(info[3] + " " + info[4], '%d-%m-%y %H:%M')
+                data['datetime'] = time.strptime(fields[2] + " " + fields[3], '%d-%m-%y %H:%M')
                 data['comment'] = None
                 yield data
-                accum = []
                 i += 1
-            line = source.next()
+                line = source.next()
+
     def read_files(self, checker):
         res = []
@@ -151,7 +194,7 @@ class RarFileImplementation(object):
         if overwrite:
             options.append('o+')
         else:
-            options.append('o-')
+            options.append('o-')
         if not path.endswith(os.sep):
             path += os.sep
         names = []
@@ -165,7 +208,7 @@ class RarFileImplementation(object):
             names.append(path)
         proc = self.call(command, options, names)
         stdoutdata, stderrdata = proc.communicate()
-        if stderrdata.find("CRC failed") >= 0:
+        if stderrdata.find("CRC failed") >= 0 or stderrdata.find("Checksum error") >= 0:
             raise IncorrectRARPassword
         return res
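
For context on the `tornado/wsgi.py` hunks above: the new warning in the `WSGIContainer` docstring stresses that a synchronous WSGI app runs on Tornado's single-threaded event loop. A minimal sketch of the wrapping pattern that docstring refers to, assuming Tornado 3.x/4.x APIs; the app body and port are illustrative, not part of the patch:

```python
import tornado.httpserver
import tornado.ioloop
import tornado.wsgi

def simple_app(environ, start_response):
    # A plain synchronous WSGI callable; while it runs, the IOLoop
    # cannot serve any other request, hence the scalability warning.
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"Hello world!\n"]

container = tornado.wsgi.WSGIContainer(simple_app)
http_server = tornado.httpserver.HTTPServer(container)
http_server.listen(8888)
tornado.ioloop.IOLoop.instance().start()
```

This also motivates the `raise Exception("request did not finish synchronously")` check in the adapter: the container assumes the WSGI call completes before it returns, which only holds for synchronous apps.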
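The `libs/unrar2/unix.py` changes key all their parsing on the first `UNRAR` banner line of the tool's output (`v` listing for 4.x, `l` for 5.x), and the patch's own comment concedes this is fragile on localized builds. A standalone sketch of the same banner check; the function name and regex are hypothetical, added here only for illustration:

```python
import re
import subprocess

def rar_banner_version(executable="unrar"):
    # Hypothetical helper, not part of the patch. Running unrar with no
    # arguments prints a usage screen whose banner typically looks like
    # "UNRAR 5.00 freeware ..." (may differ on localized builds).
    proc = subprocess.Popen([executable],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    output, _ = proc.communicate()
    for line in output.decode("utf-8", "replace").splitlines():
        match = re.match(r"UNRAR (\d+)\.", line)
        if match:
            return int(match.group(1))  # 4 or 5, like rar_executable_version
    # The patched module raises UnpackerNotInstalled in this situation.
    raise RuntimeError("no UNRAR banner found in output")
```

In the patch itself the detected major version is cached module-wide in `rar_executable_version` during `init()`, so `infoiter()` can choose between the `v` and `l` listing commands without re-running the banner check.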