\S+))?\s+(?Pimg|IMG|left|right|center|video|audio|blockleft|blockright)(?:\s+(?P\d+px))?\s*$',re.S)
@@ -648,7 +648,9 @@ def autolinks_simple(url):
image, video or audio tag
"""
u_url=url.lower()
- if u_url.endswith(('.jpg','.jpeg','.gif','.png')):
+ if '@' in url and not '://' in url:
+ return '%s' % (url, url)
+ elif u_url.endswith(('.jpg','.jpeg','.gif','.png')):
return '
' % url
elif u_url.endswith(('.mp4','.mpeg','.mov','.ogv')):
return '' % url
@@ -673,6 +675,9 @@ def protolinks_simple(proto, url):
return '
'%url
return proto+':'+url
+def email_simple(email):
+ return '%s' % (email, email)
+
def render(text,
extra={},
allowed={},
diff --git a/gluon/contrib/markmin/markmin2latex.py b/gluon/contrib/markmin/markmin2latex.py
index 564f9905..65ad4aa8 100755
--- a/gluon/contrib/markmin/markmin2latex.py
+++ b/gluon/contrib/markmin/markmin2latex.py
@@ -15,16 +15,14 @@ regex_dd=re.compile('\$\$(?P.*?)\$\$')
regex_code = re.compile('('+META+')|(``(?P.*?)``(:(?P\w+))?)',re.S)
regex_title = re.compile('^#{1} (?P[^\n]+)',re.M)
regex_maps = [
- (re.compile('[ \t\r]+\n'),'\n'),
(re.compile('[ \t\r]+\n'),'\n'),
(re.compile('\*\*(?P[^\s\*]+( +[^\s\*]+)*)\*\*'),'{\\\\bf \g}'),
(re.compile("''(?P[^\s']+( +[^\s']+)*)''"),'{\\it \g}'),
- (re.compile('^#{6} (?P[^\n]+)',re.M),'\n\n{\\\\bf \g}\n'),
- (re.compile('^#{5} (?P[^\n]+)',re.M),'\n\n{\\\\bf \g}\n'),
- (re.compile('^#{4} (?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsubsection{\g}\n'),
- (re.compile('^#{3} (?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsection{\g}\n'),
- (re.compile('^#{2} (?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\section{\g}\n'),
- (re.compile('^#{1} (?P[^\n]+)',re.M),''),
+ (re.compile('^#{5,6}\s*(?P[^\n]+)',re.M),'\n\n{\\\\bf \g}\n'),
+ (re.compile('^#{4}\s*(?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsubsection{\g}\n'),
+ (re.compile('^#{3}\s*(?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\subsection{\g}\n'),
+ (re.compile('^#{2}\s*(?P[^\n]+)',re.M),'\n\n\\\\goodbreak\\section{\g}\n'),
+ (re.compile('^#{1}\s*(?P[^\n]+)',re.M),''),
(re.compile('^\- +(?P.*)',re.M),'\\\\begin{itemize}\n\\item \g\n\\end{itemize}'),
(re.compile('^\+ +(?P.*)',re.M),'\\\\begin{itemize}\n\\item \g\n\\end{itemize}'),
(re.compile('\\\\end\{itemize\}\s+\\\\begin\{itemize\}'),'\n'),
diff --git a/gluon/contrib/markmin/markmin2pdf.py b/gluon/contrib/markmin/markmin2pdf.py
index b94ae666..9774ee64 100644
--- a/gluon/contrib/markmin/markmin2pdf.py
+++ b/gluon/contrib/markmin/markmin2pdf.py
@@ -1,6 +1,6 @@
"""
Created by Massimo Di Pierro
-Licese BSD
+License BSD
"""
import subprocess
diff --git a/gluon/contrib/memdb.py b/gluon/contrib/memdb.py
index e043da73..6ad9b159 100644
--- a/gluon/contrib/memdb.py
+++ b/gluon/contrib/memdb.py
@@ -5,7 +5,7 @@
This file is part of web2py Web Framework (Copyrighted, 2007-2009).
Developed by Massimo Di Pierro and
Robin B .
-License: GPL v2
+License: LGPLv3
"""
__all__ = ['MEMDB', 'Field']
@@ -292,8 +292,13 @@ class Table(DALStorage):
def __str__(self):
return self._tablename
- def __call__(self, id):
- return self.get(id)
+ def __call__(self, id, **kwargs):
+ record = self.get(id)
+ if record is None:
+ return None
+ if kwargs and any(record[key]!=kwargs[key] for key in kwargs):
+ return None
+ return record
class Expression(object):
diff --git a/gluon/contrib/mockimaplib.py b/gluon/contrib/mockimaplib.py
new file mode 100644
index 00000000..87f8b479
--- /dev/null
+++ b/gluon/contrib/mockimaplib.py
@@ -0,0 +1,255 @@
+# -*- encoding: utf-8 -*-
+
+from imaplib import ParseFlags
+
+# mockimaplib: A very simple mock server module for imap client APIs
+# Copyright (C) 2014 Alan Etkin
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or(at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this program. If not, see
+#
+
+"""
+mockimaplib allows you to test applications connecting to a dummy imap
+service. For more details on the api subset implemented,
+refer to the imaplib docs.
+
+The client should configure a dictionary to map imap string queries to sets
+of entries stored in a message dummy storage dictionary. The module includes
+a small set of default message records (SPAM and MESSAGES), two mailboxes
+(Draft and INBOX) and a list of query/resultset entries (RESULTS).
+
+Usage:
+
+>>> import mockimaplib
+>>> connection = mockimaplib.IMAP4_SSL()
+>>> connection.login(, )
+None
+>>> connection.select("INBOX")
+("OK", ... )
+# fetch commands specifying single uid or message id
+# will try to get messages recorded in SPAM
+>>> connection.uid(...)
+
+# returns a string list of matching message ids
+>>> connection.search()
+("OK", ... "1 2 ... n")
+"""
+
+MESSAGES = (
+'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:30 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:30 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <10101010101010010000010101010001010101001010010000001@mail.example.com>\r\nSubject: spam1\r\nFrom: Mr. Gumby \r\nTo: The nurse \r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n',
+'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:52:47 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:52:47 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010@mail.example.com>\r\nSubject: spam2\r\nFrom: Mr. Gumby \r\nTo: The nurse \r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse, nurse!',
+'MIME-Version: 1.0\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <1010101010101001000001010101000101010100101001000000101@mail.example.com>\r\nSubject: spamalot1\r\nFrom: Mr. Gumby \r\nTo: The nurse \r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse!\r\n\r\n\r\n',
+'MIME-Version: 1.0\r\n\r\nReceived: by 10.140.91.199 with HTTP; Mon, 27 Jan 2014 13:54:54 -0800 (PST)\r\nDate: Mon, 27 Jan 2014 19:54:54 -0200\r\nDelivered-To: nurse@example.com\r\nMessage-ID: <101010101010100100000101010100010101010010100100000010101@mail.example.com>\r\nSubject: spamalot2\r\nFrom: Mr. Gumby \r\nTo: The nurse \r\nContent-Type: text/plain; charset=ISO-8859-1\r\n\r\nNurse! ... Nurse! ... Nurse!\r\n\r\n\r\n')
+
+SPAM = {
+ "INBOX": [
+ {"uid": "483209",
+ "headers": MESSAGES[0],
+ "complete": MESSAGES[0],
+ "flags": ""},
+ {"uid": "483211",
+ "headers": MESSAGES[1],
+ "complete": MESSAGES[1],
+ "flags": ""},
+ {"uid": "483225",
+ "headers": MESSAGES[2],
+ "complete": MESSAGES[2],
+ "flags": ""}],
+ "Draft":[
+ {"uid": "483432",
+ "headers": MESSAGES[3],
+ "complete": MESSAGES[3],
+ "flags": ""},]
+}
+
+RESULTS = {
+ # : [ | , ...]
+ "INBOX": {
+ "(ALL)": (1, 2, 3),
+ "(1:3)": (1, 2, 3)},
+ "Draft": {
+ "(1:1)": (1,)},
+}
+
+class Connection(object):
+ """Dummy connection object for the imap client.
+ By default, uses the module SPAM and RESULT
+ sets (use Connection.setup for custom values)"""
+ def login(self, user, password):
+ pass
+
+ def __init__(self):
+ self._readonly = False
+ self._mailbox = None
+ self.setup()
+
+ def list(self):
+ return ('OK', ['(\\HasNoChildren) "/" "%s"' % key for key in self.spam])
+
+ def select(self, tablename, readonly=False):
+ self._readonly = readonly
+ """args: mailbox, boolean
+ result[1][0] -> int last message id / mailbox length
+ result[0] = 'OK'
+ """
+ self._mailbox = tablename
+ return ('OK', (len(SPAM[self._mailbox]), None))
+
+ def uid(self, command, uid, arg):
+ """ args:
+ command: "search" | "fetch"
+ uid: None | uid
+ parts: "(ALL)" | "(RFC822 FLAGS)" | "(RFC822.HEADER FLAGS)"
+
+ "search", None, "(ALL)" -> ("OK", ("uid_1 uid_2 ... uid_", None))
+ "search", None, "" -> ("OK", ("uid_1 uid_2 ... uid_n", None))
+ "fetch", uid, parts -> ("OK", ((" ...", ""), "")
+ [0] [1][0][0] [1][0][1] [1][1]
+ """
+ if command == "search":
+ return self._search(arg)
+ elif command == "fetch":
+ return self._fetch(uid, arg)
+
+ def _search(self, query):
+ return ("OK", (" ".join([str(item["uid"]) for item in self._get_messages(query)]), None))
+
+ def _fetch(self, value, arg):
+ try:
+ message = self.spam[self._mailbox][value - 1]
+ message_id = value
+ except TypeError:
+ for x, item in enumerate(self.spam[self._mailbox]):
+ if item["uid"] == value:
+ message = item
+ message_id = x + 1
+ break
+
+ parts = "headers"
+ if arg in ("(ALL)", "(RFC822 FLAGS)"):
+ parts = "complete"
+
+ return ("OK", (("%s " % message_id, message[parts]), message["flags"]))
+
+ def _get_messages(self, query):
+ if query.strip().isdigit():
+ return [self.spam[self._mailbox][int(query.strip()) - 1],]
+ elif query[1:-1].strip().isdigit():
+ return [self.spam[self._mailbox][int(query[1:-1].strip()) -1],]
+ elif query[1:-1].replace("UID", "").strip().isdigit():
+ for item in self.spam[self._mailbox]:
+ if item["uid"] == query[1:-1].replace("UID", "").strip():
+ return [item,]
+ messages = []
+ try:
+ for m in self.results[self._mailbox][query]:
+ try:
+ self.spam[self._mailbox][m - 1]["id"] = m
+ messages.append(self.spam[self._mailbox][m - 1])
+ except TypeError:
+ for x, item in enumerate(self.spam[self._mailbox]):
+ if item["uid"] == m:
+ item["id"] = x + 1
+ messages.append(item)
+ break
+ except IndexError:
+ # message removed
+ pass
+ return messages
+ except KeyError:
+ raise ValueError("The client issued an unexpected query: %s" % query)
+
+ def setup(self, spam={}, results={}):
+ """adds custom message and query databases or sets
+ the values to the module defaults.
+ """
+
+ self.spam = spam
+ self.results = results
+ if not spam:
+ for key in SPAM:
+ self.spam[key] = []
+ for d in SPAM[key]:
+ self.spam[key].append(d.copy())
+ if not results:
+ for key in RESULTS:
+ self.results[key] = RESULTS[key].copy()
+
+
+ def search(self, first, query):
+ """ args:
+ first: None
+ query: string with mailbox query (flags, date, uid, id, ...)
+ example: '2:15723 BEFORE 27-Jan-2014 FROM "gumby"'
+ result[1][0] -> "id_1 id_2 ... id_n"
+ """
+ messages = self._get_messages(query)
+ ids = " ".join([str(item["id"]) for item in messages])
+ return ("OK", (ids, None))
+
+ def append(self, mailbox, flags, struct_time, message):
+ """
+ result, data = self.connection.append(mailbox, flags, struct_time, message)
+ if result == "OK":
+ uid = int(re.findall("\d+", str(data))[-1])
+ """
+ last = self.spam[mailbox][-1]
+ try:
+ uid = int(last["uid"]) +1
+ except ValueError:
+ alluids = []
+ for _mailbox in self.spam.keys():
+ for item in self.spam[_mailbox]:
+ try:
+ alluids.append(int(item["uid"]))
+ except:
+ pass
+ if len(alluids) > 0:
+ uid = max(alluids) + 1
+ else:
+ uid = 1
+ flags = "FLAGS " + flags
+ item = {"uid": str(uid), "headers": message, "complete": message, "flags": flags}
+ self.spam[mailbox].append(item)
+ return ("OK", "spam spam %s spam" % uid)
+
+
+ def store(self, *args):
+ """
+ implements some flag commands
+ args: ("", "<+|->FLAGS", "(\\Flag1 \\Flag2 ... \\Flagn)")
+ """
+ message = self.spam[self._mailbox][int(args[0] - 1)]
+ old_flags = ParseFlags(message["flags"])
+ flags = ParseFlags("FLAGS" + args[2])
+ if args[1].strip().startswith("+"):
+ message["flags"] = "FLAGS (%s)" % " ".join(set(flags + old_flags))
+ elif args[1].strip().startswith("-"):
+ message["flags"] = "FLAGS (%s)" % " ".join([flag for flag in old_flags if not flag in flags])
+
+ def expunge(self):
+ """implements removal of deleted flag messages"""
+ for x, item in enumerate(self.spam[self._mailbox]):
+ if "\\Deleted" in item["flags"]:
+ self.spam[self._mailbox].pop(x)
+
+
+class IMAP4(object):
+ """>>> connection = IMAP4() # creates the dummy imap4 client object"""
+ def __new__(self, *args, **kwargs):
+ # args: (server, port)
+ return Connection()
+
+IMAP4_SSL = IMAP4
+
diff --git a/gluon/contrib/populate.py b/gluon/contrib/populate.py
index 122a4d64..cc7dd216 100644
--- a/gluon/contrib/populate.py
+++ b/gluon/contrib/populate.py
@@ -133,6 +133,8 @@ def populate_generator(table, default=True, compute=False, contents={}):
continue
elif field.type == 'id':
continue
+ elif field.type == 'upload':
+ continue
elif default and not field.default in (None, ''):
record[fieldname] = field.default
elif compute and field.compute:
@@ -153,8 +155,6 @@ def populate_generator(table, default=True, compute=False, contents={}):
record[fieldname] = datetime.time(h, m, 0)
elif field.type == 'password':
record[fieldname] = ''
- elif field.type == 'upload':
- record[fieldname] = None
elif field.type == 'integer' and \
hasattr(field.requires, 'options'):
options = field.requires.options(zero=False)
@@ -266,5 +266,5 @@ def populate_generator(table, default=True, compute=False, contents={}):
if __name__ == '__main__':
ell = Learner()
- ell.loadd(eval(IUP))
+ ell.loadd(IUP)
print ell.generate(1000, prefix=None)
diff --git a/gluon/contrib/pysimplesoap/__init__.py b/gluon/contrib/pysimplesoap/__init__.py
index 6043241d..28bfee12 100755
--- a/gluon/contrib/pysimplesoap/__init__.py
+++ b/gluon/contrib/pysimplesoap/__init__.py
@@ -1,7 +1,16 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
-"PySimpleSOAP"
-import client
-import server
-import simplexml
-import transport
\ No newline at end of file
+
+"""PySimpleSOAP"""
+
+
+__author__ = "Mariano Reingart"
+__author_email__ = "reingart@gmail.com"
+__copyright__ = "Copyright (C) 2013 Mariano Reingart"
+__license__ = "LGPL 3.0"
+__version__ = "1.11"
+
+TIMEOUT = 60
+
+
+from . import client, server, simplexml, transport
diff --git a/gluon/contrib/pysimplesoap/client.py b/gluon/contrib/pysimplesoap/client.py
index 75bca191..88300eba 100755
--- a/gluon/contrib/pysimplesoap/client.py
+++ b/gluon/contrib/pysimplesoap/client.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
@@ -10,114 +10,134 @@
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
-"Pythonic simple SOAP Client implementation"
+"""Pythonic simple SOAP Client implementation"""
-__author__ = "Mariano Reingart (reingart@gmail.com)"
-__copyright__ = "Copyright (C) 2008 Mariano Reingart"
-__license__ = "LGPL 3.0"
-__version__ = "1.07a"
+from __future__ import unicode_literals
+import sys
+if sys.version > '3':
+ unicode = str
-TIMEOUT = 60
-
-import cPickle as pickle
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
import hashlib
import logging
import os
import tempfile
-import urllib2
-from urlparse import urlsplit
-from simplexml import SimpleXMLElement, TYPE_MAP, REVERSE_TYPE_MAP, OrderedDict
-from transport import get_http_wrapper, set_http_wrapper, get_Http
+
+from . import __author__, __copyright__, __license__, __version__, TIMEOUT
+from .simplexml import SimpleXMLElement, TYPE_MAP, REVERSE_TYPE_MAP, OrderedDict
+from .transport import get_http_wrapper, set_http_wrapper, get_Http
+# Utility functions used throughout wsdl_parse, moved aside for readability
+from .helpers import fetch, sort_dict, make_key, process_element, \
+ postprocess_element, get_message, preprocess_schema, \
+ get_local_name, get_namespace_prefix, TYPE_MAP, urlsplit
+
log = logging.getLogger(__name__)
-logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
class SoapFault(RuntimeError):
- def __init__(self,faultcode,faultstring):
+ def __init__(self, faultcode, faultstring):
self.faultcode = faultcode
self.faultstring = faultstring
RuntimeError.__init__(self, faultcode, faultstring)
- def __str__(self):
- return self.__unicode__().encode("ascii", "ignore")
-
def __unicode__(self):
- return u'%s: %s' % (self.faultcode, self.faultstring)
+ return '%s: %s' % (self.faultcode, self.faultstring)
+
+ if sys.version > '3':
+ __str__ = __unicode__
+ else:
+ def __str__(self):
+ return self.__unicode__().encode('ascii', 'ignore')
def __repr__(self):
- return u"SoapFault(%s, %s)" % (repr(self.faultcode),
- repr(self.faultstring))
+ return "SoapFault(%s, %s)" % (repr(self.faultcode),
+ repr(self.faultstring))
# soap protocol specification & namespace
soap_namespaces = dict(
- soap11="http://schemas.xmlsoap.org/soap/envelope/",
- soap="http://schemas.xmlsoap.org/soap/envelope/",
- soapenv="http://schemas.xmlsoap.org/soap/envelope/",
- soap12="http://www.w3.org/2003/05/soap-env",
+ soap11='http://schemas.xmlsoap.org/soap/envelope/',
+ soap='http://schemas.xmlsoap.org/soap/envelope/',
+ soapenv='http://schemas.xmlsoap.org/soap/envelope/',
+ soap12='http://www.w3.org/2003/05/soap-env',
+ soap12env="http://www.w3.org/2003/05/soap-envelope",
)
-_USE_GLOBAL_DEFAULT = object()
class SoapClient(object):
- "Simple SOAP Client (simil PHP)"
- def __init__(self, location = None, action = None, namespace = None,
- cert = None, trace = False, exceptions = True, proxy = None, ns=False,
- soap_ns=None, wsdl = None, cache = False, cacert=None,
- sessions=False, soap_server=None, timeout=_USE_GLOBAL_DEFAULT,
- http_headers={}
+ """Simple SOAP Client (simil PHP)"""
+ def __init__(self, location=None, action=None, namespace=None,
+ cert=None, exceptions=True, proxy=None, ns=None,
+ soap_ns=None, wsdl=None, wsdl_basedir='', cache=False, cacert=None,
+ sessions=False, soap_server=None, timeout=TIMEOUT,
+ http_headers=None, trace=False,
+ username=None, password=None,
):
"""
:param http_headers: Additional HTTP Headers; example: {'Host': 'ipsec.example.com'}
"""
- self.certssl = cert
- self.keyssl = None
+ self.certssl = cert
+ self.keyssl = None
self.location = location # server location (url)
self.action = action # SOAP base action
- self.namespace = namespace # message
- self.trace = trace # show debug messages
+ self.namespace = namespace # message
self.exceptions = exceptions # lanzar execpiones? (Soap Faults)
self.xml_request = self.xml_response = ''
- self.http_headers = http_headers
+ self.http_headers = http_headers or {}
+ # extract the base directory / url for wsdl relative imports:
+ if wsdl and wsdl_basedir == '':
+ # parse the wsdl url, strip the scheme and filename
+ url_scheme, netloc, path, query, fragment = urlsplit(wsdl)
+ wsdl_basedir = os.path.dirname(netloc + path)
+
+ self.wsdl_basedir = wsdl_basedir
+
+ # shortcut to print all debugging info and sent / received xml messages
+ if trace:
+ logging.basicConfig(level=logging.DEBUG)
+
if not soap_ns and not ns:
- self.__soap_ns = 'soap' # 1.1
+ self.__soap_ns = 'soap' # 1.1
elif not soap_ns and ns:
- self.__soap_ns = 'soapenv' # 1.2
+ self.__soap_ns = 'soapenv' # 1.2
else:
self.__soap_ns = soap_ns
-
- # SOAP Server (special cases like oracle or jbossas6)
+
+ # SOAP Server (special cases like oracle, jbossas6 or jetty)
self.__soap_server = soap_server
-
+
# SOAP Header support
self.__headers = {} # general headers
self.__call_headers = None # OrderedDict to be marshalled for RPC Call
-
+
# check if the Certification Authority Cert is a string and store it
- if cacert and cacert.startswith("-----BEGIN CERTIFICATE-----"):
+ if cacert and cacert.startswith('-----BEGIN CERTIFICATE-----'):
fd, filename = tempfile.mkstemp()
f = os.fdopen(fd, 'w+b', -1)
- if self.trace: log.info(u"Saving CA certificate to %s" % filename)
+ log.debug("Saving CA certificate to %s" % filename)
f.write(cacert)
cacert = filename
f.close()
self.cacert = cacert
-
- if timeout is _USE_GLOBAL_DEFAULT:
- timeout = TIMEOUT
- else:
- timeout = timeout
# Create HTTP wrapper
Http = get_Http()
self.http = Http(timeout=timeout, cacert=cacert, proxy=proxy, sessions=sessions)
-
- self.__ns = ns # namespace prefix or False to not use it
+ if username and password:
+ if hasattr(self.http, 'add_credentials'):
+ self.http.add_credentials(username, password)
+
+
+ # namespace prefix, None to use xmlns attribute or False to not use it:
+ self.__ns = ns
if not ns:
- self.__xml = """
-<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xmlns:xsd="http://www.w3.org/2001/XMLSchema"
+ self.__xml = """
+<%(soap_ns)s:Envelope xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:%(soap_ns)s="%(soap_uri)s">
<%(soap_ns)s:Header/>
<%(soap_ns)s:Body>
@@ -136,127 +156,127 @@ class SoapClient(object):
%(soap_ns)s:Envelope>"""
# parse wsdl url
- self.services = wsdl and self.wsdl_parse(wsdl, debug=trace, cache=cache)
+ self.services = wsdl and self.wsdl_parse(wsdl, cache=cache)
self.service_port = None # service port for late binding
def __getattr__(self, attr):
- "Return a pseudo-method that can be called"
- if not self.services: # not using WSDL?
- return lambda self=self, *args, **kwargs: self.call(attr,*args,**kwargs)
- else: # using WSDL:
- return lambda *args, **kwargs: self.wsdl_call(attr,*args,**kwargs)
-
+ """Return a pseudo-method that can be called"""
+ if not self.services: # not using WSDL?
+ return lambda self=self, *args, **kwargs: self.call(attr, *args, **kwargs)
+ else: # using WSDL:
+ return lambda *args, **kwargs: self.wsdl_call(attr, *args, **kwargs)
+
def call(self, method, *args, **kwargs):
"""Prepare xml request and make SOAP call, returning a SimpleXMLElement.
-
+
If a keyword argument called "headers" is passed with a value of a
SimpleXMLElement object, then these headers will be inserted into the
request.
- """
+ """
#TODO: method != input_message
# Basic SOAP request:
- xml = self.__xml % dict(method=method, namespace=self.namespace, ns=self.__ns,
- soap_ns=self.__soap_ns, soap_uri=soap_namespaces[self.__soap_ns])
- request = SimpleXMLElement(xml,namespace=self.__ns and self.namespace, prefix=self.__ns)
-
- try:
- request_headers = kwargs.pop('headers')
- except KeyError:
- request_headers = None
-
+ xml = self.__xml % dict(method=method, # method tag name
+ namespace=self.namespace, # method ns uri
+ ns=self.__ns, # method ns prefix
+ soap_ns=self.__soap_ns, # soap prefix & uri
+ soap_uri=soap_namespaces[self.__soap_ns])
+ request = SimpleXMLElement(xml, namespace=self.__ns and self.namespace,
+ prefix=self.__ns)
+
+ request_headers = kwargs.pop('headers', None)
+
# serialize parameters
if kwargs:
- parameters = kwargs.items()
+ parameters = list(kwargs.items())
else:
parameters = args
if parameters and isinstance(parameters[0], SimpleXMLElement):
# merge xmlelement parameter ("raw" - already marshalled)
if parameters[0].children() is not None:
for param in parameters[0].children():
- getattr(request,method).import_node(param)
+ getattr(request, method).import_node(param)
+ for k,v in parameters[0].attributes().items():
+ getattr(request, method)[k] = v
elif parameters:
# marshall parameters:
- for k,v in parameters: # dict: tag=valor
- getattr(request,method).marshall(k,v)
- elif not self.__soap_server in ('oracle', ) or self.__soap_server in ('jbossas6',):
+ use_ns = None if (self.__soap_server == "jetty" or self.qualified is False) else True
+ for k, v in parameters: # dict: tag=valor
+ getattr(request, method).marshall(k, v, ns=use_ns)
+ elif not self.__soap_server in ('oracle',) or self.__soap_server in ('jbossas6',):
# JBossAS-6 requires no empty method parameters!
- delattr(request("Body", ns=soap_namespaces.values(),), method)
-
+ delattr(request("Body", ns=list(soap_namespaces.values()),), method)
+
# construct header and parameters (if not wsdl given) except wsse
if self.__headers and not self.services:
self.__call_headers = dict([(k, v) for k, v in self.__headers.items()
- if not k.startswith("wsse:")])
+ if not k.startswith('wsse:')])
# always extract WS Security header and send it
if 'wsse:Security' in self.__headers:
#TODO: namespaces too hardwired, clean-up...
- header = request('Header' , ns=soap_namespaces.values(),)
+ header = request('Header', ns=list(soap_namespaces.values()),)
k = 'wsse:Security'
v = self.__headers[k]
header.marshall(k, v, ns=False, add_children_ns=False)
header(k)['xmlns:wsse'] = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'
- #
+ #
if self.__call_headers:
- header = request('Header' , ns=soap_namespaces.values(),)
+ header = request('Header', ns=list(soap_namespaces.values()),)
for k, v in self.__call_headers.items():
##if not self.__ns:
## header['xmlns']
- header.marshall(k, v, ns=self.__ns, add_children_ns=False)
-
+ if isinstance(v, SimpleXMLElement):
+ # allows a SimpleXMLElement to be constructed and inserted
+ # rather than a dictionary. marshall doesn't allow ns: prefixes
+ # in dict key names
+ header.import_node(v)
+ else:
+ header.marshall(k, v, ns=self.__ns, add_children_ns=False)
if request_headers:
- header = request('Header' , ns=soap_namespaces.values(),)
+ header = request('Header', ns=list(soap_namespaces.values()),)
for subheader in request_headers.children():
header.import_node(subheader)
-
+
self.xml_request = request.as_xml()
self.xml_response = self.send(method, self.xml_request)
- response = SimpleXMLElement(self.xml_response, namespace=self.namespace)
- if self.exceptions and response("Fault", ns=soap_namespaces.values(), error=False):
+ response = SimpleXMLElement(self.xml_response, namespace=self.namespace,
+ jetty=self.__soap_server in ('jetty',))
+ if self.exceptions and response("Fault", ns=list(soap_namespaces.values()), error=False):
raise SoapFault(unicode(response.faultcode), unicode(response.faultstring))
return response
-
-
+
def send(self, method, xml):
- "Send SOAP request using HTTP"
+ """Send SOAP request using HTTP"""
if self.location == 'test': return
- # location = "%s" % self.location #?op=%s" % (self.location, method)
- location = self.location
-
+ # location = '%s' % self.location #?op=%s" % (self.location, method)
+ location = str(self.location)
+
if self.services:
- soap_action = self.action
+ soap_action = str(self.action)
else:
- soap_action = self.action + method
-
- headers={
+ soap_action = str(self.action) + method
+
+ headers = {
'Content-type': 'text/xml; charset="UTF-8"',
'Content-length': str(len(xml)),
- "SOAPAction": "\"%s\"" % (soap_action)
+ 'SOAPAction': '"%s"' % soap_action
}
headers.update(self.http_headers)
log.info("POST %s" % location)
- log.info("Headers: %s" % headers)
-
- if self.trace:
- print "-"*80
- print "POST %s" % location
- print '\n'.join(["%s: %s" % (k,v) for k,v in headers.items()])
- print u"\n%s" % xml.decode("utf8","ignore")
-
+ log.debug('\n'.join(["%s: %s" % (k, v) for k, v in headers.items()]))
+ log.debug(xml)
+
response, content = self.http.request(
- location, "POST", body=xml, headers=headers)
+ location, 'POST', body=xml, headers=headers)
self.response = response
self.content = content
-
- if self.trace:
- print
- print '\n'.join(["%s: %s" % (k,v) for k,v in response.items()])
- print content#.decode("utf8","ignore")
- print "="*80
- return content
+ log.debug('\n'.join(["%s: %s" % (k, v) for k, v in response.items()]))
+ log.debug(content)
+ return content
def get_operation(self, method):
# try to find operation in wsdl file
- soap_ver = self.__soap_ns == 'soap12' and 'soap12' or 'soap11'
+ soap_ver = self.__soap_ns.startswith('soap12') and 'soap12' or 'soap11'
if not self.service_port:
for service_name, service in self.services.items():
for port_name, port in [port for port in service['ports'].items()]:
@@ -264,400 +284,347 @@ class SoapClient(object):
self.service_port = service_name, port_name
break
else:
- raise RuntimeError("Cannot determine service in WSDL: "
- "SOAP version: %s" % soap_ver)
+ raise RuntimeError('Cannot determine service in WSDL: '
+ 'SOAP version: %s' % soap_ver)
else:
port = self.services[self.service_port[0]]['ports'][self.service_port[1]]
- self.location = port['location']
- operation = port['operations'].get(unicode(method))
+ if not self.location:
+ self.location = port['location']
+ operation = port['operations'].get(method)
if not operation:
- raise RuntimeError("Operation %s not found in WSDL: "
- "Service/Port Type: %s" %
+ raise RuntimeError('Operation %s not found in WSDL: '
+ 'Service/Port Type: %s' %
(method, self.service_port))
return operation
-
+
def wsdl_call(self, method, *args, **kwargs):
- "Pre and post process SOAP call, input and output parameters using WSDL"
+ """Pre and post process SOAP call, input and output parameters using WSDL"""
soap_uri = soap_namespaces[self.__soap_ns]
operation = self.get_operation(method)
+
# get i/o type declarations:
input = operation['input']
output = operation['output']
header = operation.get('header')
if 'action' in operation:
self.action = operation['action']
- # sort parameters (same order as xsd:sequence)
- def sort_dict(od, d):
- if isinstance(od, dict):
- ret = OrderedDict()
- for k in od.keys():
- v = d.get(k)
- # don't append null tags!
- if v is not None:
- if isinstance(v, dict):
- v = sort_dict(od[k], v)
- elif isinstance(v, list):
- v = [sort_dict(od[k][0], v1)
- for v1 in v]
- ret[str(k)] = v
- return ret
- else:
- return d
+
+ if 'namespace' in operation:
+ self.namespace = operation['namespace'] or ''
+ self.qualified = operation['qualified']
+
# construct header and parameters
if header:
self.__call_headers = sort_dict(header, self.__headers)
+ method, params = self.wsdl_call_get_params(method, input, *args, **kwargs)
+
+ # call remote procedure
+ response = self.call(method, *params)
+ # parse results:
+ resp = response('Body', ns=soap_uri).children().unmarshall(output)
+ return resp and list(resp.values())[0] # pass Response tag children
+
+ def wsdl_call_get_params(self, method, input, *args, **kwargs):
+ """Build params from input and args/kwargs"""
+ params = inputname = inputargs = None
+ all_args = {}
+ if input:
+ inputname = list(input.keys())[0]
+ inputargs = input[inputname]
+
if input and args:
# convert positional parameters to named parameters:
- d = [(k, arg) for k, arg in zip(input.values()[0].keys(), args)]
- kwargs.update(dict(d))
- if input and kwargs:
- params = sort_dict(input.values()[0], kwargs).items()
- if self.__soap_server == "axis":
+ d = {}
+ for idx, arg in enumerate(args):
+ key = list(inputargs.keys())[idx]
+ if isinstance(arg, dict):
+ if key in arg:
+ d[key] = arg[key]
+ else:
+ raise KeyError('Unhandled key %s. use client.help(method)')
+ else:
+ d[key] = arg
+ all_args.update({inputname: d})
+
+ if input and (kwargs or all_args):
+ if kwargs:
+ all_args.update({inputname: kwargs})
+ valid, errors, warnings = self.wsdl_validate_params(input, all_args)
+ if not valid:
+ raise ValueError('Invalid Args Structure. Errors: %s' % errors)
+ params = list(sort_dict(input, all_args).values())[0].items()
+ # TODO: check style and document attributes
+ if self.__soap_server in ('axis', ):
# use the operation name
method = method
else:
# use the message (element) name
- method = input.keys()[0]
+ method = inputname
#elif not input:
- #TODO: no message! (see wsmtxca.dummy)
+ #TODO: no message! (see wsmtxca.dummy)
else:
params = kwargs and kwargs.items()
- # call remote procedure
- response = self.call(method, *params)
- # parse results:
- resp = response('Body',ns=soap_uri).children().unmarshall(output)
- return resp and resp.values()[0] # pass Response tag children
+
+ return (method, params)
+
+ def wsdl_validate_params(self, struct, value):
+ """Validate the arguments (actual values) for the parameters structure.
+ Fail for any invalid arguments or type mismatches."""
+ errors = []
+ warnings = []
+ valid = True
+
+ # Determine parameter type
+ if type(struct) == type(value):
+ typematch = True
+ if not isinstance(struct, dict) and isinstance(value, dict):
+ typematch = True # struct can be an OrderedDict
+ else:
+ typematch = False
+
+ if struct == str:
+ struct = unicode # fix for py2 vs py3 string handling
+
+ if not isinstance(struct, (list, dict, tuple)) and struct in TYPE_MAP.keys():
+ if not type(value) == struct:
+ try:
+ struct(value) # attempt to cast input to parameter type
+ except:
+ valid = False
+ errors.append('Type mismatch for argument value. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))
+
+ elif isinstance(struct, list) and len(struct) == 1 and not isinstance(value, list):
+ # parameter can have a dict in a list: [{}] indicating a list is allowed, but not needed if only one argument.
+ next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[0], value)
+ if not next_valid:
+ valid = False
+ errors.extend(next_errors)
+ warnings.extend(next_warnings)
+
+ # traverse tree
+ elif isinstance(struct, dict):
+ if struct and value:
+ for key in value:
+ if key not in struct:
+ valid = False
+ errors.append('Argument key %s not in parameter. parameter: %s, args: %s' % (key, struct, value))
+ else:
+ next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct[key], value[key])
+ if not next_valid:
+ valid = False
+ errors.extend(next_errors)
+ warnings.extend(next_warnings)
+ for key in struct:
+ if key not in value:
+ warnings.append('Parameter key %s not in args. parameter: %s, value: %s' % (key, struct, value))
+ elif struct and not value:
+ warnings.append('parameter keys not in args. parameter: %s, args: %s' % (struct, value))
+ elif not struct and value:
+ valid = False
+ errors.append('Args keys not in parameter. parameter: %s, args: %s' % (struct, value))
+ else:
+ pass
+ elif isinstance(struct, list):
+ struct_list_value = struct[0]
+ for item in value:
+ next_valid, next_errors, next_warnings = self.wsdl_validate_params(struct_list_value, item)
+ if not next_valid:
+ valid = False
+ errors.extend(next_errors)
+ warnings.extend(next_warnings)
+ elif not typematch:
+ valid = False
+ errors.append('Type mismatch. parameter(%s): %s, value(%s): %s' % (type(struct), struct, type(value), value))
+
+ return (valid, errors, warnings)
def help(self, method):
- "Return operation documentation and invocation/returned value example"
+ """Return operation documentation and invocation/returned value example"""
operation = self.get_operation(method)
input = operation.get('input')
- input = input and input.values() and input.values()[0]
+ input = input and input.values() and list(input.values())[0]
if isinstance(input, dict):
- input = ", ".join("%s=%s" % (k,repr(v)) for k,v
- in input.items())
+ input = ", ".join("%s=%s" % (k, repr(v)) for k, v in input.items())
elif isinstance(input, list):
input = repr(input)
output = operation.get('output')
if output:
- output = operation['output'].values()[0]
+ output = list(operation['output'].values())[0]
headers = operation.get('headers') or None
- return u"%s(%s)\n -> %s:\n\n%s\nHeaders: %s" % (
- method,
- input or "",
- output and output or "",
- operation.get("documentation",""),
+ return "%s(%s)\n -> %s:\n\n%s\nHeaders: %s" % (
+ method,
+ input or '',
+ output and output or '',
+ operation.get('documentation', ''),
headers,
- )
+ )
- def wsdl_parse(self, url, debug=False, cache=False):
- "Parse Web Service Description v1.1"
+ def wsdl_parse(self, url, cache=False):
+ """Parse Web Service Description v1.1"""
- log.debug("wsdl url: %s" % url)
+ log.debug('Parsing wsdl url: %s' % url)
# Try to load a previously parsed wsdl:
force_download = False
if cache:
- # make md5 hash of the url for caching...
- filename_pkl = "%s.pkl" % hashlib.md5(url).hexdigest()
+ # make md5 hash of the url for caching...
+ filename_pkl = '%s.pkl' % hashlib.md5(url).hexdigest()
if isinstance(cache, basestring):
- filename_pkl = os.path.join(cache, filename_pkl)
+ filename_pkl = os.path.join(cache, filename_pkl)
if os.path.exists(filename_pkl):
- log.debug("Unpickle file %s" % (filename_pkl, ))
- f = open(filename_pkl, "r")
+ log.debug('Unpickle file %s' % (filename_pkl, ))
+ f = open(filename_pkl, 'r')
pkl = pickle.load(f)
f.close()
# sanity check:
- if pkl['version'][:-1] != __version__.split(" ")[0][:-1] or pkl['url'] != url:
+ if pkl['version'][:-1] != __version__.split(' ')[0][:-1] or pkl['url'] != url:
import warnings
- warnings.warn('version or url mismatch! discarding cached wsdl', RuntimeWarning)
- if debug:
- log.debug('Version: %s %s' % (pkl['version'], __version__))
- log.debug('URL: %s %s' % (pkl['url'], url))
+ warnings.warn('version or url mismatch! discarding cached wsdl', RuntimeWarning)
+ log.debug('Version: %s %s' % (pkl['version'], __version__))
+ log.debug('URL: %s %s' % (pkl['url'], url))
force_download = True
else:
self.namespace = pkl['namespace']
self.documentation = pkl['documentation']
return pkl['services']
-
+
soap_ns = {
- "http://schemas.xmlsoap.org/wsdl/soap/": 'soap11',
- "http://schemas.xmlsoap.org/wsdl/soap12/": 'soap12',
- }
- wsdl_uri="http://schemas.xmlsoap.org/wsdl/"
- xsd_uri="http://www.w3.org/2001/XMLSchema"
- xsi_uri="http://www.w3.org/2001/XMLSchema-instance"
-
- get_local_name = lambda s: s and str((':' in s) and s.split(':')[1] or s)
- get_namespace_prefix = lambda s: s and str((':' in s) and s.split(':')[0] or None)
-
+ 'http://schemas.xmlsoap.org/wsdl/soap/': 'soap11',
+ 'http://schemas.xmlsoap.org/wsdl/soap12/': 'soap12',
+ }
+ wsdl_uri = 'http://schemas.xmlsoap.org/wsdl/'
+ xsd_uri = 'http://www.w3.org/2001/XMLSchema'
+ xsi_uri = 'http://www.w3.org/2001/XMLSchema-instance'
+
# always return an unicode object:
- REVERSE_TYPE_MAP[u'string'] = unicode
+ REVERSE_TYPE_MAP['string'] = str
- def fetch(url):
- "Download a document from a URL, save it locally if cache enabled"
-
- # check / append a valid schema if not given:
- url_scheme, netloc, path, query, fragment = urlsplit(url)
- if not url_scheme in ('http','https', 'file'):
- for scheme in ('http','https', 'file'):
- try:
- if not url.startswith("/") and scheme in ('http', 'https'):
- tmp_url = "%s://%s" % (scheme, url)
- else:
- tmp_url = "%s:%s" % (scheme, url)
- if debug: log.debug("Scheme not found, trying %s" % scheme)
- return fetch(tmp_url)
- except Exception, e:
- log.error(e)
- raise RuntimeError("No scheme given for url: %s" % url)
-
- # make md5 hash of the url for caching...
- filename = "%s.xml" % hashlib.md5(url).hexdigest()
- if isinstance(cache, basestring):
- filename = os.path.join(cache, filename)
- if cache and os.path.exists(filename) and not force_download:
- log.info("Reading file %s" % (filename, ))
- f = open(filename, "r")
- xml = f.read()
- f.close()
- else:
- if url_scheme == 'file':
- log.info("Fetching url %s using urllib2" % (url, ))
- f = urllib2.urlopen(url)
- xml = f.read()
- else:
- log.info("GET %s using %s" % (url, self.http._wrapper_version))
- response, xml = self.http.request(url, "GET", None, {})
- if cache:
- log.info("Writing file %s" % (filename, ))
- if not os.path.isdir(cache):
- os.makedirs(cache)
- f = open(filename, "w")
- f.write(xml)
- f.close()
- return xml
-
# Open uri and read xml:
- xml = fetch(url)
+ xml = fetch(url, self.http, cache, force_download, self.wsdl_basedir)
# Parse WSDL XML:
wsdl = SimpleXMLElement(xml, namespace=wsdl_uri)
+ # Extract useful data:
+ self.namespace = ""
+ self.documentation = unicode(wsdl('documentation', error=False)) or ''
+
+        # some wsdl are split down in several files, join them:
+ imported_wsdls = {}
+ for element in wsdl.children() or []:
+ if element.get_local_name() in ('import'):
+ wsdl_namespace = element['namespace']
+ wsdl_location = element['location']
+ if wsdl_location is None:
+ log.warning('WSDL location not provided for %s!' % wsdl_namespace)
+ continue
+ if wsdl_location in imported_wsdls:
+ log.warning('WSDL %s already imported!' % wsdl_location)
+ continue
+ imported_wsdls[wsdl_location] = wsdl_namespace
+ log.debug('Importing wsdl %s from %s' % (wsdl_namespace, wsdl_location))
+ # Open uri and read xml:
+ xml = fetch(wsdl_location, self.http, cache, force_download, self.wsdl_basedir)
+ # Parse imported XML schema (recursively):
+ imported_wsdl = SimpleXMLElement(xml, namespace=xsd_uri)
+ # merge the imported wsdl into the main document:
+ wsdl.import_node(imported_wsdl)
+ # warning: do not process schemas to avoid infinite recursion!
+
+
# detect soap prefix and uri (xmlns attributes of )
xsd_ns = None
soap_uris = {}
for k, v in wsdl[:]:
- if v in soap_ns and k.startswith("xmlns:"):
+ if v in soap_ns and k.startswith('xmlns:'):
soap_uris[get_local_name(k)] = v
- if v== xsd_uri and k.startswith("xmlns:"):
+ if v == xsd_uri and k.startswith('xmlns:'):
xsd_ns = get_local_name(k)
- # Extract useful data:
- self.namespace = wsdl['targetNamespace']
- self.documentation = unicode(wsdl('documentation', error=False) or '')
-
services = {}
- bindings = {} # binding_name: binding
- operations = {} # operation_name: operation
- port_type_bindings = {} # port_type_name: binding
- messages = {} # message: element
- elements = {} # element: type def
-
+ bindings = {} # binding_name: binding
+ operations = {} # operation_name: operation
+ port_type_bindings = {} # port_type_name: binding
+ messages = {} # message: element
+ elements = {} # element: type def
+
for service in wsdl.service:
- service_name=service['name']
+ service_name = service['name']
if not service_name:
- continue # empty service?
- if debug: log.debug("Processing service %s" % service_name)
+ continue # empty service?
serv = services.setdefault(service_name, {'ports': {}})
- serv['documentation']=service['documentation'] or ''
+ serv['documentation'] = service['documentation'] or ''
for port in service.port:
binding_name = get_local_name(port['binding'])
- address = port('address', ns=soap_uris.values(), error=False)
+ operations[binding_name] = {}
+ address = port('address', ns=list(soap_uris.values()), error=False)
location = address and address['location'] or None
soap_uri = address and soap_uris.get(address.get_prefix())
soap_ver = soap_uri and soap_ns.get(soap_uri)
- bindings[binding_name] = {'service_name': service_name,
- 'location': location,
- 'soap_uri': soap_uri, 'soap_ver': soap_ver,
- }
+ bindings[binding_name] = {'name': binding_name,
+ 'service_name': service_name,
+ 'location': location,
+ 'soap_uri': soap_uri,
+ 'soap_ver': soap_ver, }
serv['ports'][port['name']] = bindings[binding_name]
-
+
for binding in wsdl.binding:
binding_name = binding['name']
- if debug: log.debug("Processing binding %s" % service_name)
- soap_binding = binding('binding', ns=soap_uris.values(), error=False)
+ soap_binding = binding('binding', ns=list(soap_uris.values()), error=False)
transport = soap_binding and soap_binding['transport'] or None
port_type_name = get_local_name(binding['type'])
bindings[binding_name].update({
'port_type_name': port_type_name,
'transport': transport, 'operations': {},
- })
- port_type_bindings[port_type_name] = bindings[binding_name]
+ })
+ if port_type_name not in port_type_bindings:
+ port_type_bindings[port_type_name] = []
+ port_type_bindings[port_type_name].append(bindings[binding_name])
for operation in binding.operation:
op_name = operation['name']
- op = operation('operation',ns=soap_uris.values(), error=False)
+ op = operation('operation', ns=list(soap_uris.values()), error=False)
action = op and op['soapAction']
- d = operations.setdefault(op_name, {})
+ d = operations[binding_name].setdefault(op_name, {})
bindings[binding_name]['operations'][op_name] = d
d.update({'name': op_name})
d['parts'] = {}
# input and/or ouput can be not present!
input = operation('input', error=False)
- body = input and input('body', ns=soap_uris.values(), error=False)
+ body = input and input('body', ns=list(soap_uris.values()), error=False)
d['parts']['input_body'] = body and body['parts'] or None
output = operation('output', error=False)
- body = output and output('body', ns=soap_uris.values(), error=False)
+ body = output and output('body', ns=list(soap_uris.values()), error=False)
d['parts']['output_body'] = body and body['parts'] or None
- header = input and input('header', ns=soap_uris.values(), error=False)
+ header = input and input('header', ns=list(soap_uris.values()), error=False)
d['parts']['input_header'] = header and {'message': header['message'], 'part': header['part']} or None
- headers = output and output('header', ns=soap_uris.values(), error=False)
+ header = output and output('header', ns=list(soap_uris.values()), error=False)
d['parts']['output_header'] = header and {'message': header['message'], 'part': header['part']} or None
- #if action: #TODO: separe operation_binding from operation
if action:
- d["action"] = action
-
- def make_key(element_name, element_type):
- "return a suitable key for elements"
- # only distinguish 'element' vs other types
- if element_type in ('complexType', 'simpleType'):
- eltype = 'complexType'
- else:
- eltype = element_type
- if eltype not in ('element', 'complexType', 'simpleType'):
- raise RuntimeError("Unknown element type %s = %s" % (unicode(element_name), eltype))
- return (unicode(element_name), eltype)
-
- #TODO: cleanup element/schema/types parsing:
- def process_element(element_name, node, element_type):
- "Parse and define simple element types"
- if debug:
- log.debug("Processing element %s %s" % (element_name, element_type))
- for tag in node:
- if tag.get_local_name() in ("annotation", "documentation"):
- continue
- elif tag.get_local_name() in ('element', 'restriction'):
- if debug: log.debug("%s has not children! %s" % (element_name,tag))
- children = tag # element "alias"?
- alias = True
- elif tag.children():
- children = tag.children()
- alias = False
- else:
- if debug: log.debug("%s has not children! %s" % (element_name,tag))
- continue #TODO: abstract?
- d = OrderedDict()
- for e in children:
- t = e['type']
- if not t:
- t = e['base'] # complexContent (extension)!
- if not t:
- t = 'anyType' # no type given!
- t = t.split(":")
- if len(t)>1:
- ns, type_name = t
- else:
- ns, type_name = None, t[0]
- if element_name == type_name:
- pass ## warning with infinite recursion
- uri = ns and e.get_namespace_uri(ns) or xsd_uri
- if uri==xsd_uri:
- # look for the type, None == any
- fn = REVERSE_TYPE_MAP.get(unicode(type_name), None)
- else:
- fn = None
- if not fn:
- # simple / complex type, postprocess later
- fn = elements.setdefault(make_key(type_name, "complexType"), OrderedDict())
-
- if e['name'] is not None and not alias:
- e_name = unicode(e['name'])
- d[e_name] = fn
- else:
- if debug: log.debug("complexConent/simpleType/element %s = %s" % (element_name, type_name))
- d[None] = fn
- if e['maxOccurs']=="unbounded" or (ns == 'SOAP-ENC' and type_name == 'Array'):
- # it's an array... TODO: compound arrays?
- d.array = True
- if e is not None and e.get_local_name() == 'extension' and e.children():
- # extend base element:
- process_element(element_name, e.children(), element_type)
- elements.setdefault(make_key(element_name, element_type), OrderedDict()).update(d)
-
- # check axis2 namespace at schema types attributes
- self.namespace = dict(wsdl.types("schema", ns=xsd_uri)[:]).get('targetNamespace', self.namespace)
+ d['action'] = action
+ # check axis2 namespace at schema types attributes (europa.eu checkVat)
+ if "http://xml.apache.org/xml-soap" in dict(wsdl[:]).values():
+ # get the sub-namespace in the first schema element (see issue 8)
+ if wsdl('types', error=False):
+ schema = wsdl.types('schema', ns=xsd_uri)
+ attrs = dict(schema[:])
+ self.namespace = attrs.get('targetNamespace', self.namespace)
+ if not self.namespace or self.namespace == "urn:DefaultNamespace":
+ self.namespace = wsdl['targetNamespace'] or self.namespace
+
imported_schemas = {}
+ global_namespaces = {None: self.namespace}
- def preprocess_schema(schema):
- "Find schema elements and complex types"
- for element in schema.children() or []:
- if element.get_local_name() in ('import', ):
- schema_namespace = element['namespace']
- schema_location = element['schemaLocation']
- if schema_location is None:
- if debug: log.debug("Schema location not provided for %s!" % (schema_namespace, ))
- continue
- if schema_location in imported_schemas:
- if debug: log.debug("Schema %s already imported!" % (schema_location, ))
- continue
- imported_schemas[schema_location] = schema_namespace
- if debug: print "Importing schema %s from %s" % (schema_namespace, schema_location)
- # Open uri and read xml:
- xml = fetch(schema_location)
- # Parse imported XML schema (recursively):
- imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)
- preprocess_schema(imported_schema)
+ # process current wsdl schema (if any):
+ if wsdl('types', error=False):
+ for schema in wsdl.types('schema', ns=xsd_uri):
+ preprocess_schema(schema, imported_schemas, elements, xsd_uri,
+ self.__soap_server, self.http, cache,
+ force_download, self.wsdl_basedir,
+ global_namespaces=global_namespaces)
- element_type = element.get_local_name()
- if element_type in ('element', 'complexType', "simpleType"):
- element_name = unicode(element['name'])
- if debug: log.debug("Parsing Element %s: %s" % (element_type, element_name))
- if element.get_local_name() == 'complexType':
- children = element.children()
- elif element.get_local_name() == 'simpleType':
- children = element("restriction", ns=xsd_uri)
- elif element.get_local_name() == 'element' and element['type']:
- children = element
- else:
- children = element.children()
- if children:
- children = children.children()
- elif element.get_local_name() == 'element':
- children = element
- if children:
- process_element(element_name, children, element_type)
-
- def postprocess_element(elements):
- "Fix unresolved references (elements referenced before its definition, thanks .net)"
- for k,v in elements.items():
- if isinstance(v, OrderedDict):
- if v.array:
- elements[k] = [v] # convert arrays to python lists
- if v!=elements: #TODO: fix recursive elements
- postprocess_element(v)
- if None in v and v[None]: # extension base?
- if isinstance(v[None], dict):
- for i, kk in enumerate(v[None]):
- # extend base -keep orginal order-
- if v[None] is not None:
- elements[k].insert(kk, v[None][kk], i)
- del v[None]
- else: # "alias", just replace
- if debug: log.debug("Replacing %s = %s" % (k, v[None]))
- elements[k] = v[None]
- #break
- if isinstance(v, list):
- for n in v: # recurse list
- postprocess_element(n)
-
-
- # process current wsdl schema:
- for schema in wsdl.types("schema", ns=xsd_uri):
- preprocess_schema(schema)
-
- postprocess_element(elements)
+ # 2nd phase: alias, postdefined elements, extend bases, convert lists
+ postprocess_element(elements, [])
for message in wsdl.message:
- if debug: log.debug("Processing message %s" % message['name'])
for part in message('part', error=False) or []:
element = {}
element_name = part['element']
@@ -668,112 +635,110 @@ class SoapClient(object):
type_uri = wsdl.get_namespace_uri(type_ns)
if type_uri == xsd_uri:
element_name = get_local_name(element_name)
- fn = REVERSE_TYPE_MAP.get(unicode(element_name), None)
+ fn = REVERSE_TYPE_MAP.get(element_name, None)
element = {part['name']: fn}
# emulate a true Element (complexType)
- messages.setdefault((message['name'], None), {message['name']: OrderedDict()}).values()[0].update(element)
+ list(messages.setdefault((message['name'], None), {message['name']: OrderedDict()}).values())[0].update(element)
else:
element_name = get_local_name(element_name)
- fn = elements.get(make_key(element_name, 'element'))
+ fn = elements.get(make_key(element_name, 'element', type_uri))
if not fn:
# some axis servers uses complexType for part messages
- fn = elements.get(make_key(element_name, 'complexType'))
+ fn = elements.get(make_key(element_name, 'complexType', type_uri))
element = {message['name']: {part['name']: fn}}
else:
element = {element_name: fn}
messages[(message['name'], part['name'])] = element
- def get_message(message_name, part_name):
- if part_name:
- # get the specific part of the message:
- return messages.get((message_name, part_name))
- else:
- # get the first part for the specified message:
- for (message_name_key, part_name_key), message in messages.items():
- if message_name_key == message_name:
- return message
-
for port_type in wsdl.portType:
port_type_name = port_type['name']
- if debug: log.debug("Processing port type %s" % port_type_name)
- binding = port_type_bindings[port_type_name]
- for operation in port_type.operation:
- op_name = operation['name']
- op = operations[op_name]
- op['documentation'] = unicode(operation('documentation', error=False) or '')
- if binding['soap_ver']:
- #TODO: separe operation_binding from operation (non SOAP?)
- if operation("input", error=False):
- input_msg = get_local_name(operation.input['message'])
- input_header = op['parts'].get('input_header')
- if input_header:
- header_msg = get_local_name(input_header.get('message'))
- header_part = get_local_name(input_header.get('part'))
- # warning: some implementations use a separate message!
- header = get_message(header_msg or input_msg, header_part)
+ for binding in port_type_bindings.get(port_type_name, []):
+ for operation in port_type.operation:
+ op_name = operation['name']
+ op = operations[binding['name']][op_name]
+ op['documentation'] = unicode(operation('documentation', error=False)) or ''
+ if binding['soap_ver']:
+ #TODO: separe operation_binding from operation (non SOAP?)
+ if operation('input', error=False):
+ input_msg = get_local_name(operation.input['message'])
+ input_header = op['parts'].get('input_header')
+ if input_header:
+ header_msg = get_local_name(input_header.get('message'))
+ header_part = get_local_name(input_header.get('part'))
+ # warning: some implementations use a separate message!
+ header = get_message(messages, header_msg or input_msg, header_part)
+ else:
+                            header = None   # not enough info to search the header message:
+ op['input'] = get_message(messages, input_msg, op['parts'].get('input_body'))
+ op['header'] = header
+ try:
+ element = list(op['input'].values())[0]
+ ns_uri = element.namespace
+ qualified = element.qualified
+ except AttributeError:
+ # TODO: fix if no parameters parsed or "variants"
+ ns = get_namespace_prefix(operation.input['message'])
+ ns_uri = operation.get_namespace_uri(ns)
+ qualified = None
+ if ns_uri:
+ op['namespace'] = ns_uri
+ op['qualified'] = qualified
else:
- header = None # not enought info to search the header message:
- op['input'] = get_message(input_msg, op['parts'].get('input_body'))
- op['header'] = header
- else:
- op['input'] = None
- op['header'] = None
- if operation("output", error=False):
- output_msg = get_local_name(operation.output['message'])
- op['output'] = get_message(output_msg, op['parts'].get('output_body'))
- else:
- op['output'] = None
+ op['input'] = None
+ op['header'] = None
+ if operation('output', error=False):
+ output_msg = get_local_name(operation.output['message'])
+ op['output'] = get_message(messages, output_msg, op['parts'].get('output_body'))
+ else:
+ op['output'] = None
+
+ # dump the full service/port/operation map
+ #log.debug(pprint.pformat(services))
- if debug:
- import pprint
- log.debug(pprint.pformat(services))
-
# Save parsed wsdl (cache)
if cache:
f = open(filename_pkl, "wb")
pkl = {
- 'version': __version__.split(" ")[0],
- 'url': url,
- 'namespace': self.namespace,
+ 'version': __version__.split(' ')[0],
+ 'url': url,
+ 'namespace': self.namespace,
'documentation': self.documentation,
'services': services,
- }
+ }
pickle.dump(pkl, f)
f.close()
-
+
return services
def __setitem__(self, item, value):
- "Set SOAP Header value - this header will be sent for every request."
+ """Set SOAP Header value - this header will be sent for every request."""
self.__headers[item] = value
def close(self):
- "Finish the connection and remove temp files"
+ """Finish the connection and remove temp files"""
self.http.close()
if self.cacert.startswith(tempfile.gettempdir()):
- if self.trace: log.info("removing %s" % self.cacert)
+ log.debug('removing %s' % self.cacert)
os.unlink(self.cacert)
-
+
def parse_proxy(proxy_str):
- "Parses proxy address user:pass@host:port into a dict suitable for httplib2"
- if isinstance(proxy_str, unicode):
- proxy_str = proxy_str.encode("utf8")
+ """Parses proxy address user:pass@host:port into a dict suitable for httplib2"""
proxy_dict = {}
if proxy_str is None:
- return
- if "@" in proxy_str:
- user_pass, host_port = proxy_str.split("@")
+ return
+ if '@' in proxy_str:
+ user_pass, host_port = proxy_str.split('@')
else:
- user_pass, host_port = "", proxy_str
- if ":" in host_port:
- host, port = host_port.split(":")
+ user_pass, host_port = '', proxy_str
+ if ':' in host_port:
+ host, port = host_port.split(':')
proxy_dict['proxy_host'], proxy_dict['proxy_port'] = host, int(port)
- if ":" in user_pass:
- proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(":")
+ if ':' in user_pass:
+ proxy_dict['proxy_user'], proxy_dict['proxy_pass'] = user_pass.split(':')
return proxy_dict
-
-
-if __name__ == "__main__":
+
+
+if __name__ == '__main__':
pass
diff --git a/gluon/contrib/pysimplesoap/helpers.py b/gluon/contrib/pysimplesoap/helpers.py
new file mode 100644
index 00000000..b0e1af7d
--- /dev/null
+++ b/gluon/contrib/pysimplesoap/helpers.py
@@ -0,0 +1,489 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published by the
+# Free Software Foundation; either version 3, or (at your option) any later
+# version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTIBILITY
+# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+# for more details.
+
+"""Pythonic simple SOAP Client helpers"""
+
+
+from __future__ import unicode_literals
+import sys
+if sys.version > '3':
+ basestring = unicode = str
+
+import datetime
+from decimal import Decimal
+import os
+import logging
+import hashlib
+import warnings
+
+try:
+ import urllib2
+ from urlparse import urlsplit
+except ImportError:
+ from urllib import request as urllib2
+ from urllib.parse import urlsplit
+
+from . import __author__, __copyright__, __license__, __version__
+
+
+log = logging.getLogger(__name__)
+
+
+def fetch(url, http, cache=False, force_download=False, wsdl_basedir=''):
+ """Download a document from a URL, save it locally if cache enabled"""
+
+ # check / append a valid schema if not given:
+ url_scheme, netloc, path, query, fragment = urlsplit(url)
+ if not url_scheme in ('http', 'https', 'file'):
+ for scheme in ('http', 'https', 'file'):
+ try:
+ if not url.startswith("/") and scheme in ('http', 'https'):
+ tmp_url = "%s://%s" % (scheme, os.path.join(wsdl_basedir, url))
+ else:
+ tmp_url = "%s:%s" % (scheme, os.path.join(wsdl_basedir, url))
+ log.debug('Scheme not found, trying %s' % scheme)
+ return fetch(tmp_url, http, cache, force_download, wsdl_basedir)
+ except Exception as e:
+ log.error(e)
+ raise RuntimeError('No scheme given for url: %s' % url)
+
+ # make md5 hash of the url for caching...
+ filename = '%s.xml' % hashlib.md5(url.encode('utf8')).hexdigest()
+ if isinstance(cache, basestring):
+ filename = os.path.join(cache, filename)
+ if cache and os.path.exists(filename) and not force_download:
+ log.info('Reading file %s' % filename)
+ f = open(filename, 'r')
+ xml = f.read()
+ f.close()
+ else:
+ if url_scheme == 'file':
+ log.info('Fetching url %s using urllib2' % url)
+ f = urllib2.urlopen(url)
+ xml = f.read()
+ else:
+ log.info('GET %s using %s' % (url, http._wrapper_version))
+ response, xml = http.request(url, 'GET', None, {})
+ if cache:
+ log.info('Writing file %s' % filename)
+ if not os.path.isdir(cache):
+ os.makedirs(cache)
+ f = open(filename, 'w')
+ f.write(xml)
+ f.close()
+ return xml
+
+
+def sort_dict(od, d):
+ """Sort parameters (same order as xsd:sequence)"""
+ if isinstance(od, dict):
+ ret = OrderedDict()
+ for k in od.keys():
+ v = d.get(k)
+ # don't append null tags!
+ if v is not None:
+ if isinstance(v, dict):
+ v = sort_dict(od[k], v)
+ elif isinstance(v, list):
+ v = [sort_dict(od[k][0], v1) for v1 in v]
+ ret[k] = v
+ if hasattr(od, 'namespace'):
+ ret.namespace = od.namespace
+ ret.qualified = od.qualified
+ return ret
+ else:
+ return d
+
+
+def make_key(element_name, element_type, namespace):
+ """Return a suitable key for elements"""
+ # only distinguish 'element' vs other types
+ if element_type in ('complexType', 'simpleType'):
+ eltype = 'complexType'
+ else:
+ eltype = element_type
+ if eltype not in ('element', 'complexType', 'simpleType'):
+ raise RuntimeError("Unknown element type %s = %s" % (element_name, eltype))
+ return (element_name, eltype, namespace)
+
+
+def process_element(elements, element_name, node, element_type, xsd_uri, dialect, namespace, qualified=None,
+ soapenc_uri = 'http://schemas.xmlsoap.org/soap/encoding/'):
+ """Parse and define simple element types"""
+
+ log.debug('Processing element %s %s' % (element_name, element_type))
+ for tag in node:
+ if tag.get_local_name() in ('annotation', 'documentation'):
+ continue
+ elif tag.get_local_name() in ('element', 'restriction'):
+ log.debug('%s has no children! %s' % (element_name, tag))
+ children = tag # element "alias"?
+ alias = True
+ elif tag.children():
+ children = tag.children()
+ alias = False
+ else:
+ log.debug('%s has no children! %s' % (element_name, tag))
+ continue # TODO: abstract?
+ d = OrderedDict()
+ d.namespace = namespace
+ d.qualified = qualified
+ for e in children:
+ t = e['type']
+ if not t:
+ t = e['base'] # complexContent (extension)!
+ if not t:
+ t = e['ref'] # reference to another element
+ if not t:
+ # "anonymous" elements had no type attribute but children
+ if e['name'] and e.children():
+ # create a type name to process the children
+ t = "%s_%s" % (element_name, e['name'])
+ c = e.children()
+ et = c.get_local_name()
+ c = c.children()
+ process_element(elements, t, c, et, xsd_uri, dialect, namespace, qualified)
+ else:
+ t = 'anyType' # no type given!
+ t = t.split(":")
+ if len(t) > 1:
+ ns, type_name = t
+ else:
+ ns, type_name = None, t[0]
+ if element_name == type_name and not alias and len(children) > 1:
+ continue # abort to prevent infinite recursion
+ uri = ns and e.get_namespace_uri(ns) or xsd_uri
+ if uri in (xsd_uri, soapenc_uri) and type_name != 'Array':
+ # look for the type, None == any
+ fn = REVERSE_TYPE_MAP.get(type_name, None)
+ elif uri == soapenc_uri and type_name == 'Array':
+ # arrays of simple types (look at the attribute tags):
+ fn = []
+ for a in e.children():
+ for k, v in a[:]:
+ if k.endswith(":arrayType"):
+ type_name = v
+ if ":" in type_name:
+ type_name = type_name[type_name.index(":")+1:]
+ if "[]" in type_name:
+ type_name = type_name[:type_name.index("[]")]
+ fn.append(REVERSE_TYPE_MAP.get(type_name, None))
+ else:
+ fn = None
+
+ if not fn:
+ # simple / complex type, postprocess later
+ if ns:
+ fn_namespace = uri # use the specified namespace
+ else:
+ fn_namespace = namespace # use parent namespace (default)
+ for k, v in e[:]:
+ if k.startswith("xmlns:"):
+ # get the namespace uri from the element
+ fn_namespace = v
+ fn = elements.setdefault(make_key(type_name, 'complexType', fn_namespace), OrderedDict())
+
+ if e['maxOccurs'] == 'unbounded' or (uri == soapenc_uri and type_name == 'Array'):
+ # it's an array... TODO: compound arrays? and check ns uri!
+ if isinstance(fn, OrderedDict):
+ if len(children) > 1 and dialect in ('jetty',):
+ # Jetty style support
+ # {'ClassName': [{'attr1': val1, 'attr2': val2}]
+ fn.array = True
+ else:
+ # .NET style support (backward compatibility)
+ # [{'ClassName': {'attr1': val1, 'attr2': val2}]
+ d.array = True
+ else:
+ if dialect in ('jetty',):
+ # scalar support [{'attr1': [val1]}]
+ fn = [fn]
+ else:
+ d.array = True
+
+ if (e['name'] is not None and not alias) or e['ref']:
+ e_name = e['name'] or type_name # for refs, use the type name
+ d[e_name] = fn
+ else:
+ log.debug('complexContent/simpleType/element %s = %s' % (element_name, type_name))
+ d[None] = fn
+ if e is not None and e.get_local_name() == 'extension' and e.children():
+ # extend base element:
+ process_element(elements, element_name, e.children(), element_type, xsd_uri, dialect, namespace, qualified)
+ elements.setdefault(make_key(element_name, element_type, namespace), OrderedDict()).update(d)
+
+
+def postprocess_element(elements, processed):
+ """Fix unresolved references (elements referenced before their definition, thanks .net)"""
+
+ # avoid already processed elements:
+ if elements in processed:
+ return
+ processed.append(elements)
+
+ for k, v in elements.items():
+ if isinstance(v, OrderedDict):
+ if v != elements: # TODO: fix recursive elements
+ postprocess_element(v, processed)
+ if None in v and v[None]: # extension base?
+ if isinstance(v[None], dict):
+ for i, kk in enumerate(v[None]):
+ # extend base -keep original order-
+ if v[None] is not None:
+ elements[k].insert(kk, v[None][kk], i)
+ del v[None]
+ else: # "alias", just replace
+ log.debug('Replacing %s = %s' % (k, v[None]))
+ elements[k] = v[None]
+ #break
+ if v.array:
+ elements[k] = [v] # convert arrays to python lists
+ if isinstance(v, list):
+ for n in v: # recurse list
+ if isinstance(n, (OrderedDict, list)):
+ #if n != elements: # TODO: fix recursive elements
+ postprocess_element(n, processed)
+
+
+def get_message(messages, message_name, part_name):
+ if part_name:
+ # get the specific part of the message:
+ return messages.get((message_name, part_name))
+ else:
+ # get the first part for the specified message:
+ for (message_name_key, part_name_key), message in messages.items():
+ if message_name_key == message_name:
+ return message
+
+
+get_local_name = lambda s: s and str((':' in s) and s.split(':')[1] or s)
+get_namespace_prefix = lambda s: s and str((':' in s) and s.split(':')[0] or None)
+
+
+def preprocess_schema(schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces=None, qualified=False):
+ """Find schema elements and complex types"""
+
+ from .simplexml import SimpleXMLElement # here to avoid recursive imports
+
+ # analyze the namespaces used in this schema
+ local_namespaces = {}
+ for k, v in schema[:]:
+ if k.startswith("xmlns"):
+ local_namespaces[get_local_name(k)] = v
+ if k == 'targetNamespace':
+ # URI namespace reference for this schema
+ if v == "urn:DefaultNamespace":
+ v = global_namespaces[None]
+ local_namespaces[None] = v
+ if k == 'elementFormDefault':
+ qualified = (v == "qualified")
+ # add schema namespaces to the global namespace dict = {URI: ns prefix}
+ for ns in local_namespaces.values():
+ if ns not in global_namespaces:
+ global_namespaces[ns] = 'ns%s' % len(global_namespaces)
+
+ for element in schema.children() or []:
+ if element.get_local_name() in ('import', 'include',):
+ schema_namespace = element['namespace']
+ schema_location = element['schemaLocation']
+ if schema_location is None:
+ log.debug('Schema location not provided for %s!' % schema_namespace)
+ continue
+ if schema_location in imported_schemas:
+ log.debug('Schema %s already imported!' % schema_location)
+ continue
+ imported_schemas[schema_location] = schema_namespace
+ log.debug('Importing schema %s from %s' % (schema_namespace, schema_location))
+ # Open uri and read xml:
+ xml = fetch(schema_location, http, cache, force_download, wsdl_basedir)
+
+ # Parse imported XML schema (recursively):
+ imported_schema = SimpleXMLElement(xml, namespace=xsd_uri)
+ preprocess_schema(imported_schema, imported_schemas, elements, xsd_uri, dialect, http, cache, force_download, wsdl_basedir, global_namespaces, qualified)
+
+ element_type = element.get_local_name()
+ if element_type in ('element', 'complexType', "simpleType"):
+ namespace = local_namespaces[None] # get targetNamespace
+ element_ns = global_namespaces[ns] # get the prefix
+ element_name = element['name']
+ log.debug("Parsing Element %s: %s" % (element_type, element_name))
+ if element.get_local_name() == 'complexType':
+ children = element.children()
+ elif element.get_local_name() == 'simpleType':
+ children = element('restriction', ns=xsd_uri)
+ elif element.get_local_name() == 'element' and element['type']:
+ children = element
+ else:
+ children = element.children()
+ if children:
+ children = children.children()
+ elif element.get_local_name() == 'element':
+ children = element
+ if children:
+ process_element(elements, element_name, children, element_type, xsd_uri, dialect, namespace, qualified)
+
+
+# simplexml utilities:
+
+try:
+ _strptime = datetime.datetime.strptime
+except AttributeError: # python2.4
+ _strptime = lambda s, fmt: datetime.datetime(*(time.strptime(s, fmt)[:6]))
+
+
+# Functions to serialize/deserialize special immutable types:
+def datetime_u(s):
+ fmt = "%Y-%m-%dT%H:%M:%S"
+ try:
+ return _strptime(s, fmt)
+ except ValueError:
+ try:
+ # strip utc offset
+ if s[-3] == ":" and s[-6] in (' ', '-', '+'):
+ warnings.warn('removing unsupported UTC offset', RuntimeWarning)
+ s = s[:-6]
+ # parse microseconds
+ try:
+ return _strptime(s, fmt + ".%f")
+ except:
+ return _strptime(s, fmt)
+ except ValueError:
+ # strip microseconds (not supported in this platform)
+ if "." in s:
+ warnings.warn('removing unsuppported microseconds', RuntimeWarning)
+ s = s[:s.index(".")]
+ return _strptime(s, fmt)
+
+datetime_m = lambda dt: dt.isoformat()
+date_u = lambda s: _strptime(s[0:10], "%Y-%m-%d").date()
+date_m = lambda d: d.strftime("%Y-%m-%d")
+time_u = lambda s: _strptime(s, "%H:%M:%S").time()
+time_m = lambda d: d.strftime("%H%M%S")
+bool_u = lambda s: {'0': False, 'false': False, '1': True, 'true': True}[s]
+bool_m = lambda s: {False: 'false', True: 'true'}[s]
+
+
+# aliases:
+class Alias(object):
+ def __init__(self, py_type, xml_type):
+ self.py_type, self.xml_type = py_type, xml_type
+
+ def __call__(self, value):
+ return self.py_type(value)
+
+ def __repr__(self):
+ return "<Alias %s for %s>" % (self.xml_type, self.py_type)
+
+if sys.version > '3':
+ long = Alias(int, 'long')
+byte = Alias(str, 'byte')
+short = Alias(int, 'short')
+double = Alias(float, 'double')
+integer = Alias(long, 'integer')
+DateTime = datetime.datetime
+Date = datetime.date
+Time = datetime.time
+
+# Define conversion function (python type): xml schema type
+TYPE_MAP = {
+ unicode: 'string',
+ bool: 'boolean',
+ short: 'short',
+ byte: 'byte',
+ int: 'int',
+ long: 'long',
+ integer: 'integer',
+ float: 'float',
+ double: 'double',
+ Decimal: 'decimal',
+ datetime.datetime: 'dateTime',
+ datetime.date: 'date',
+}
+TYPE_MARSHAL_FN = {
+ datetime.datetime: datetime_m,
+ datetime.date: date_m,
+ bool: bool_m
+}
+TYPE_UNMARSHAL_FN = {
+ datetime.datetime: datetime_u,
+ datetime.date: date_u,
+ bool: bool_u,
+ str: unicode,
+}
+
+REVERSE_TYPE_MAP = dict([(v, k) for k, v in TYPE_MAP.items()])
+
+REVERSE_TYPE_MAP.update({
+ 'base64Binary': str,
+})
+
+# insert str here to avoid collision in REVERSE_TYPE_MAP (i.e. decoding errors)
+if str not in TYPE_MAP:
+ TYPE_MAP[str] = 'string'
+
+
+class OrderedDict(dict):
+ """Minimal ordered dictionary for xsd:sequences"""
+ def __init__(self):
+ self.__keys = []
+ self.array = False
+ self.namespace = None
+ self.qualified = None
+
+ def __setitem__(self, key, value):
+ if key not in self.__keys:
+ self.__keys.append(key)
+ dict.__setitem__(self, key, value)
+
+ def insert(self, key, value, index=0):
+ if key not in self.__keys:
+ self.__keys.insert(index, key)
+ dict.__setitem__(self, key, value)
+
+ def __delitem__(self, key):
+ if key in self.__keys:
+ self.__keys.remove(key)
+ dict.__delitem__(self, key)
+
+ def __iter__(self):
+ return iter(self.__keys)
+
+ def keys(self):
+ return self.__keys
+
+ def items(self):
+ return [(key, self[key]) for key in self.__keys]
+
+ def update(self, other):
+ for k, v in other.items():
+ self[k] = v
+ # do not change if we are an array but the other is not:
+ if isinstance(other, OrderedDict) and not self.array:
+ self.array = other.array
+ if isinstance(other, OrderedDict) and not self.namespace:
+ self.namespace = other.namespace
+ self.qualified = other.qualified
+
+ def copy(self):
+ "Make a duplicate"
+ new = OrderedDict()
+ new.update(self)
+ return new
+
+ def __str__(self):
+ return "%s" % dict.__str__(self)
+
+ def __repr__(self):
+ s = "{%s}" % ", ".join(['%s: %s' % (repr(k), repr(v)) for k, v in self.items()])
+ if self.array and False:
+ s = "[%s]" % s
+ return s
diff --git a/gluon/contrib/pysimplesoap/server.py b/gluon/contrib/pysimplesoap/server.py
index 3888b49c..fb971494 100755
--- a/gluon/contrib/pysimplesoap/server.py
+++ b/gluon/contrib/pysimplesoap/server.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
@@ -10,30 +10,41 @@
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
-"Simple SOAP Server implementation"
+"""Pythonic simple SOAP Server implementation"""
-__author__ = "Mariano Reingart (reingart@gmail.com)"
-__copyright__ = "Copyright (C) 2010 Mariano Reingart"
-__license__ = "LGPL 3.0"
-__version__ = "1.03c"
+from __future__ import unicode_literals
+import sys
+if sys.version > '3':
+ unicode = str
+
+
+import datetime
+import sys
import logging
+import warnings
import re
import traceback
-from simplexml import SimpleXMLElement, TYPE_MAP, Date, Decimal
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
+except ImportError:
+ from http.server import BaseHTTPRequestHandler, HTTPServer
+
+from . import __author__, __copyright__, __license__, __version__
+from .simplexml import SimpleXMLElement, TYPE_MAP, Date, Decimal
log = logging.getLogger(__name__)
-# Deprecated
-DEBUG = False
-NS_RX=re.compile(r'xmlns:(\w+)="(.+?)"')
+# Deprecated?
+NS_RX = re.compile(r'xmlns:(\w+)="(.+?)"')
+
class SoapDispatcher(object):
- "Simple Dispatcher for SOAP Server"
-
- def __init__(self, name, documentation='', action='', location='',
- namespace=None, prefix=False,
- soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
+ """Simple Dispatcher for SOAP Server"""
+
+ def __init__(self, name, documentation='', action='', location='',
+ namespace=None, prefix=False,
+ soap_uri="http://schemas.xmlsoap.org/soap/envelope/",
soap_ns='soap',
namespaces={},
pretty=False,
@@ -45,13 +56,13 @@ class SoapDispatcher(object):
:param namespaces: Specify additional namespaces; example: {'external': 'http://external.mt.moboperator'}
:param pretty: Prettifies generated xmls
:param debug: Use to add tracebacks in generated xmls.
-
+
Multiple namespaces
===================
-
+
It is possible to support multiple namespaces.
You need to specify additional namespaces by passing `namespace` parameter.
-
+
>>> dispatcher = SoapDispatcher(
... name = "MTClientWS",
... location = "http://localhost:8008/ws/MTClientWS",
@@ -59,13 +70,13 @@ class SoapDispatcher(object):
... namespace = "http://external.mt.moboperator", prefix="external",
... documentation = 'moboperator MTClientWS',
... namespaces = {
- ... 'external': 'http://external.mt.moboperator',
+ ... 'external': 'http://external.mt.moboperator',
... 'model': 'http://model.common.mt.moboperator'
... },
... ns = True)
-
+
Now the registered method must return node names with namespaces' prefixes.
-
+
>>> def _multi_ns_func(self, serviceMsisdn):
... ret = {
... 'external:activateSubscriptionsReturn': [
@@ -73,23 +84,22 @@ class SoapDispatcher(object):
... {'model:description': 'desc'},
... ]}
... return ret
-
+
Our prefixes will be changed to those used by the client.
"""
self.methods = {}
self.name = name
self.documentation = documentation
- self.action = action # base SoapAction
+ self.action = action # base SoapAction
self.location = location
- self.namespace = namespace # targetNamespace
+ self.namespace = namespace # targetNamespace
self.prefix = prefix
self.soap_ns = soap_ns
self.soap_uri = soap_uri
self.namespaces = namespaces
self.pretty = pretty
self.debug = debug
-
-
+
@staticmethod
def _extra_namespaces(xml, ns):
"""Extends xml with extra namespaces.
@@ -99,56 +109,57 @@ class SoapDispatcher(object):
if ns:
_tpl = 'xmlns:%s="%s"'
_ns_str = " ".join([_tpl % (prefix, uri) for uri, prefix in ns.items() if uri not in xml])
- xml = xml.replace('/>', ' '+_ns_str+'/>')
+ xml = xml.replace('/>', ' ' + _ns_str + '/>')
return xml
-
-
+
def register_function(self, name, fn, returns=None, args=None, doc=None):
self.methods[name] = fn, returns, args, doc or getattr(fn, "__doc__", "")
-
-
- def dispatch(self, xml, action=None):
- "Receive and proccess SOAP call"
+
+ def dispatch(self, xml, action=None, fault=None):
+ """Receive and process SOAP call, returns the xml"""
+ # a dict can be sent in fault to expose it to the caller
# default values:
prefix = self.prefix
- ret = fault = None
+ ret = None
+ if fault is None:
+ fault = {}
soap_ns, soap_uri = self.soap_ns, self.soap_uri
soap_fault_code = 'VersionMismatch'
name = None
-
+
# namespaces = [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
- _ns_reversed = dict(((v,k) for k,v in self.namespaces.iteritems())) # Switch keys-values
+ _ns_reversed = dict(((v, k) for k, v in self.namespaces.items())) # Switch keys-values
# _ns_reversed = {'http://external.mt.moboperator': 'external', 'http://model.common.mt.moboperator': 'model'}
-
+
try:
request = SimpleXMLElement(xml, namespace=self.namespace)
-
+
# detect soap prefix and uri (xmlns attributes of Envelope)
for k, v in request[:]:
if v in ("http://schemas.xmlsoap.org/soap/envelope/",
- "http://www.w3.org/2003/05/soap-env",):
+ "http://www.w3.org/2003/05/soap-env",):
soap_ns = request.attributes()[k].localName
soap_uri = request.attributes()[k].value
-
+
# If the value from attributes on Envelope is in additional namespaces
elif v in self.namespaces.values():
_ns = request.attributes()[k].localName
_uri = request.attributes()[k].value
- _ns_reversed[_uri] = _ns # update with received alias
+ _ns_reversed[_uri] = _ns # update with received alias
# Now we change 'external' and 'model' to the received forms i.e. 'ext' and 'mod'
# After that we know how the client has prefixed additional namespaces
-
+
ns = NS_RX.findall(xml)
for k, v in ns:
if v in self.namespaces.values():
_ns_reversed[v] = k
-
+
soap_fault_code = 'Client'
-
+
# parse request message and get local method
method = request('Body', ns=soap_uri).children()(0)
if action:
- # method name = action
+ # method name = action
name = action[len(self.action)+1:-1]
prefix = self.prefix
if not action or not name:
@@ -159,21 +170,21 @@ class SoapDispatcher(object):
log.debug('dispatch method: %s', name)
function, returns_types, args_types, doc = self.methods[name]
log.debug('returns_types %s', returns_types)
-
+
# de-serialize parameters (if type definitions given)
if args_types:
args = method.children().unmarshall(args_types)
elif args_types is None:
- args = {'request': method} # send raw request
+ args = {'request': method} # send raw request
else:
- args = {} # no parameters
-
+ args = {} # no parameters
+
soap_fault_code = 'Server'
# execute function
ret = function(**args)
log.debug('dispathed method returns: %s', ret)
- except Exception: # This shouldn't be one huge try/except
+ except Exception: # This shouldn't be one huge try/except
import sys
etype, evalue, etb = sys.exc_info()
log.error(traceback.format_exc())
@@ -182,43 +193,43 @@ class SoapDispatcher(object):
detail += '\n\nXML REQUEST\n\n' + xml
else:
detail = None
- fault = {'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
- 'faultstring': unicode(evalue),
- 'detail': detail}
+ fault.update({'faultcode': "%s.%s" % (soap_fault_code, etype.__name__),
+ 'faultstring': evalue,
+ 'detail': detail})
# build response message
if not prefix:
- xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
+ xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"/>"""
else:
xml = """<%(soap_ns)s:Envelope xmlns:%(soap_ns)s="%(soap_uri)s"
- xmlns:%(prefix)s="%(namespace)s"/>"""
-
+ xmlns:%(prefix)s="%(namespace)s"/>"""
+
xml %= { # a %= {} is a shortcut for a = a % {}
- 'namespace': self.namespace,
+ 'namespace': self.namespace,
'prefix': prefix,
- 'soap_ns': soap_ns,
+ 'soap_ns': soap_ns,
'soap_uri': soap_uri
}
-
+
# Now we add extra namespaces
xml = SoapDispatcher._extra_namespaces(xml, _ns_reversed)
-
+
# Change our namespace alias to that given by the client.
# We put [('model', 'http://model.common.mt.moboperator'), ('external', 'http://external.mt.moboperator')]
# mix it with {'http://external.mt.moboperator': 'ext', 'http://model.common.mt.moboperator': 'mod'}
- mapping = dict(((k, _ns_reversed[v]) for k,v in self.namespaces.iteritems())) # Switch keys-values and change value
+ mapping = dict(((k, _ns_reversed[v]) for k, v in self.namespaces.items())) # Switch keys-values and change value
# and get {'model': u'mod', 'external': u'ext'}
-
- response = SimpleXMLElement(xml,
+
+ response = SimpleXMLElement(xml,
namespace=self.namespace,
- namespaces_map = mapping,
+ namespaces_map=mapping,
prefix=prefix)
-
+
response['xmlns:xsi'] = "http://www.w3.org/2001/XMLSchema-instance"
response['xmlns:xsd'] = "http://www.w3.org/2001/XMLSchema"
-
+
body = response.add_child("%s:Body" % soap_ns, ns=False)
-
+
if fault:
# generate a Soap Fault (with the python exception)
body.marshall("%s:Fault" % soap_ns, fault, ns=False)
@@ -226,14 +237,24 @@ class SoapDispatcher(object):
# return normal value
res = body.add_child("%sResponse" % name, ns=prefix)
if not prefix:
- res['xmlns'] = self.namespace # add target namespace
+ res['xmlns'] = self.namespace # add target namespace
# serialize returned values (response) if type definition available
if returns_types:
- if not isinstance(ret, dict):
+ # TODO: full sanity check of type structure (recursive)
+ complex_type = isinstance(ret, dict)
+ if complex_type:
+ # check if type mapping correlates with return value
+ types_ok = all([k in returns_types for k in ret.keys()])
+ if not types_ok:
+ warnings.warn("Return value doesn't match type structure: "
+ "%s vs %s" % (str(returns_types), str(ret)))
+ if not complex_type or not types_ok:
+ # backward compatibility for scalar and simple types
res.marshall(returns_types.keys()[0], ret, )
else:
- for k,v in ret.items():
+ # new style for complex classes
+ for k, v in ret.items():
res.marshall(k, v)
elif returns_types is None:
# merge xmlelement returned
@@ -246,16 +267,16 @@ class SoapDispatcher(object):
# Introspection functions:
def list_methods(self):
- "Return a list of aregistered operations"
- return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
+ """Return a list of registered operations"""
+ return [(method, doc) for method, (function, returns, args, doc) in self.methods.items()]
def help(self, method=None):
- "Generate sample request and response messages"
+ """Generate sample request and response messages"""
(function, returns, args, doc) = self.methods[method]
xml = """
<%(method)s xmlns="%(namespace)s"/>
-""" % {'method':method, 'namespace':self.namespace}
+""" % {'method': method, 'namespace': self.namespace}
request = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if args:
items = args.items()
@@ -263,13 +284,13 @@ class SoapDispatcher(object):
items = [('value', None)]
else:
items = []
- for k,v in items:
+ for k, v in items:
request(method).marshall(k, v, add_comments=True, ns=False)
xml = """
<%(method)sResponse xmlns="%(namespace)s"/>
-""" % {'method':method, 'namespace':self.namespace}
+""" % {'method': method, 'namespace': self.namespace}
response = SimpleXMLElement(xml, namespace=self.namespace, prefix=self.prefix)
if returns:
items = returns.items()
@@ -277,16 +298,15 @@ class SoapDispatcher(object):
items = [('value', None)]
else:
items = []
- for k,v in items:
- response('%sResponse'%method).marshall(k, v, add_comments=True, ns=False)
+ for k, v in items:
+ response('%sResponse' % method).marshall(k, v, add_comments=True, ns=False)
return request.as_xml(pretty=True), response.as_xml(pretty=True), doc
-
def wsdl(self):
- "Generate Web Service Description v1.1"
+ """Generate Web Service Description v1.1"""
xml = """
-response (generic, any type)"
+ """Copy request->response (generic, any type)"""
return request.value
- dispatcher.register_function('Adder', adder,
- returns={'AddResult': {'ab': int, 'dd': str } },
- args={'p': {'a': int,'b': int}, 'dt': Date, 'c': [{'d': Decimal}]})
+ dispatcher.register_function(
+ 'Adder', adder,
+ returns={'AddResult': {'ab': int, 'dd': unicode, 'dt': datetime.date}},
+ args={'p': {'a': int, 'b': int}, 'dt': Date, 'c': [{'d': Decimal}]}
+ )
- dispatcher.register_function('Dummy', dummy,
- returns={'out0': str},
- args={'in0': str})
+ dispatcher.register_function(
+ 'Dummy', dummy,
+ returns={'out0': str},
+ args={'in0': str}
+ )
dispatcher.register_function('Echo', echo)
if '--local' in sys.argv:
- wsdl=dispatcher.wsdl()
- print wsdl
-
- # Commented because path is platform dependent
- # Looks that it doesnt matter.
- # open("C:/test.wsdl","w").write(wsdl)
+ wsdl = dispatcher.wsdl()
for method, doc in dispatcher.list_methods():
request, response, doc = dispatcher.help(method)
- ##print request
- ##print response
-
+
if '--serve' in sys.argv:
- print "Starting server..."
+ log.info("Starting server...")
httpd = HTTPServer(("", 8008), SOAPHandler)
httpd.dispatcher = dispatcher
httpd.serve_forever()
+ if '--wsgi-serve' in sys.argv:
+ log.info("Starting wsgi server...")
+ from wsgiref.simple_server import make_server
+ application = WSGISOAPHandler(dispatcher)
+ wsgid = make_server('', 8008, application)
+ wsgid.serve_forever()
+
if '--consume' in sys.argv:
- from client import SoapClient
+ from .client import SoapClient
client = SoapClient(
- location = "http://localhost:8008/",
- action = 'http://localhost:8008/', # SOAPAction
- namespace = "http://example.com/sample.wsdl",
+ location="http://localhost:8008/",
+ action='http://localhost:8008/', # SOAPAction
+ namespace="http://example.com/sample.wsdl",
soap_ns='soap',
- trace = True,
- ns = False)
- response = client.Adder(p={'a':1,'b':2},dt='20100724',c=[{'d':'1.20'},{'d':'2.01'}])
+ trace=True,
+ ns=False
+ )
+ p = {'a': 1, 'b': 2}
+ c = [{'d': '1.20'}, {'d': '2.01'}]
+ response = client.Adder(p=p, dt='2010-07-24', c=c)
result = response.AddResult
- print int(result.ab)
- print str(result.dd)
+ log.info(int(result.ab))
+ log.info(str(result.dd))
+
+ if '--consume-wsdl' in sys.argv:
+ from .client import SoapClient
+ client = SoapClient(
+ wsdl="http://localhost:8008/",
+ )
+ p = {'a': 1, 'b': 2}
+ c = [{'d': '1.20'}, {'d': '2.01'}]
+ dt = datetime.date.today()
+ response = client.Adder(p=p, dt=dt, c=c)
+ result = response['AddResult']
+ log.info(int(result['ab']))
+ log.info(str(result['dd']))
+
diff --git a/gluon/contrib/pysimplesoap/simplexml.py b/gluon/contrib/pysimplesoap/simplexml.py
index 2a57c902..9a23497a 100755
--- a/gluon/contrib/pysimplesoap/simplexml.py
+++ b/gluon/contrib/pysimplesoap/simplexml.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
@@ -10,164 +10,47 @@
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
-"Simple XML manipulation"
+"""Simple XML manipulation"""
-__author__ = "Mariano Reingart (reingart@gmail.com)"
-__copyright__ = "Copyright (C) 2008/009 Mariano Reingart"
-__license__ = "LGPL 3.0"
-__version__ = "1.03a"
-import datetime
+from __future__ import unicode_literals
+import sys
+if sys.version > '3':
+ basestring = str
+ unicode = str
+
import logging
import re
import time
-import warnings
import xml.dom.minidom
-from decimal import Decimal
+
+from . import __author__, __copyright__, __license__, __version__
+
+# Utility functions used for marshalling, moved aside for readability
+from .helpers import TYPE_MAP, TYPE_MARSHAL_FN, TYPE_UNMARSHAL_FN, \
+ REVERSE_TYPE_MAP, OrderedDict, Date, Decimal
log = logging.getLogger(__name__)
-logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
-
-DEBUG = False
-
-try:
- _strptime = datetime.datetime.strptime
-except AttributeError: # python2.4
- _strptime = lambda s, fmt: datetime.datetime(*(time.strptime(s, fmt)[:6]))
-
-
-# Functions to serialize/deserialize special immutable types:
-def datetime_u(s):
- fmt = "%Y-%m-%dT%H:%M:%S"
- try:
- return _strptime(s, fmt)
- except ValueError:
- try:
- # strip utc offset
- if s[-3] == ":" and s[-6] in (' ', '-', '+'):
- warnings.warn('removing unsupported UTC offset', RuntimeWarning)
- s = s[:-6]
- # parse microseconds
- try:
- return _strptime(s, fmt + ".%f")
- except:
- return _strptime(s, fmt)
- except ValueError:
- # strip microseconds (not supported in this platform)
- if "." in s:
- warnings.warn('removing unsuppported microseconds', RuntimeWarning)
- s = s[:s.index(".")]
- return _strptime(s, fmt)
-
-datetime_m = lambda dt: dt.isoformat('T')
-date_u = lambda s: _strptime(s[0:10], "%Y-%m-%d").date()
-date_m = lambda d: d.strftime("%Y-%m-%d")
-time_u = lambda s: _strptime(s, "%H:%M:%S").time()
-time_m = lambda d: d.strftime("%H%M%S")
-bool_u = lambda s: {'0':False, 'false': False, '1': True, 'true': True}[s]
-bool_m = lambda s: {False: 'false', True: 'true'}[s]
-
-# aliases:
-class Alias(object):
- def __init__(self, py_type, xml_type):
- self.py_type, self.xml_type = py_type, xml_type
- def __call__(self, value):
- return self.py_type(value)
- def __repr__(self):
- return "" % (self.xml_type, self.py_type)
-
-byte = Alias(str,'byte')
-short = Alias(int,'short')
-double = Alias(float,'double')
-integer = Alias(long,'integer')
-DateTime = datetime.datetime
-Date = datetime.date
-Time = datetime.time
-
-# Define convertion function (python type): xml schema type
-TYPE_MAP = {
- str:'string',
- unicode:'string',
- bool:'boolean',
- short:'short',
- byte:'byte',
- int:'int',
- long:'long',
- integer:'integer',
- float:'float',
- double:'double',
- Decimal:'decimal',
- datetime.datetime:'dateTime',
- datetime.date:'date',
-}
-TYPE_MARSHAL_FN = {
- datetime.datetime:datetime_m,
- datetime.date:date_m,
- bool:bool_m
-}
-TYPE_UNMARSHAL_FN = {
- datetime.datetime:datetime_u,
- datetime.date:date_u,
- bool:bool_u,
- str:unicode,
-}
-
-REVERSE_TYPE_MAP = dict([(v,k) for k,v in TYPE_MAP.items()])
-
-class OrderedDict(dict):
- "Minimal ordered dictionary for xsd:sequences"
- def __init__(self):
- self.__keys = []
- self.array = False
- def __setitem__(self, key, value):
- if key not in self.__keys:
- self.__keys.append(key)
- dict.__setitem__(self, key, value)
- def insert(self, key, value, index=0):
- if key not in self.__keys:
- self.__keys.insert(index, key)
- dict.__setitem__(self, key, value)
- def __delitem__(self, key):
- if key in self.__keys:
- self.__keys.remove(key)
- dict.__delitem__(self, key)
- def __iter__(self):
- return iter(self.__keys)
- def keys(self):
- return self.__keys
- def items(self):
- return [(key, self[key]) for key in self.__keys]
- def update(self, other):
- for k,v in other.items():
- self[k] = v
- if isinstance(other, OrderedDict):
- self.array = other.array
- def __str__(self):
- return "*%s*" % dict.__str__(self)
- def __repr__(self):
- s= "*{%s}*" % ", ".join(['%s: %s' % (repr(k),repr(v)) for k,v in self.items()])
- if self.array and False:
- s = "[%s]" % s
- return s
class SimpleXMLElement(object):
- "Simple XML manipulation (simil PHP)"
-
- def __init__(self, text = None, elements = None, document = None,
- namespace = None, prefix=None, namespaces_map={}):
+ """Simple XML manipulation (simil PHP)"""
+
+ def __init__(self, text=None, elements=None, document=None,
+ namespace=None, prefix=None, namespaces_map={}, jetty=False):
"""
:param namespaces_map: How to map our namespace prefix to that given by the client;
{prefix: received_prefix}
"""
self.__namespaces_map = namespaces_map
- _rx = "|".join(namespaces_map.keys()) # {'external': 'ext', 'model': 'mod'} -> 'external|model'
- self.__ns_rx = re.compile(r"^(%s):.*$" % _rx) # And now we build an expression ^(external|model):.*$
- # to find prefixes in all xml nodes i.e.: 1
- # and later change that to 1
+ _rx = "|".join(namespaces_map.keys()) # {'external': 'ext', 'model': 'mod'} -> 'external|model'
+ self.__ns_rx = re.compile(r"^(%s):.*$" % _rx) # And now we build an expression ^(external|model):.*$
+ # to find prefixes in all xml nodes i.e.: 1
+ # and later change that to 1
self.__ns = namespace
self.__prefix = prefix
-
+ self.__jetty = jetty # special list support
+
if text is not None:
try:
self.__document = xml.dom.minidom.parseString(text)
@@ -178,78 +61,81 @@ class SimpleXMLElement(object):
else:
self.__elements = elements
self.__document = document
-
+
def add_child(self, name, text=None, ns=True):
- "Adding a child tag to a node"
- if not ns or not self.__ns:
- log.debug('adding %s', name)
+ """Adding a child tag to a node"""
+ if not ns or self.__ns is False:
+ ##log.debug('adding %s without namespace', name)
element = self.__document.createElement(name)
else:
- log.debug('adding %s ns "%s" %s', name, self.__ns, ns)
- if self.__prefix:
+ ##log.debug('adding %s ns "%s" %s', name, self.__ns, ns)
+ if isinstance(ns, basestring):
+ element = self.__document.createElement(name)
+ if ns:
+ element.setAttribute("xmlns", ns)
+ elif self.__prefix:
element = self.__document.createElementNS(self.__ns, "%s:%s" % (self.__prefix, name))
else:
element = self.__document.createElementNS(self.__ns, name)
# don't append null tags!
if text is not None:
- if isinstance(text, unicode):
- element.appendChild(self.__document.createTextNode(text))
- else:
- element.appendChild(self.__document.createTextNode(str(text)))
+ element.appendChild(self.__document.createTextNode(text))
self._element.appendChild(element)
return SimpleXMLElement(
- elements=[element],
- document=self.__document,
- namespace=self.__ns,
- prefix=self.__prefix,
- namespaces_map=self.__namespaces_map)
-
+ elements=[element],
+ document=self.__document,
+ namespace=self.__ns,
+ prefix=self.__prefix,
+ jetty=self.__jetty,
+ namespaces_map=self.__namespaces_map
+ )
+
def __setattr__(self, tag, text):
- "Add text child tag node (short form)"
+ """Add text child tag node (short form)"""
if tag.startswith("_"):
object.__setattr__(self, tag, text)
else:
- log.debug('__setattr__(%s, %s)', tag, text)
+ ##log.debug('__setattr__(%s, %s)', tag, text)
self.add_child(tag, text)
def __delattr__(self, tag):
- "Remove a child tag (non recursive!)"
- elements=[__element for __element in self._element.childNodes
- if __element.nodeType == __element.ELEMENT_NODE
- ]
+ """Remove a child tag (non recursive!)"""
+ elements = [__element for __element in self._element.childNodes
+ if __element.nodeType == __element.ELEMENT_NODE]
for element in elements:
self._element.removeChild(element)
def add_comment(self, data):
- "Add an xml comment to this child"
+ """Add an xml comment to this child"""
comment = self.__document.createComment(data)
self._element.appendChild(comment)
def as_xml(self, filename=None, pretty=False):
- "Return the XML representation of the document"
+ """Return the XML representation of the document"""
if not pretty:
return self.__document.toxml('UTF-8')
else:
return self.__document.toprettyxml(encoding='UTF-8')
def __repr__(self):
- "Return the XML representation of this tag"
+ """Return the XML representation of this tag"""
+ # NOTE: do not use self.as_xml('UTF-8') as it returns the whole xml doc
return self._element.toxml('UTF-8')
def get_name(self):
- "Return the tag name of this node"
+ """Return the tag name of this node"""
return self._element.tagName
def get_local_name(self):
- "Return the tag loca name (prefix:name) of this node"
+ """Return the tag local name (prefix:name) of this node"""
return self._element.localName
def get_prefix(self):
- "Return the namespace prefix of this node"
+ """Return the namespace prefix of this node"""
return self._element.prefix
def get_namespace_uri(self, ns):
- "Return the namespace uri for a prefix"
+ """Return the namespace uri for a prefix"""
element = self._element
while element is not None and element.attributes is not None:
try:
@@ -257,38 +143,39 @@ class SimpleXMLElement(object):
except KeyError:
element = element.parentNode
-
def attributes(self):
- "Return a dict of attributes for this tag"
+ """Return a dict of attributes for this tag"""
#TODO: use slice syntax [:]?
return self._element.attributes
def __getitem__(self, item):
- "Return xml tag attribute value or a slice of attributes (iter)"
- log.debug('__getitem__(%s)', item)
+ """Return xml tag attribute value or a slice of attributes (iter)"""
+ ##log.debug('__getitem__(%s)', item)
if isinstance(item, basestring):
if self._element.hasAttribute(item):
return self._element.attributes[item].value
elif isinstance(item, slice):
# return a list with name:values
- return self._element.attributes.items()[item]
+ return list(self._element.attributes.items())[item]
else:
# return element by index (position)
element = self.__elements[item]
return SimpleXMLElement(
- elements=[element],
- document=self.__document,
- namespace=self.__ns,
- prefix=self.__prefix,
- namespaces_map=self.__namespaces_map)
-
+ elements=[element],
+ document=self.__document,
+ namespace=self.__ns,
+ prefix=self.__prefix,
+ jetty=self.__jetty,
+ namespaces_map=self.__namespaces_map
+ )
+
def add_attribute(self, name, value):
- "Set an attribute value from a string"
+ """Set an attribute value from a string"""
self._element.setAttribute(name, value)
-
+
def __setitem__(self, item, value):
- "Set an attribute value"
- if isinstance(item,basestring):
+ """Set an attribute value"""
+ if isinstance(item, basestring):
self.add_attribute(item, value)
elif isinstance(item, slice):
# set multiple attributes at once
@@ -297,7 +184,7 @@ class SimpleXMLElement(object):
def __call__(self, tag=None, ns=None, children=False, root=False,
error=True, ):
- "Search (even in child nodes) and return a child tag by name"
+ """Search (even in child nodes) and return a child tag by name"""
try:
if root:
# return entire document
@@ -306,6 +193,7 @@ class SimpleXMLElement(object):
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
+ jetty=self.__jetty,
namespaces_map=self.__namespaces_map
)
if tag is None:
@@ -317,23 +205,23 @@ class SimpleXMLElement(object):
elements = None
if isinstance(tag, int):
# return tag by index
- elements=[self.__elements[tag]]
+ elements = [self.__elements[tag]]
if ns and not elements:
for ns_uri in isinstance(ns, (tuple, list)) and ns or (ns, ):
- log.debug('searching %s by ns=%s', tag, ns_uri)
+ ##log.debug('searching %s by ns=%s', tag, ns_uri)
elements = self._element.getElementsByTagNameNS(ns_uri, tag)
- if elements:
+ if elements:
break
if self.__ns and not elements:
- log.debug('searching %s by ns=%s', tag, self.__ns)
+ ##log.debug('searching %s by ns=%s', tag, self.__ns)
elements = self._element.getElementsByTagNameNS(self.__ns, tag)
if not elements:
- log.debug('searching %s', tag)
+ ##log.debug('searching %s', tag)
elements = self._element.getElementsByTagName(tag)
if not elements:
- #log.debug(self._element.toxml())
+ ##log.debug(self._element.toxml())
if error:
- raise AttributeError(u"No elements found")
+ raise AttributeError("No elements found")
else:
return
return SimpleXMLElement(
@@ -341,16 +229,17 @@ class SimpleXMLElement(object):
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
+ jetty=self.__jetty,
namespaces_map=self.__namespaces_map)
- except AttributeError, e:
- raise AttributeError(u"Tag not found: %s (%s)" % (tag, unicode(e)))
+ except AttributeError as e:
+ raise AttributeError("Tag not found: %s (%s)" % (tag, e))
def __getattr__(self, tag):
- "Shortcut for __call__"
+ """Shortcut for __call__"""
return self.__call__(tag)
-
+
def __iter__(self):
- "Iterate over xml tags at this level"
+ """Iterate over xml tags at this level"""
try:
for __element in self.__elements:
yield SimpleXMLElement(
@@ -358,67 +247,72 @@ class SimpleXMLElement(object):
document=self.__document,
namespace=self.__ns,
prefix=self.__prefix,
+ jetty=self.__jetty,
namespaces_map=self.__namespaces_map)
except:
raise
def __dir__(self):
- "List xml children tags names"
- return [node.tagName for node
+ """List xml children tags names"""
+ return [node.tagName for node
in self._element.childNodes
if node.nodeType != node.TEXT_NODE]
def children(self):
- "Return xml children tags element"
- elements=[__element for __element in self._element.childNodes
- if __element.nodeType == __element.ELEMENT_NODE]
+ """Return xml children tags element"""
+ elements = [__element for __element in self._element.childNodes
+ if __element.nodeType == __element.ELEMENT_NODE]
if not elements:
return None
#raise IndexError("Tag %s has no children" % self._element.tagName)
return SimpleXMLElement(
- elements=elements,
- document=self.__document,
- namespace=self.__ns,
- prefix=self.__prefix,
- namespaces_map=self.__namespaces_map)
+ elements=elements,
+ document=self.__document,
+ namespace=self.__ns,
+ prefix=self.__prefix,
+ jetty=self.__jetty,
+ namespaces_map=self.__namespaces_map
+ )
def __len__(self):
- "Return elements count"
+ """Return element count"""
return len(self.__elements)
-
- def __contains__( self, item):
- "Search for a tag name in this element or child nodes"
+
+ def __contains__(self, item):
+ """Search for a tag name in this element or child nodes"""
return self._element.getElementsByTagName(item)
-
+
def __unicode__(self):
- "Returns the unicode text nodes of the current element"
+ """Returns the unicode text nodes of the current element"""
if self._element.childNodes:
- rc = u""
+ rc = ""
for node in self._element.childNodes:
if node.nodeType == node.TEXT_NODE:
rc = rc + node.data
return rc
return ''
-
+
def __str__(self):
- "Returns the str text nodes of the current element"
- return unicode(self).encode("utf8","ignore")
+ """Returns the str text nodes of the current element"""
+ return self.__unicode__()
def __int__(self):
- "Returns the integer value of the current element"
+ """Returns the integer value of the current element"""
return int(self.__str__())
def __float__(self):
- "Returns the float value of the current element"
+ """Returns the float value of the current element"""
try:
return float(self.__str__())
except:
- raise IndexError(self._element.toxml())
-
+ raise IndexError(self._element.toxml())
+
_element = property(lambda self: self.__elements[0])
def unmarshall(self, types, strict=True):
- "Convert to python values the current serialized xml element"
+ #import pdb; pdb.set_trace()
+
+ """Convert to python values the current serialized xml element"""
# types is a dict of {tag name: convertion function}
# strict=False to use default type conversion if not specified
# example: types={'p': {'a': int,'b': int}, 'c': [{'d':str}]}
@@ -435,37 +329,62 @@ class SimpleXMLElement(object):
if ref_node['id'] == href:
node = ref_node
ref_name_type = ref_node['xsi:type'].split(":")[1]
- break
+ break
+
try:
- fn = types[name]
- except (KeyError, ), e:
- if node.get_namespace_uri("soapenc"):
- fn = None # ignore multirefs!
- elif 'xsi:type' in node.attributes().keys():
+ if isinstance(types, dict):
+ fn = types[name]
+ # custom array only in the response (not defined in the WSDL):
+ # 1):
+ # Jetty array style support [{k, v}]
+ for parent in node:
+ tmp_dict = {} # unmarshall each value & mix
+ for child in (node.children() or []):
+ tmp_dict.update(child.unmarshall(fn[0], strict))
+ value.append(tmp_dict)
+ else: # .Net / Java
+ for child in (children or []):
+ value.append(child.unmarshall(fn[0], strict))
+
elif isinstance(fn, tuple):
value = []
_d = {}
children = node.children()
as_dict = len(fn) == 1 and isinstance(fn[0], dict)
- for child in (children and children() or []): # Readability counts
+ for child in (children and children() or []): # Readability counts
if as_dict:
- _d.update(child.unmarshall(fn[0], strict)) # Merging pairs
+ _d.update(child.unmarshall(fn[0], strict)) # Merging pairs
else:
value.append(child.unmarshall(fn[0], strict))
if as_dict:
@@ -477,32 +396,32 @@ class SimpleXMLElement(object):
value = tuple(_tmp)
else:
value = tuple(value)
-
+
elif isinstance(fn, dict):
##if ref_name_type is not None:
## fn = fn[ref_name_type]
children = node.children()
value = children and children.unmarshall(fn, strict)
else:
- if fn is None: # xsd:anyType not unmarshalled
+ if fn is None: # xsd:anyType not unmarshalled
value = node
- elif str(node) or fn == str:
+ elif unicode(node) or (fn == str and unicode(node) != ''):
try:
# get special deserialization function (if any)
- fn = TYPE_UNMARSHAL_FN.get(fn,fn)
+ fn = TYPE_UNMARSHAL_FN.get(fn, fn)
if fn == str:
# always return an unicode object:
+ # (avoid encoding errors in py<3!)
value = unicode(node)
else:
value = fn(unicode(node))
- except (ValueError, TypeError), e:
- raise ValueError(u"Tag: %s: %s" % (name, unicode(e)))
+ except (ValueError, TypeError) as e:
+ raise ValueError("Tag: %s: %s" % (name, e))
else:
value = None
d[name] = value
return d
-
-
+
def _update_ns(self, name):
"""Replace the defined namespace alias with tohse used by the client."""
pref = self.__ns_rx.search(name)
@@ -513,46 +432,50 @@ class SimpleXMLElement(object):
except KeyError:
log.warning('Unknown namespace alias %s' % name)
return name
-
-
- def marshall(self, name, value, add_child=True, add_comments=False,
+
+ def marshall(self, name, value, add_child=True, add_comments=False,
ns=False, add_children_ns=True):
- "Analize python value and add the serialized XML element using tag name"
+ """Analyze python value and add the serialized XML element using tag name"""
# Change node name to that used by a client
name = self._update_ns(name)
-
+
if isinstance(value, dict): # serialize dict (value)
+ # for the first parent node, use the document target namespace
+ # (ns==True) or use the namespace string uri if passed (elements)
child = add_child and self.add_child(name, ns=ns) or self
- for k,v in value.items():
+ for k, v in value.items():
if not add_children_ns:
ns = False
+ else:
+ # for children, use the wsdl element target namespace:
+ ns = getattr(value, 'namespace', None)
child.marshall(k, v, add_comments=add_comments, ns=ns)
elif isinstance(value, tuple): # serialize tuple (value)
child = add_child and self.add_child(name, ns=ns) or self
if not add_children_ns:
ns = False
- for k,v in value:
+ for k, v in value:
getattr(self, name).marshall(k, v, add_comments=add_comments, ns=ns)
- elif isinstance(value, list): # serialize lists
- child=self.add_child(name, ns=ns)
+ elif isinstance(value, list): # serialize lists
+ child = self.add_child(name, ns=ns)
if not add_children_ns:
ns = False
if add_comments:
child.add_comment("Repetitive array of:")
for t in value:
child.marshall(name, t, False, add_comments=add_comments, ns=ns)
- elif isinstance(value, basestring): # do not convert strings or unicodes
- self.add_child(name, value,ns=ns)
- elif value is None: # sent a empty tag?
+ elif isinstance(value, basestring): # do not convert strings or unicodes
+ self.add_child(name, value, ns=ns)
+ elif value is None: # sent a empty tag?
self.add_child(name, ns=ns)
elif value in TYPE_MAP.keys():
# add commented placeholders for simple tipes (for examples/help only)
- child = self.add_child(name, ns=ns)
+ child = self.add_child(name, ns=ns)
child.add_comment(TYPE_MAP[value])
- else: # the rest of object types are converted to string
+ else: # the rest of object types are converted to string
# get special serialization function (if any)
fn = TYPE_MARSHAL_FN.get(type(value), str)
- self.add_child(name, fn(value), ns=ns)
+ self.add_child(name, fn(value), ns=ns)
def import_node(self, other):
x = self.__document.importNode(other._element, True) # deep copy
diff --git a/gluon/contrib/pysimplesoap/transport.py b/gluon/contrib/pysimplesoap/transport.py
index 5441e342..87806c06 100644
--- a/gluon/contrib/pysimplesoap/transport.py
+++ b/gluon/contrib/pysimplesoap/transport.py
@@ -1,5 +1,5 @@
-#!/usr/bin/env python
-# -*- coding: latin-1 -*-
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation; either version 3, or (at your option) any later
@@ -10,30 +10,46 @@
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
-"Pythonic simple SOAP Client implementation"
+"""Pythonic simple SOAP Client transport"""
-__author__ = "Mariano Reingart (reingart@gmail.com)"
-__copyright__ = "Copyright (C) 2008 Mariano Reingart"
-__license__ = "LGPL 3.0"
-TIMEOUT = 60
-
-import os
-import cPickle as pickle
-import urllib2
-from urlparse import urlparse
-import tempfile
-from simplexml import SimpleXMLElement, TYPE_MAP, OrderedDict
import logging
+import sys
+try:
+ import urllib2
+ from cookielib import CookieJar
+except ImportError:
+ from urllib import request as urllib2
+ from http.cookiejar import CookieJar
+
+from . import __author__, __copyright__, __license__, __version__, TIMEOUT
+from .simplexml import SimpleXMLElement, TYPE_MAP, OrderedDict
log = logging.getLogger(__name__)
-logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.WARNING)
+
+#
+# Socket wrapper to enable socket.TCP_NODELAY - this greatly speeds up transactions in Linux
+# WARNING: this will modify the standard library socket module, use with care!
+# TODO: implement this as a transport faciliy
+# (to pass options directly to httplib2 or pycurl)
+# be aware of metaclasses and socks.py (SocksiPy) used by httplib2
+
+if False:
+ import socket
+ realsocket = socket.socket
+ def socketwrap(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0):
+ sockobj = realsocket(family, type, proto)
+ if type == socket.SOCK_STREAM:
+ sockobj.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+ return sockobj
+ socket.socket = socketwrap
#
# We store metadata about what available transport mechanisms we have available.
#
-_http_connectors = {} # libname: classimpl mapping
-_http_facilities = {} # functionalitylabel: [sequence of libname] mapping
+_http_connectors = {} # libname: classimpl mapping
+_http_facilities = {} # functionalitylabel: [sequence of libname] mapping
+
class TransportBase:
@classmethod
@@ -45,27 +61,41 @@ class TransportBase:
#
try:
import httplib2
+ if sys.version > '3' and httplib2.__version__ <= "0.7.7":
+ import http.client
+ # httplib2 workaround: check_hostname needs a SSL context with either
+ # CERT_OPTIONAL or CERT_REQUIRED
+ # see https://code.google.com/p/httplib2/issues/detail?id=173
+ orig__init__ = http.client.HTTPSConnection.__init__
+ def fixer(self, host, port, key_file, cert_file, timeout, context,
+ check_hostname, *args, **kwargs):
+ chk = kwargs.get('disable_ssl_certificate_validation', True) ^ True
+ orig__init__(self, host, port=port, key_file=key_file,
+ cert_file=cert_file, timeout=timeout, context=context,
+ check_hostname=chk)
+ http.client.HTTPSConnection.__init__ = fixer
except ImportError:
- TIMEOUT = None # timeout not supported by urllib2
+ TIMEOUT = None # timeout not supported by urllib2
pass
else:
class Httplib2Transport(httplib2.Http, TransportBase):
_wrapper_version = "httplib2 %s" % httplib2.__version__
_wrapper_name = 'httplib2'
+
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
##httplib2.debuglevel=4
kwargs = {}
if proxy:
import socks
kwargs['proxy_info'] = httplib2.ProxyInfo(proxy_type=socks.PROXY_TYPE_HTTP, **proxy)
- print "using proxy", proxy
+ log.info("using proxy %s" % proxy)
# set optional parameters according supported httplib2 version
if httplib2.__version__ >= '0.3.0':
kwargs['timeout'] = timeout
if httplib2.__version__ >= '0.7.0':
kwargs['disable_ssl_certificate_validation'] = cacert is None
- kwargs['ca_certs'] = cacert
+ kwargs['ca_certs'] = cacert
httplib2.Http.__init__(self, **kwargs)
_http_connectors['httplib2'] = Httplib2Transport
@@ -76,15 +106,15 @@ else:
if 'timeout' in inspect.getargspec(httplib2.Http.__init__)[0]:
_http_facilities.setdefault('timeout', []).append('httplib2')
+
#
# urllib2 support.
#
-import urllib2
class urllib2Transport(TransportBase):
_wrapper_version = "urllib2 %s" % urllib2.__version__
- _wrapper_name = 'urllib2'
+ _wrapper_name = 'urllib2'
+
def __init__(self, timeout=None, proxy=None, cacert=None, sessions=False):
- import sys
if (timeout is not None) and not self.supports_feature('timeout'):
raise RuntimeError('timeout is not supported with urllib2 transport')
if proxy:
@@ -94,26 +124,26 @@ class urllib2Transport(TransportBase):
self.request_opener = urllib2.urlopen
if sessions:
- from cookielib import CookieJar
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(CookieJar()))
self.request_opener = opener.open
-
+
self._timeout = timeout
def request(self, url, method="GET", body=None, headers={}):
req = urllib2.Request(url, body, headers)
try:
f = self.request_opener(req, timeout=self._timeout)
- except urllib2.HTTPError, f:
+ return f.info(), f.read()
+ except urllib2.HTTPError as f:
if f.code != 500:
raise
- return f.info(), f.read()
+ return f.info(), f.read()
_http_connectors['urllib2'] = urllib2Transport
_http_facilities.setdefault('sessions', []).append('urllib2')
import sys
-if sys.version_info >= (2,6):
+if sys.version_info >= (2, 6):
_http_facilities.setdefault('timeout', []).append('urllib2')
del sys
@@ -129,19 +159,23 @@ else:
try:
from cStringIO import StringIO
except ImportError:
- from StringIO import StringIO
+ try:
+ from StringIO import StringIO
+ except ImportError:
+ from io import StringIO
class pycurlTransport(TransportBase):
_wrapper_version = pycurl.version
_wrapper_name = 'pycurl'
+
def __init__(self, timeout, proxy=None, cacert=None, sessions=False):
- self.timeout = timeout
+ self.timeout = timeout
self.proxy = proxy or {}
self.cacert = cacert
-
+
def request(self, url, method, body, headers):
c = pycurl.Curl()
- c.setopt(pycurl.URL, str(url))
+ c.setopt(pycurl.URL, url)
if 'proxy_host' in self.proxy:
c.setopt(pycurl.PROXY, self.proxy['proxy_host'])
if 'proxy_port' in self.proxy:
@@ -154,20 +188,19 @@ else:
#self.body = StringIO(body)
#c.setopt(pycurl.HEADERFUNCTION, self.header)
if self.cacert:
- c.setopt(c.CAINFO, str(self.cacert))
+ c.setopt(c.CAINFO, self.cacert)
c.setopt(pycurl.SSL_VERIFYPEER, self.cacert and 1 or 0)
c.setopt(pycurl.SSL_VERIFYHOST, self.cacert and 2 or 0)
- c.setopt(pycurl.CONNECTTIMEOUT, self.timeout/6)
+ c.setopt(pycurl.CONNECTTIMEOUT, self.timeout / 6)
c.setopt(pycurl.TIMEOUT, self.timeout)
- if method=='POST':
+ if method == 'POST':
c.setopt(pycurl.POST, 1)
- c.setopt(pycurl.POSTFIELDS, body)
+ c.setopt(pycurl.POSTFIELDS, body)
if headers:
- hdrs = ['%s: %s' % (str(k), str(v)) for k, v in headers.items()]
- ##print hdrs
+ hdrs = ['%s: %s' % (k, v) for k, v in headers.items()]
+ log.debug(hdrs)
c.setopt(pycurl.HTTPHEADER, hdrs)
c.perform()
- ##print "pycurl perform..."
c.close()
return {}, self.buf.getvalue()
@@ -178,15 +211,15 @@ else:
class DummyTransport:
- "Testing class to load a xml response"
-
+ """Testing class to load a xml response"""
+
def __init__(self, xml_response):
self.xml_response = xml_response
-
+
def request(self, location, method, body, headers):
- print method, location
- print headers
- print body
+ log.debug("%s %s", method, location)
+ log.debug(headers)
+ log.debug(body)
return {}, self.xml_response
@@ -222,20 +255,19 @@ def get_http_wrapper(library=None, features=[]):
else:
return _http_connectors[candidate_name]
+
def set_http_wrapper(library=None, features=[]):
- "Set a suitable HTTP connection wrapper."
+ """Set a suitable HTTP connection wrapper."""
global Http
Http = get_http_wrapper(library, features)
return Http
def get_Http():
- "Return current transport class"
+ """Return current transport class"""
global Http
return Http
-
+
# define the default HTTP connection class (it can be changed at runtime!):
set_http_wrapper()
-
-
diff --git a/gluon/contrib/redis_cache.py b/gluon/contrib/redis_cache.py
index 634b91f3..4475a82b 100644
--- a/gluon/contrib/redis_cache.py
+++ b/gluon/contrib/redis_cache.py
@@ -24,7 +24,7 @@ def RedisCache(*args, **vars):
Usage example: put in models
from gluon.contrib.redis_cache import RedisCache
- cache.redis = RedisCache('localhost:6379',db=None, debug=True, with_lock=True)
+ cache.redis = RedisCache('localhost:6379',db=None, debug=True, with_lock=True, password=None)
:param db: redis db to use (0..16)
:param debug: if True adds to stats() the total_hits and misses
@@ -77,8 +77,9 @@ class RedisClient(object):
MAX_RETRIES = 5
RETRIES = 0
- def __init__(self, server='localhost:6379', db=None, debug=False, with_lock=False):
+ def __init__(self, server='localhost:6379', db=None, debug=False, with_lock=False, password=None):
self.server = server
+ self.password = password
self.db = db or 0
host, port = (self.server.split(':') + ['6379'])[:2]
port = int(port)
@@ -102,7 +103,10 @@ class RedisClient(object):
self.cache_set_key = 'w2p:%s:___cache_set' % (self.request.application)
- self.r_server = redis.Redis(host=host, port=port, db=self.db)
+ self.r_server = redis.Redis(host=host, port=port, db=self.db, password=self.password)
+
+ def initialize(self):
+ pass
def __call__(self, key, f, time_expire=300, with_lock=None):
if with_lock is None:
diff --git a/gluon/contrib/redis_session.py b/gluon/contrib/redis_session.py
index 2daa8184..d7bc419d 100644
--- a/gluon/contrib/redis_session.py
+++ b/gluon/contrib/redis_session.py
@@ -21,7 +21,7 @@ def RedisSession(*args, **vars):
"""
Usage example: put in models
from gluon.contrib.redis_session import RedisSession
- sessiondb = RedisSession('localhost:6379',db=0, session_expiry=False)
+ sessiondb = RedisSession('localhost:6379',db=0, session_expiry=False, password=None)
session.connect(request, response, db = sessiondb)
Simple slip-in storage for session
@@ -45,12 +45,13 @@ class RedisClient(object):
_release_script = None
def __init__(self, server='localhost:6379', db=None, debug=False,
- session_expiry=False, with_lock=False):
+ session_expiry=False, with_lock=False, password=None):
"""session_expiry can be an integer, in seconds, to set the default expiration
of sessions. The corresponding record will be deleted from the redis instance,
and there's virtually no need to run sessions2trash.py
"""
self.server = server
+ self.password = password
self.db = db or 0
host, port = (self.server.split(':') + ['6379'])[:2]
port = int(port)
@@ -59,7 +60,7 @@ class RedisClient(object):
self.app = current.request.application
else:
self.app = ''
- self.r_server = redis.Redis(host=host, port=port, db=self.db)
+ self.r_server = redis.Redis(host=host, port=port, db=self.db, password=self.password)
if with_lock:
RedisClient._release_script = \
self.r_server.register_script(_LUA_RELEASE_LOCK)
@@ -110,7 +111,7 @@ class MockTable(object):
self.session_expiry = session_expiry
self.with_lock = with_lock
- def __call__(self, record_id):
+ def __call__(self, record_id, unique_key=None):
# Support DAL shortcut query: table(record_id)
q = self.id # This will call the __getattr__ below
@@ -119,6 +120,7 @@ class MockTable(object):
# Instructs MockQuery, to behave as db(table.id == record_id)
q.op = 'eq'
q.value = record_id
+ q.unique_key = unique_key
row = q.select()
return row[0] if row else Storage()
@@ -128,7 +130,7 @@ class MockTable(object):
#return a fake query. We need to query it just by id for normal operations
self.query = MockQuery(field='id', db=self.r_server,
prefix=self.keyprefix, session_expiry=self.session_expiry,
- with_lock=self.with_lock)
+ with_lock=self.with_lock, unique_key=self.unique_key)
return self.query
elif key == '_db':
#needed because of the calls in sessions2trash.py and globals.py
@@ -161,7 +163,7 @@ class MockQuery(object):
and listing all keys. No other operation is supported
"""
def __init__(self, field=None, db=None, prefix=None, session_expiry=False,
- with_lock=False):
+ with_lock=False, unique_key=None):
self.field = field
self.value = None
self.db = db
@@ -169,6 +171,7 @@ class MockQuery(object):
self.op = None
self.session_expiry = session_expiry
self.with_lock = with_lock
+ self.unique_key = unique_key
def __eq__(self, value, op='eq'):
self.value = value
@@ -186,7 +189,12 @@ class MockQuery(object):
acquire_lock(self.db, key + ':lock', self.value)
rtn = self.db.hgetall(key)
if rtn:
- rtn['update_record'] = self.update # update record support
+ if self.unique_key:
+ #make sure the id and unique_key are correct
+ if rtn['unique_key'] == self.unique_key:
+ rtn['update_record'] = self.update # update record support
+ else:
+ rtn = None
return [Storage(rtn)] if rtn else []
elif self.op == 'ge' and self.field == 'id' and self.value == 0:
#means that someone wants the complete list
diff --git a/gluon/contrib/shell.py b/gluon/contrib/shell.py
index 4d206036..0a224100 100755
--- a/gluon/contrib/shell.py
+++ b/gluon/contrib/shell.py
@@ -258,7 +258,7 @@ def run(history, statement, env={}):
if not name.startswith('__'):
try:
history.set_global(name, val)
- except TypeError, ex:
+ except (TypeError, cPickle.PicklingError), ex:
UNPICKLABLE_TYPES.append(type(val))
history.add_unpicklable(statement, new_globals.keys())
diff --git a/gluon/contrib/simplejsonrpc.py b/gluon/contrib/simplejsonrpc.py
index afdaec7d..6a68da0a 100644
--- a/gluon/contrib/simplejsonrpc.py
+++ b/gluon/contrib/simplejsonrpc.py
@@ -81,12 +81,13 @@ class JSONSafeTransport(JSONTransportMixin, SafeTransport):
class ServerProxy(object):
"JSON RPC Simple Client Service Proxy"
- def __init__(self, uri, transport=None, encoding=None, verbose=0):
+ def __init__(self, uri, transport=None, encoding=None, verbose=0,version=None):
self.location = uri # server location (url)
self.trace = verbose # show debug messages
self.exceptions = True # raise errors? (JSONRPCError)
self.timeout = None
self.json_request = self.json_response = ''
+ self.version = version # '2.0' for jsonrpc2
type, uri = urllib.splittype(uri)
if type not in ("http", "https"):
@@ -112,6 +113,8 @@ class ServerProxy(object):
# build data sent to the service
request_id = random.randint(0, sys.maxint)
data = {'id': request_id, 'method': method, 'params': args, }
+ if self.version:
+ data['jsonrpc'] = self.version #mandatory key/value for jsonrpc2 validation else err -32600
request = json.dumps(data)
# make HTTP request (retry if connection is lost)
@@ -130,14 +133,13 @@ class ServerProxy(object):
# {'version': '1.1', 'id': id, 'result': result, 'error': None}
response = json.loads(response)
- if response['id'] != request_id:
- raise JSONRPCError(0, "JSON Request ID != Response ID")
-
self.error = response.get('error', {})
if self.error and self.exceptions:
raise JSONRPCError(self.error.get('code', 0),
self.error.get('message', ''),
self.error.get('data', None))
+ if response['id'] != request_id:
+ raise JSONRPCError(0, "JSON Request ID != Response ID")
return response.get('result')
diff --git a/gluon/contrib/spreadsheet.py b/gluon/contrib/spreadsheet.py
index 7cca9c3d..40663d81 100644
--- a/gluon/contrib/spreadsheet.py
+++ b/gluon/contrib/spreadsheet.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
"""
-Developed by Massimo Di Pierro, optional component of web2py, GPL2 license.
+Developed by Massimo Di Pierro, optional component of web2py, BSDv3 license.
"""
import re
diff --git a/gluon/contrib/webclient.py b/gluon/contrib/webclient.py
index c2f18fb5..602c79e6 100644
--- a/gluon/contrib/webclient.py
+++ b/gluon/contrib/webclient.py
@@ -1,3 +1,4 @@
+
"""
Developed by Massimo Di Pierro
Released under the web2py license (LGPL)
@@ -105,9 +106,9 @@ class WebClient(object):
# assume everything is ok and make http request
error = None
try:
- if isinstance(data,str):
+ if isinstance(data, str):
self.method = 'POST' if method=='auto' else method
- if isinstance(data, dict):
+ elif isinstance(data, dict):
self.method = 'POST' if method=='auto' else method
# if there is only one form, set _formname automatically
if not '_formname' in data and len(self.forms) == 1:
diff --git a/gluon/contrib/websocket_messaging.py b/gluon/contrib/websocket_messaging.py
index bf4630a2..c7f5d03f 100644
--- a/gluon/contrib/websocket_messaging.py
+++ b/gluon/contrib/websocket_messaging.py
@@ -1,4 +1,6 @@
+# -*- coding: utf8 -*-
#!/usr/bin/env python
+
"""
This file is part of the web2py Web Framework
Copyrighted by Massimo Di Pierro
@@ -17,13 +19,13 @@ Attention: Requires Chrome or Safari. For IE of Firefox you need https://github.
3) from any web2py app you can post messages with
from gluon.contrib.websocket_messaging import websocket_send
- websocket_send('http://127.0.0.1:8888','Hello World','mykey','mygroup')
+ websocket_send('http://127.0.0.1:8888', 'Hello World', 'mykey', 'mygroup')
4) from any template you can receive them with
@@ -49,11 +51,11 @@ Or if you want to send json messages and store evaluated json in a var called da
Here is a complete sample web2py action:
def index():
- form=LOAD('default','ajax_form',ajax=True)
+ form=LOAD('default', 'ajax_form', ajax=True)
script=SCRIPT('''
jQuery(document).ready(function(){
var callback=function(e){alert(e.data)};
- if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup',callback))
+ if(!$.web2py.web2py_websocket('ws://127.0.0.1:8888/realtime/mygroup', callback))
alert("html5 websocket not supported by your browser, try Google Chrome");
});
@@ -65,7 +67,7 @@ Here is a complete sample web2py action:
if form.accepts(request,session):
from gluon.contrib.websocket_messaging import websocket_send
websocket_send(
- 'http://127.0.0.1:8888',form.vars.message,'mykey','mygroup')
+ 'http://127.0.0.1:8888', form.vars.message, 'mykey', 'mygroup')
return form
Acknowledgements:
@@ -83,9 +85,7 @@ import optparse
import urllib
import time
-listeners = {}
-names = {}
-tokens = {}
+listeners, names, tokens = {}, {}, {}
def websocket_send(url, message, hmac_key=None, group='default'):
diff --git a/gluon/custom_import.py b/gluon/custom_import.py
index 133b9fda..b56ba65a 100644
--- a/gluon/custom_import.py
+++ b/gluon/custom_import.py
@@ -1,11 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+"""
+| This file is part of the web2py Web Framework
+| Copyrighted by Massimo Di Pierro
+| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
+Support for smart import syntax for web2py applications
+-------------------------------------------------------
+"""
import __builtin__
import os
import sys
import threading
-import traceback
from gluon import current
NATIVE_IMPORTER = __builtin__.__import__
@@ -35,10 +41,10 @@ class CustomImportException(ImportError):
def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
"""
- The web2py custom importer. Like the standard Python importer but it
- tries to transform import statements as something like
+ web2py's custom importer. It behaves like the standard Python importer but
+ it tries to transform import statements as something like
"import applications.app_name.modules.x".
- If the import failed, fall back on naive_importer
+ If the import fails, it falls back on naive_importer
"""
globals = globals or {}
@@ -102,7 +108,7 @@ def custom_importer(name, globals=None, locals=None, fromlist=None, level=-1):
class TrackImporter(object):
"""
An importer tracking the date of the module files and reloading them when
- they have changed.
+ they are changed.
"""
THREAD_LOCAL = threading.local()
@@ -143,7 +149,7 @@ class TrackImporter(object):
def _reload_check(self, name, globals, locals, level):
"""
Update the date associated to the module and reload the module if
- the file has changed.
+ the file changed.
"""
module = sys.modules.get(name)
file = self._get_module_file(module)
diff --git a/gluon/dal.py b/gluon/dal.py
index a7e624e2..2677b723 100644
--- a/gluon/dal.py
+++ b/gluon/dal.py
@@ -2,131 +2,137 @@
# -*- coding: utf-8 -*-
"""
-This file is part of the web2py Web Framework
-Copyrighted by Massimo Di Pierro
-License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
+| This file is part of the web2py Web Framework
+| Copyrighted by Massimo Di Pierro
+| License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html)
+|
Thanks to
- * Niall Sweeny for MS SQL support
- * Marcel Leuthi for Oracle support
- * Denes
- * Chris Clark
- * clach05
- * Denes Lengyel
- * and many others who have contributed to current and previous versions
-This file contains the DAL support for many relational databases,
-including:
-- SQLite & SpatiaLite
-- MySQL
-- Postgres
-- Firebird
-- Oracle
-- MS SQL
-- DB2
-- Interbase
-- Ingres
-- Informix (9+ and SE)
-- SapDB (experimental)
-- Cubrid (experimental)
-- CouchDB (experimental)
-- MongoDB (in progress)
-- Google:nosql
-- Google:sql
-- Teradata
-- IMAP (experimental)
+ - Niall Sweeny for MS SQL support
+ - Marcel Leuthi for Oracle support
+ - Denes
+ - Chris Clark
+ - clach05
+ - Denes Lengyel
-Example of usage:
+and many others who have contributed to current and previous versions
->>> # from dal import DAL, Field
+This file contains the DAL support for many relational databases, including:
-### create DAL connection (and create DB if it doesn't exist)
->>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'),
-... folder=None)
+ - SQLite & SpatiaLite
+ - MySQL
+ - Postgres
+ - Firebird
+ - Oracle
+ - MS SQL
+ - DB2
+ - Interbase
+ - Ingres
+ - Informix (9+ and SE)
+ - SapDB (experimental)
+ - Cubrid (experimental)
+ - CouchDB (experimental)
+ - MongoDB (in progress)
+ - Google:nosql
+ - Google:sql
+ - Teradata
+ - IMAP (experimental)
-### define a table 'person' (create/alter as necessary)
->>> person = db.define_table('person',Field('name','string'))
+Example of usage::
-### insert a record
->>> id = person.insert(name='James')
+ >>> # from dal import DAL, Field
-### retrieve it by id
->>> james = person(id)
+ ### create DAL connection (and create DB if it doesn't exist)
+ >>> db = DAL(('sqlite://storage.sqlite', 'mysql://a:b@localhost/x'),
+ ... folder=None)
-### retrieve it by name
->>> james = person(name='James')
+ ### define a table 'person' (create/alter as necessary)
+ >>> person = db.define_table('person', Field('name', 'string'))
-### retrieve it by arbitrary query
->>> query = (person.name=='James') & (person.name.startswith('J'))
->>> james = db(query).select(person.ALL)[0]
+ ### insert a record
+ >>> id = person.insert(name='James')
-### update one record
->>> james.update_record(name='Jim')
-
+ ### retrieve it by id
+ >>> james = person(id)
-### update multiple records by query
->>> db(person.name.like('J%')).update(name='James')
-1
+ ### retrieve it by name
+ >>> james = person(name='James')
-### delete records by query
->>> db(person.name.lower() == 'jim').delete()
-0
+ ### retrieve it by arbitrary query
+ >>> query = (person.name == 'James') & (person.name.startswith('J'))
+ >>> james = db(query).select(person.ALL)[0]
-### retrieve multiple records (rows)
->>> people = db(person).select(orderby=person.name,
-... groupby=person.name, limitby=(0,100))
+ ### update one record
+ >>> james.update_record(name='Jim')
+
-### further filter them
->>> james = people.find(lambda row: row.name == 'James').first()
->>> print james.id, james.name
-1 James
+ ### update multiple records by query
+ >>> db(person.name.like('J%')).update(name='James')
+ 1
-### check aggregates
->>> counter = person.id.count()
->>> print db(person).select(counter).first()(counter)
-1
+ ### delete records by query
+ >>> db(person.name.lower() == 'jim').delete()
+ 0
-### delete one record
->>> james.delete_record()
-1
+ ### retrieve multiple records (rows)
+ >>> people = db(person).select(orderby=person.name,
+ ... groupby=person.name, limitby=(0, 100))
-### delete (drop) entire database table
->>> person.drop()
+ ### further filter them
+ >>> james = people.find(lambda row: row.name == 'James').first()
+ >>> print james.id, james.name
+ 1 James
-Supported field types:
-id string text boolean integer double decimal password upload
-blob time date datetime
+ ### check aggregates
+ >>> counter = person.id.count()
+ >>> print db(person).select(counter).first()(counter)
+ 1
-Supported DAL URI strings:
-'sqlite://test.db'
-'spatialite://test.db'
-'sqlite:memory'
-'spatialite:memory'
-'jdbc:sqlite://test.db'
-'mysql://root:none@localhost/test'
-'postgres://mdipierro:password@localhost/test'
-'postgres:psycopg2://mdipierro:password@localhost/test'
-'postgres:pg8000://mdipierro:password@localhost/test'
-'jdbc:postgres://mdipierro:none@localhost/test'
-'mssql://web2py:none@A64X2/web2py_test'
-'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
-'oracle://username:password@database'
-'firebird://user:password@server:3050/database'
-'db2://DSN=dsn;UID=user;PWD=pass'
-'firebird://username:password@hostname/database'
-'firebird_embedded://username:password@c://path'
-'informix://user:password@server:3050/database'
-'informixu://user:password@server:3050/database' # unicode informix
-'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
-'google:datastore' # for google app engine datastore
-'google:sql' # for google app engine with sql (mysql compatible)
-'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
-'imap://user:password@server:port' # experimental
-'mongodb://user:password@server:port/database' # experimental
+ ### delete one record
+ >>> james.delete_record()
+ 1
+
+ ### delete (drop) entire database table
+ >>> person.drop()
+
+
+Supported DAL URI strings::
+
+ 'sqlite://test.db'
+ 'spatialite://test.db'
+ 'sqlite:memory'
+ 'spatialite:memory'
+ 'jdbc:sqlite://test.db'
+ 'mysql://root:none@localhost/test'
+ 'postgres://mdipierro:password@localhost/test'
+ 'postgres:psycopg2://mdipierro:password@localhost/test'
+ 'postgres:pg8000://mdipierro:password@localhost/test'
+ 'jdbc:postgres://mdipierro:none@localhost/test'
+ 'mssql://web2py:none@A64X2/web2py_test'
+ 'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings
+ 'mssql3://web2py:none@A64X2/web2py_test' # better pagination (requires >= 2005)
+ 'mssql4://web2py:none@A64X2/web2py_test' # best pagination (requires >= 2012)
+ 'oracle://username:password@database'
+ 'firebird://user:password@server:3050/database'
+ 'db2://DSN=dsn;UID=user;PWD=pass'
+ 'firebird://username:password@hostname/database'
+ 'firebird_embedded://username:password@c://path'
+ 'informix://user:password@server:3050/database'
+ 'informixu://user:password@server:3050/database' # unicode informix
+ 'ingres://database' # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name'
+ 'google:datastore' # for google app engine datastore
+ 'google:datastore+ndb' # for google app engine datastore + ndb
+ 'google:sql' # for google app engine with sql (mysql compatible)
+ 'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental
+ 'imap://user:password@server:port' # experimental
+ 'mongodb://user:password@server:port/database' # experimental
+
+For more info::
+
+ help(DAL)
+ help(Field)
-For more info:
-help(DAL)
-help(Field)
"""
###################################################################################
@@ -141,11 +147,10 @@ DEFAULTLENGTH = {'string':512,
'text':2**15,
'blob':2**31}
TIMINGSSIZE = 100
-SPATIALLIBS = {
- 'Windows':'libspatialite',
- 'Linux':'libspatialite.so',
- 'Darwin':'libspatialite.dylib'
- }
+SPATIALLIBS = {'Windows':'libspatialite',
+ 'Linux':'libspatialite.so',
+ 'Darwin':'libspatialite.dylib'
+ }
DEFAULT_URI = 'sqlite://dummy.db'
import re
@@ -185,7 +190,7 @@ else:
from io import StringIO as StringIO
import copyreg
long = int
- hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
+ hashlib_md5 = lambda s: hashlib.md5(bytes(s, 'utf8'))
bytes, unicode = bytes, str
if PYTHON_VERSION[:2] < (2, 7):
@@ -198,14 +203,13 @@ CALLABLETYPES = (types.LambdaType, types.FunctionType,
types.BuiltinFunctionType,
types.MethodType, types.BuiltinMethodType)
-TABLE_ARGS = set(
- ('migrate','primarykey','fake_migrate','format','redefine',
- 'singular','plural','trigger_name','sequence_name','fields',
- 'common_filter','polymodel','table_class','on_define','rname'))
+TABLE_ARGS = set(('migrate', 'primarykey', 'fake_migrate', 'format', 'redefine',
+ 'singular', 'plural', 'trigger_name', 'sequence_name', 'fields',
+ 'common_filter', 'polymodel', 'table_class', 'on_define', 'rname'))
-SELECT_ARGS = set(
- ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
- 'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))
+SELECT_ARGS = set(('orderby', 'groupby', 'limitby', 'required', 'cache', 'left',
+ 'distinct', 'having', 'join', 'for_update', 'processor',
+ 'cacheable', 'orderby_on_limitby'))
ogetattr = object.__getattribute__
osetattr = object.__setattr__
@@ -219,6 +223,7 @@ try:
from gluon.utils import web2py_uuid
except (ImportError, SystemError):
import uuid
+
def web2py_uuid(): return str(uuid.uuid4())
try:
@@ -241,7 +246,7 @@ except ImportError:
simplejson = None
LOGGER = logging.getLogger("web2py.dal")
-DEFAULT = lambda:0
+DEFAULT = lambda: 0
GLOBAL_LOCKER = threading.RLock()
THREAD_LOCAL = threading.local()
@@ -381,7 +386,7 @@ if not 'google' in DRIVERS:
DRIVERS.append('Firebird(fdb)')
except ImportError:
LOGGER.debug('no Firebird driver fdb')
-#####
+
try:
import firebirdsql
DRIVERS.append('Firebird(firebirdsql)')
@@ -441,21 +446,21 @@ if not 'google' in DRIVERS:
except:
LOGGER.debug('no IMAP driver imaplib')
-PLURALIZE_RULES = [
- (re.compile('child$'), re.compile('child$'), 'children'),
- (re.compile('oot$'), re.compile('oot$'), 'eet'),
- (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
- (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
- (re.compile('sis$'), re.compile('sis$'), 'ses'),
- (re.compile('man$'), re.compile('man$'), 'men'),
- (re.compile('ife$'), re.compile('ife$'), 'ives'),
- (re.compile('eau$'), re.compile('eau$'), 'eaux'),
- (re.compile('lf$'), re.compile('lf$'), 'lves'),
- (re.compile('[sxz]$'), re.compile('$'), 'es'),
- (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
- (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
- (re.compile('$'), re.compile('$'), 's'),
- ]
+PLURALIZE_RULES = [(re.compile('child$'), re.compile('child$'), 'children'),
+ (re.compile('oot$'), re.compile('oot$'), 'eet'),
+ (re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
+ (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
+ (re.compile('sis$'), re.compile('sis$'), 'ses'),
+ (re.compile('man$'), re.compile('man$'), 'men'),
+ (re.compile('ife$'), re.compile('ife$'), 'ives'),
+ (re.compile('eau$'), re.compile('eau$'), 'eaux'),
+ (re.compile('lf$'), re.compile('lf$'), 'lves'),
+ (re.compile('[sxz]$'), re.compile('$'), 'es'),
+ (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
+ (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
+ (re.compile('$'), re.compile('$'), 's'),
+ ]
+
def pluralize(singular, rules=PLURALIZE_RULES):
for line in rules:
@@ -463,25 +468,31 @@ def pluralize(singular, rules=PLURALIZE_RULES):
plural = re_search.search(singular) and re_sub.sub(replace, singular)
if plural: return plural
-def hide_password(uri):
- if isinstance(uri,(list,tuple)):
- return [hide_password(item) for item in uri]
- return REGEX_NOPASSWD.sub('******',uri)
-def OR(a,b):
+def hide_password(uri):
+ if isinstance(uri, (list, tuple)):
+ return [hide_password(item) for item in uri]
+ return REGEX_NOPASSWD.sub('******', uri)
+
+
+def OR(a, b):
return a|b
-def AND(a,b):
+
+def AND(a, b):
return a&b
+
def IDENTITY(x): return x
-def varquote_aux(name,quotestr='%s'):
+
+def varquote_aux(name, quotestr='%s'):
return name if REGEX_W.match(name) else quotestr % name
-def quote_keyword(a,keyword='timestamp'):
+
+def quote_keyword(a, keyword='timestamp'):
regex = re.compile('\.keyword(?=\w)')
- a = regex.sub('."%s"' % keyword,a)
+ a = regex.sub('."%s"' % keyword, a)
return a
if 'google' in DRIVERS:
@@ -521,10 +532,10 @@ if 'google' in DRIVERS:
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
- raise gae.BadValueError("Property %s must be a Decimal or string."\
- % self.name)
+ raise gae.BadValueError("Property %s must be a Decimal or string."
+ % self.name)
- #TODO Needs more testing
+ # TODO Needs more testing
class NDBDecimalProperty(ndb.StringProperty):
"""
NDB decimal implementation
@@ -554,8 +565,9 @@ if 'google' in DRIVERS:
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
- raise TypeError("Property %s must be a Decimal or string."\
- % self._name)
+ raise TypeError("Property %s must be a Decimal or string."
+ % self._name)
+
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
@@ -572,7 +584,7 @@ class ConnectionPool(object):
# ## this allows gluon to commit/rollback all dbs in this thread
- def close(self,action='commit',really=True):
+ def close(self, action='commit', really=True):
if action:
if callable(action):
action(self)
@@ -593,20 +605,20 @@ class ConnectionPool(object):
@staticmethod
def close_all_instances(action):
""" to close cleanly databases in a multithreaded environment """
- dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
+ dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items()
for db_uid, db_group in dbs:
for db in db_group:
- if hasattr(db,'_adapter'):
+ if hasattr(db, '_adapter'):
db._adapter.close(action)
- getattr(THREAD_LOCAL,'db_instances',{}).clear()
- getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
+ getattr(THREAD_LOCAL, 'db_instances', {}).clear()
+ getattr(THREAD_LOCAL, 'db_instances_zombie', {}).clear()
if callable(action):
action(None)
return
def find_or_make_work_folder(self):
- """ this actually does not make the folder. it has to be there """
- self.folder = getattr(THREAD_LOCAL,'folder','')
+ #this actually does not make the folder. it has to be there
+ self.folder = getattr(THREAD_LOCAL, 'folder', '')
if (os.path.isabs(self.folder) and
isinstance(self, UseDatabaseStoredFile) and
@@ -618,24 +630,24 @@ class ConnectionPool(object):
os.mkdir(self.folder)
def after_connection_hook(self):
- """hook for the after_connection parameter"""
+ """Hook for the after_connection parameter"""
if callable(self._after_connection):
self._after_connection(self)
self.after_connection()
def after_connection(self):
- """ this it is supposed to be overloaded by adapters"""
+ #this it is supposed to be overloaded by adapters
pass
def reconnect(self, f=None, cursor=True):
"""
- this function defines: self.connection and self.cursor
- (iff cursor is True)
- if self.pool_size>0 it will try pull the connection from the pool
+ Defines: `self.connection` and `self.cursor`
+ (if cursor is True)
+ if `self.pool_size>0` it will try pull the connection from the pool
if the connection is not active (closed by db server) it will loop
- if not self.pool_size or no active connections in pool makes a new one
+ if not `self.pool_size` or no active connections in pool makes a new one
"""
- if getattr(self,'connection', None) != None:
+ if getattr(self, 'connection', None) is not None:
return
if f is None:
f = self.connector
@@ -671,23 +683,40 @@ class ConnectionPool(object):
break
self.after_connection_hook()
+
###################################################################################
# metaclass to prepare adapter classes static values
###################################################################################
class AdapterMeta(type):
- def __new__(cls, clsname, bases, dct):
- classobj = super(AdapterMeta, cls).__new__(cls, clsname, bases, dct)
- classobj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
- classobj.QUOTE_TEMPLATE % REGEX_NO_GREEDY_ENTITY_NAME + \
- r'\.' + \
- classobj.QUOTE_TEMPLATE % REGEX_NO_GREEDY_ENTITY_NAME + \
- r'$')
- return classobj
+ """Metaclass to support manipulation of adapter classes.
-###################################################################################
-# this is a generic adapter that does nothing; all others are derived from this one
-###################################################################################
+ At the moment is used to intercept `entity_quoting` argument passed to DAL.
+ """
+ def __call__(cls, *args, **kwargs):
+ entity_quoting = kwargs.get('entity_quoting', False)
+ if 'entity_quoting' in kwargs:
+ del kwargs['entity_quoting']
+
+ obj = super(AdapterMeta, cls).__call__(*args, **kwargs)
+ if not entity_quoting:
+ quot = obj.QUOTE_TEMPLATE = '%s'
+ regex_ent = r'(\w+)'
+ else:
+ quot = obj.QUOTE_TEMPLATE
+ regex_ent = REGEX_NO_GREEDY_ENTITY_NAME
+ obj.REGEX_TABLE_DOT_FIELD = re.compile(r'^' + \
+ quot % regex_ent + \
+ r'\.' + \
+ quot % regex_ent + \
+ r'$')
+
+ return obj
+
+
+###############################################################################
+# this is a generic adapter that does nothing; all others are derived from this
+###############################################################################
class BaseAdapter(ConnectionPool):
__metaclass__ = AdapterMeta
@@ -695,7 +724,7 @@ class BaseAdapter(ConnectionPool):
native_json = False
driver = None
driver_name = None
- drivers = () # list of drivers from which to pick
+ drivers = () # list of drivers from which to pick
connection = None
commit_on_alter_table = False
support_distributed_transaction = False
@@ -703,52 +732,51 @@ class BaseAdapter(ConnectionPool):
can_select_for_update = True
dbpath = None
folder = None
+ connector = lambda *args, **kwargs: None # __init__ should override this
TRUE = 'T'
FALSE = 'F'
T_SEP = ' '
QUOTE_TEMPLATE = '"%s"'
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'CHAR(%(length)s)',
+ 'text': 'TEXT',
+ 'json': 'TEXT',
+ 'password': 'CHAR(%(length)s)',
+ 'blob': 'BLOB',
+ 'upload': 'CHAR(%(length)s)',
+ 'integer': 'INTEGER',
+ 'bigint': 'INTEGER',
+ 'float': 'DOUBLE',
+ 'double': 'DOUBLE',
+ 'decimal': 'DOUBLE',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
+ 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'TEXT',
+ 'list:string': 'TEXT',
+ 'list:reference': 'TEXT',
+ # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
+ 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'CHAR(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'CHAR(%(length)s)',
- 'blob': 'BLOB',
- 'upload': 'CHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'INTEGER',
- 'float':'DOUBLE',
- 'double': 'DOUBLE',
- 'decimal': 'DOUBLE',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'TEXT',
- 'list:string': 'TEXT',
- 'list:reference': 'TEXT',
- # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
- 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
-
- def isOperationalError(self,exception):
+ def isOperationalError(self, exception):
if not hasattr(self.driver, "OperationalError"):
return None
return isinstance(exception, self.driver.OperationalError)
- def isProgrammingError(self,exception):
+ def isProgrammingError(self, exception):
if not hasattr(self.driver, "ProgrammingError"):
return None
return isinstance(exception, self.driver.ProgrammingError)
def id_query(self, table):
- pkeys = getattr(table,'_primarykey',None)
+ pkeys = getattr(table, '_primarykey', None)
if pkeys:
return table[pkeys[0]] != None
else:
@@ -758,45 +786,39 @@ class BaseAdapter(ConnectionPool):
return "'%s'" % obj.replace("'", "''")
def smart_adapt(self, obj):
- if isinstance(obj,(int,float)):
+ if isinstance(obj, (int, float)):
return str(obj)
return self.adapt(str(obj))
def file_exists(self, filename):
- """
- to be used ONLY for files that on GAE may not be on filesystem
- """
+ # to be used ONLY for files that on GAE may not be on filesystem
return exists(filename)
def file_open(self, filename, mode='rb', lock=True):
- """
- to be used ONLY for files that on GAE may not be on filesystem
- """
+ # to be used ONLY for files that on GAE may not be on filesystem
if have_portalocker and lock:
- fileobj = portalocker.LockedFile(filename,mode)
+ fileobj = portalocker.LockedFile(filename, mode)
else:
- fileobj = open(filename,mode)
+ fileobj = open(filename, mode)
return fileobj
def file_close(self, fileobj):
- """
- to be used ONLY for files that on GAE may not be on filesystem
- """
+ #to be used ONLY for files that on GAE may not be on filesystem
if fileobj:
fileobj.close()
def file_delete(self, filename):
os.unlink(filename)
- def find_driver(self,adapter_args,uri=None):
+ def find_driver(self, adapter_args, uri=None):
self.adapter_args = adapter_args
- if getattr(self,'driver',None) != None:
+ if getattr(self, 'driver', None) != None:
return
drivers_available = [driver for driver in self.drivers
if driver in globals()]
if uri:
- items = uri.split('://',1)[0].split(':')
- request_driver = items[1] if len(items)>1 else None
+ items = uri.split('://', 1)[0].split(':')
+ request_driver = items[1] if len(items) > 1 else None
else:
request_driver = None
request_driver = request_driver or adapter_args.get('driver')
@@ -820,7 +842,7 @@ class BaseAdapter(ConnectionPool):
"""
isabs = None
- logfilename = self.adapter_args.get('logfile','sql.log')
+ logfilename = self.adapter_args.get('logfile', 'sql.log')
writelog = bool(logfilename)
if writelog:
isabs = os.path.isabs(logfilename)
@@ -834,10 +856,9 @@ class BaseAdapter(ConnectionPool):
logfile.write(message)
self.file_close(logfile)
-
- def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
- adapter_args={},do_connect=True, after_connection=None):
+ adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "None"
self.uri = uri
@@ -845,21 +866,22 @@ class BaseAdapter(ConnectionPool):
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
+
class Dummy(object):
lastrowid = 1
+
def __getattr__(self, value):
return lambda *a, **b: []
self.connection = Dummy()
self.cursor = Dummy()
-
- def sequence_name(self,tablename):
+ def sequence_name(self, tablename):
return self.QUOTE_TEMPLATE % ('%s_sequence' % tablename)
- def trigger_name(self,tablename):
+ def trigger_name(self, tablename):
return '%s_sequence' % tablename
- def varquote(self,name):
+ def varquote(self, name):
return name
def create_table(self, table,
@@ -880,7 +902,7 @@ class BaseAdapter(ConnectionPool):
sortable += 1
field_name = field.name
field_type = field.type
- if isinstance(field_type,SQLCustomType):
+ if isinstance(field_type, SQLCustomType):
ftype = field_type.native or field_type.type
elif field_type.startswith('reference'):
referenced = field_type[10:].strip()
@@ -889,9 +911,9 @@ class BaseAdapter(ConnectionPool):
constraint_name = self.constraint_name(tablename, field_name)
# if not '.' in referenced \
# and referenced != tablename \
- # and hasattr(table,'_primarykey'):
+ # and hasattr(table, '_primarykey'):
# ftype = types['integer']
- #else:
+ # else:
try:
rtable = db[referenced]
rfield = rtable._id
@@ -900,7 +922,7 @@ class BaseAdapter(ConnectionPool):
except (KeyError, ValueError, AttributeError), e:
LOGGER.debug('Error: %s' % e)
try:
- rtablename,rfieldname = referenced.split('.')
+ rtablename, rfieldname = referenced.split('.')
rtable = db[rtablename]
rfield = rtable[rfieldname]
except Exception, e:
@@ -921,7 +943,7 @@ class BaseAdapter(ConnectionPool):
else:
ftype = ftype + \
types['reference FK'] % dict(
- constraint_name = constraint_name, # should be quoted
+ constraint_name = constraint_name, # should be quoted
foreign_key = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')',
table_name = table.sqlsafe,
field_name = field.sqlsafe_name,
@@ -934,12 +956,12 @@ class BaseAdapter(ConnectionPool):
id_fieldname = table._id.sqlsafe_name
else: #make a guess
id_fieldname = self.QUOTE_TEMPLATE % 'id'
- #gotcha: the referenced table must be defined before
- #the referencing one to be able to create the table
- #Also if it's not recommended, we can still support
- #references to tablenames without rname to make
- #migrations and model relationship work also if tables
- #are not defined in order
+ # gotcha: the referenced table must be defined before
+ # the referencing one to be able to create the table
+ # Also if it's not recommended, we can still support
+ # references to tablenames without rname to make
+ # migrations and model relationship work also if tables
+ # are not defined in order
if referenced == tablename:
real_referenced = db[referenced].sqlsafe
else:
@@ -948,30 +970,29 @@ class BaseAdapter(ConnectionPool):
or referenced)
rfield = db[referenced]._id
ftype = types[field_type[:9]] % dict(
- index_name = self.QUOTE_TEMPLATE % (field_name+'__idx'),
- field_name = field.sqlsafe_name,
- constraint_name = self.QUOTE_TEMPLATE % constraint_name,
- foreign_key = '%s (%s)' % (real_referenced, rfield.sqlsafe_name),
+ index_name=self.QUOTE_TEMPLATE % (field_name+'__idx'),
+ field_name=field.sqlsafe_name,
+ constraint_name=self.QUOTE_TEMPLATE % constraint_name,
+ foreign_key='%s (%s)' % (real_referenced, rfield.sqlsafe_name),
on_delete_action=field.ondelete)
elif field_type.startswith('list:reference'):
ftype = types[field_type[:14]]
elif field_type.startswith('decimal'):
- precision, scale = map(int,field_type[8:-1].split(','))
+ precision, scale = map(int, field_type[8:-1].split(','))
ftype = types[field_type[:7]] % \
- dict(precision=precision,scale=scale)
+ dict(precision=precision, scale=scale)
elif field_type.startswith('geo'):
- if not hasattr(self,'srid'):
+ if not hasattr(self, 'srid'):
raise RuntimeError('Adapter does not support geometry')
srid = self.srid
geotype, parms = field_type[:-1].split('(')
if not geotype in types:
- raise SyntaxError(
- 'Field: unknown field type: %s for %s' \
- % (field_type, field_name))
+ raise SyntaxError('Field: unknown field type: %s for %s'
+ % (field_type, field_name))
ftype = types[geotype]
if self.dbengine == 'postgres' and geotype == 'geometry':
# parameters: schema, srid, dimension
- dimension = 2 # GIS.dimension ???
+ dimension = 2 # GIS.dimension ???
parms = parms.split(',')
if len(parms) == 3:
schema, srid, dimension = parms
@@ -986,8 +1007,8 @@ class BaseAdapter(ConnectionPool):
dimension=dimension)
postcreation_fields.append(ftype)
elif not field_type in types:
- raise SyntaxError('Field: unknown field type: %s for %s' % \
- (field_type, field_name))
+ raise SyntaxError('Field: unknown field type: %s for %s'
+ % (field_type, field_name))
else:
ftype = types[field_type]\
% dict(length=field.length)
@@ -1023,8 +1044,8 @@ class BaseAdapter(ConnectionPool):
sql_fields_aux[field_name] = dict(sql=ftype)
# Postgres - PostGIS:
# geometry fields are added after the table has been created, not now
- if not (self.dbengine == 'postgres' and \
- field_type.startswith('geom')):
+ if not (self.dbengine == 'postgres'
+ and field_type.startswith('geom')):
fields.append('%s %s' % (field.sqlsafe_name, ftype))
other = ';'
@@ -1032,14 +1053,14 @@ class BaseAdapter(ConnectionPool):
if self.dbengine == 'mysql':
if not hasattr(table, "_primarykey"):
fields.append('PRIMARY KEY (%s)' % (self.QUOTE_TEMPLATE % table._id.name))
- engine = self.adapter_args.get('engine','InnoDB')
+ engine = self.adapter_args.get('engine', 'InnoDB')
other = ' ENGINE=%s CHARACTER SET utf8;' % engine
fields = ',\n '.join(fields)
for rtablename in TFK:
rfields = TFK[rtablename]
pkeys = [self.QUOTE_TEMPLATE % pk for pk in db[rtablename]._primarykey]
- fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys ]
+ fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys]
fields = fields + ',\n ' + \
types['reference TFK'] % dict(
table_name = table.sqlsafe,
@@ -1050,10 +1071,10 @@ class BaseAdapter(ConnectionPool):
table_rname = table.sqlsafe
- if getattr(table,'_primarykey',None):
+ if getattr(table, '_primarykey', None):
query = "CREATE TABLE %s(\n %s,\n %s) %s" % \
(table.sqlsafe, fields,
- self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])),other)
+ self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])), other)
else:
query = "CREATE TABLE %s(\n %s\n)%s" % \
(table.sqlsafe, fields, other)
@@ -1084,7 +1105,7 @@ class BaseAdapter(ConnectionPool):
% (datetime.datetime.today().isoformat(),
query), table)
if not fake_migrate:
- self.create_sequence_and_triggers(query,table)
+ self.create_sequence_and_triggers(query, table)
table._db.commit()
# Postgres geom fields are added now,
# after the table has been created
@@ -1108,41 +1129,40 @@ class BaseAdapter(ConnectionPool):
raise RuntimeError('File %s appears corrupted' % table._dbt)
self.file_close(tfile)
if sql_fields != sql_fields_old:
- self.migrate_table(
- table,
- sql_fields, sql_fields_old,
- sql_fields_aux, None,
- fake_migrate=fake_migrate
- )
+ self.migrate_table(table,
+ sql_fields, sql_fields_old,
+ sql_fields_aux, None,
+ fake_migrate=fake_migrate
+ )
return query
- def migrate_table(
- self,
- table,
- sql_fields,
- sql_fields_old,
- sql_fields_aux,
- logfile,
- fake_migrate=False,
- ):
+ def migrate_table(self,
+ table,
+ sql_fields,
+ sql_fields_old,
+ sql_fields_aux,
+ logfile,
+ fake_migrate=False,
+ ):
# logfile is deprecated (moved to adapter.log method)
db = table._db
db._migrated.append(table._tablename)
tablename = table._tablename
+
def fix(item):
- k,v=item
- if not isinstance(v,dict):
- v=dict(type='unknown',sql=v)
+ k, v = item
+ if not isinstance(v, dict):
+ v = dict(type='unknown', sql=v)
if self.ignore_field_case is not True: return k, v
- return k.lower(),v
+ return k.lower(), v
# make sure all field names are lower case to avoid
# migrations because of case cahnge
- sql_fields = dict(map(fix,sql_fields.iteritems()))
- sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
- sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
+ sql_fields = dict(map(fix, sql_fields.iteritems()))
+ sql_fields_old = dict(map(fix, sql_fields_old.iteritems()))
+ sql_fields_aux = dict(map(fix, sql_fields_aux.iteritems()))
if db._debug:
- logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))
+ logging.debug('migrating %s to %s' % (sql_fields_old, sql_fields))
keys = sql_fields.keys()
for key in sql_fields_old:
@@ -1159,11 +1179,10 @@ class BaseAdapter(ConnectionPool):
if self.dbengine in ('postgres',) and \
sql_fields[key]['type'].startswith('geometry'):
# 'sql' == ftype in sql
- query = [ sql_fields[key]['sql'] ]
+ query = [sql_fields[key]['sql']]
else:
- query = ['ALTER TABLE %s ADD %s %s;' % \
- (table.sqlsafe, key,
- sql_fields_aux[key]['sql'].replace(', ', new_add))]
+ query = ['ALTER TABLE %s ADD %s %s;' % (table.sqlsafe, key,
+ sql_fields_aux[key]['sql'].replace(', ', new_add))]
metadata_change = True
elif self.dbengine in ('sqlite', 'spatialite'):
if key in sql_fields:
@@ -1176,11 +1195,11 @@ class BaseAdapter(ConnectionPool):
ftype.startswith('geometry')):
geotype, parms = ftype[:-1].split('(')
schema = parms.split(',')[0]
- query = [ "SELECT DropGeometryColumn ('%(schema)s', "+
+ query = ["SELECT DropGeometryColumn ('%(schema)s', "+
"'%(table)s', '%(field)s');" %
- dict(schema=schema, table=tablename, field=key,) ]
+ dict(schema=schema, table=tablename, field=key)]
elif self.dbengine in ('firebird',):
- query = ['ALTER TABLE %s DROP %s;' %
+ query = ['ALTER TABLE %s DROP %s;' %
(self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)]
else:
query = ['ALTER TABLE %s DROP COLUMN %s;' %
@@ -1222,7 +1241,7 @@ class BaseAdapter(ConnectionPool):
self.log(sub_query + '\n', table)
if fake_migrate:
if db._adapter.commit_on_alter_table:
- self.save_dbt(table,sql_fields_current)
+ self.save_dbt(table, sql_fields_current)
self.log('faked!\n', table)
else:
self.execute(sub_query)
@@ -1233,18 +1252,18 @@ class BaseAdapter(ConnectionPool):
# update table._dbt after alter table.
if db._adapter.commit_on_alter_table:
db.commit()
- self.save_dbt(table,sql_fields_current)
+ self.save_dbt(table, sql_fields_current)
self.log('success!\n', table)
elif metadata_change:
- self.save_dbt(table,sql_fields_current)
+ self.save_dbt(table, sql_fields_current)
if metadata_change and not (query and db._adapter.commit_on_alter_table):
db.commit()
- self.save_dbt(table,sql_fields_current)
+ self.save_dbt(table, sql_fields_current)
self.log('success!\n', table)
- def save_dbt(self,table, sql_fields_current):
+ def save_dbt(self, table, sql_fields_current):
tfile = self.file_open(table._dbt, 'w')
pickle.dump(sql_fields_current, tfile)
self.file_close(tfile)
@@ -1281,7 +1300,7 @@ class BaseAdapter(ConnectionPool):
return 'Random()'
def NOT_NULL(self, default, field_type):
- return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
+ return 'NOT NULL DEFAULT %s' % self.represent(default, field_type)
def COALESCE(self, first, second):
expressions = [self.expand(first)]+[self.expand(e) for e in second]
@@ -1333,18 +1352,21 @@ class BaseAdapter(ConnectionPool):
return 'INSERT INTO %s DEFAULT VALUES;' % (table.sqlsafe)
def insert(self, table, fields):
- query = self._insert(table,fields)
+ query = self._insert(table, fields)
try:
self.execute(query)
except Exception:
e = sys.exc_info()[1]
- if hasattr(table,'_on_insert_error'):
- return table._on_insert_error(table,fields,e)
+ if hasattr(table, '_on_insert_error'):
+ return table._on_insert_error(table, fields, e)
raise e
- if hasattr(table,'_primarykey'):
- return dict([(k[0].name, k[1]) for k in fields \
- if k[0].name in table._primarykey])
+ if hasattr(table, '_primarykey'):
+ mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey])
+ if mydict != {}:
+ return mydict
id = self.lastrowid(table)
+ if hasattr(table, '_primarykey') and len(table._primarykey) == 1:
+ id = {table._primarykey[0]: id}
if not isinstance(id, (int, long)):
return id
rid = Reference(id)
@@ -1352,7 +1374,7 @@ class BaseAdapter(ConnectionPool):
return rid
def bulk_insert(self, table, items):
- return [self.insert(table,item) for item in items]
+ return [self.insert(table, item) for item in items]
def NOT(self, first):
return '(NOT %s)' % self.expand(first)
@@ -1372,18 +1394,19 @@ class BaseAdapter(ConnectionPool):
return '(%s IN (%s))' % (self.expand(first), items)
def REGEXP(self, first, second):
- "regular expression operator"
+ """Regular expression operator"""
raise NotImplementedError
def LIKE(self, first, second):
- "case sensitive like operator"
- raise NotImplementedError
-
- def ILIKE(self, first, second):
- "case in-sensitive like operator"
+ """Case sensitive like operator"""
return '(%s LIKE %s)' % (self.expand(first),
self.expand(second, 'string'))
+ def ILIKE(self, first, second):
+ """Case insensitive like operator"""
+ return '(LOWER(%s) LIKE %s)' % (self.expand(first),
+ self.expand(second, 'string').lower())
+
def STARTSWITH(self, first, second):
return '(%s LIKE %s)' % (self.expand(first),
self.expand(second+'%', 'string'))
@@ -1392,24 +1415,24 @@ class BaseAdapter(ConnectionPool):
return '(%s LIKE %s)' % (self.expand(first),
self.expand('%'+second, 'string'))
- def CONTAINS(self,first,second,case_sensitive=False):
- if first.type in ('string','text', 'json'):
- if isinstance(second,Expression):
- second = Expression(None,self.CONCAT('%',Expression(
- None,self.REPLACE(second,('%','%%'))),'%'))
+ def CONTAINS(self, first, second, case_sensitive=True):
+ if first.type in ('string', 'text', 'json'):
+ if isinstance(second, Expression):
+ second = Expression(None, self.CONCAT('%', Expression(
+ None, self.REPLACE(second, ('%', '%%'))), '%'))
else:
- second = '%'+str(second).replace('%','%%')+'%'
+ second = '%'+str(second).replace('%', '%%')+'%'
elif first.type.startswith('list:'):
- if isinstance(second,Expression):
- second = Expression(None,self.CONCAT(
- '%|',Expression(None,self.REPLACE(
- Expression(None,self.REPLACE(
- second,('%','%%'))),('|','||'))),'|%'))
+ if isinstance(second, Expression):
+ second = Expression(None, self.CONCAT(
+ '%|', Expression(None, self.REPLACE(
+ Expression(None, self.REPLACE(
+ second, ('%', '%%'))), ('|', '||'))), '|%'))
else:
- second = '%|'+str(second).replace('%','%%')\
- .replace('|','||')+'|%'
+ second = '%|'+str(second).replace('%', '%%')\
+ .replace('|', '||')+'|%'
op = case_sensitive and self.LIKE or self.ILIKE
- return op(first,second)
+ return op(first, second)
def EQ(self, first, second=None):
if second is None:
@@ -1423,44 +1446,44 @@ class BaseAdapter(ConnectionPool):
return '(%s <> %s)' % (self.expand(first),
self.expand(second, first.type))
- def LT(self,first,second=None):
+ def LT(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
return '(%s < %s)' % (self.expand(first),
- self.expand(second,first.type))
+ self.expand(second, first.type))
- def LE(self,first,second=None):
+ def LE(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
return '(%s <= %s)' % (self.expand(first),
- self.expand(second,first.type))
+ self.expand(second, first.type))
- def GT(self,first,second=None):
+ def GT(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s > None" % first)
return '(%s > %s)' % (self.expand(first),
- self.expand(second,first.type))
+ self.expand(second, first.type))
- def GE(self,first,second=None):
+ def GE(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
return '(%s >= %s)' % (self.expand(first),
- self.expand(second,first.type))
+ self.expand(second, first.type))
def is_numerical_type(self, ftype):
- return ftype in ('integer','boolean','double','bigint') or \
+ return ftype in ('integer', 'boolean', 'double', 'bigint') or \
ftype.startswith('decimal')
def REPLACE(self, first, (second, third)):
- return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
- self.expand(second,'string'),
- self.expand(third,'string'))
+ return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'),
+ self.expand(second, 'string'),
+ self.expand(third, 'string'))
def CONCAT(self, *items):
- return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
+ return '(%s)' % ' || '.join(self.expand(x, 'string') for x in items)
def ADD(self, first, second):
- if self.is_numerical_type(first.type):
+ if self.is_numerical_type(first.type) or isinstance(first.type, Field):
return '(%s + %s)' % (self.expand(first),
self.expand(second, first.type))
else:
@@ -1483,12 +1506,12 @@ class BaseAdapter(ConnectionPool):
self.expand(second, first.type))
def AS(self, first, second):
- return '%s AS %s' % (self.expand(first), second)
+ return '%s AS %s' % (self.expand(first), second)
def ON(self, first, second):
table_rname = self.table_alias(first)
if use_common_filters(second):
- second = self.common_filter(second,[first._tablename])
+ second = self.common_filter(second, [first._tablename])
return ('%s ON %s') % (self.expand(table_rname), self.expand(second))
def INVERT(self, first):
@@ -1508,8 +1531,9 @@ class BaseAdapter(ConnectionPool):
out = '%s.%s' % (table_rname, expression._rname or (self.QUOTE_TEMPLATE % (expression.name)))
else:
out = '%s.%s' % (self.QUOTE_TEMPLATE % et._tablename, self.QUOTE_TEMPLATE % expression.name)
- if field_type == 'string' and not expression.type in (
- 'string','text','json','password'):
+ if field_type == 'string' \
+ and not expression.type in ('string', 'text', 'json',
+ 'password'):
out = self.CAST(out, self.types['text'])
return out
elif isinstance(expression, (Expression, Query)):
@@ -1520,18 +1544,18 @@ class BaseAdapter(ConnectionPool):
if not second is None:
out = op(first, second, **optional_args)
elif not first is None:
- out = op(first,**optional_args)
+ out = op(first, **optional_args)
elif isinstance(op, str):
if op.endswith(';'):
- op=op[:-1]
+ op = op[:-1]
out = '(%s)' % op
else:
out = op()
return out
elif field_type:
- return str(self.represent(expression,field_type))
- elif isinstance(expression,(list,tuple)):
- return ','.join(self.represent(item,field_type) \
+ return str(self.represent(expression, field_type))
+ elif isinstance(expression, (list, tuple)):
+ return ','.join(self.represent(item, field_type) \
for item in expression)
elif isinstance(expression, bool):
return '1' if expression else '0'
@@ -1543,7 +1567,6 @@ class BaseAdapter(ConnectionPool):
tbl = self.db[tbl]
return tbl.sqlsafe_alias
-
def alias(self, table, alias):
"""
Given a table object, makes a new table object
@@ -1595,8 +1618,8 @@ class BaseAdapter(ConnectionPool):
except Exception:
e = sys.exc_info()[1]
table = self.db[tablename]
- if hasattr(table,'_on_update_error'):
- return table._on_update_error(table,query,fields,e)
+ if hasattr(table, '_on_update_error'):
+ return table._on_update_error(table, query, fields, e)
raise e
try:
return self.cursor.rowcount
@@ -1625,21 +1648,21 @@ class BaseAdapter(ConnectionPool):
try:
counter = self.cursor.rowcount
except:
- counter = None
+ counter = None
### special code to handle CASCADE in SQLite & SpatiaLite
if self.dbengine in ('sqlite', 'spatialite') and counter:
for field in table._referenced_by:
- if field.type=='reference '+table._tablename \
- and field.ondelete=='CASCADE':
+ if field.type == 'reference '+table._tablename \
+ and field.ondelete == 'CASCADE':
db(field.belongs(deleted)).delete()
### end special code to handle CASCADE in SQLite & SpatiaLite
return counter
def get_table(self, query):
tablenames = self.tables(query)
- if len(tablenames)==1:
+ if len(tablenames) == 1:
return tablenames[0]
- elif len(tablenames)<1:
+ elif len(tablenames) < 1:
raise RuntimeError("No table selected")
else:
raise RuntimeError("Too many tables selected")
@@ -1649,15 +1672,15 @@ class BaseAdapter(ConnectionPool):
new_fields = []
append = new_fields.append
for item in fields:
- if isinstance(item,SQLALL):
+ if isinstance(item, SQLALL):
new_fields += item._table
- elif isinstance(item,str):
+ elif isinstance(item, str):
m = self.REGEX_TABLE_DOT_FIELD.match(item)
if m:
- tablename,fieldname = m.groups()
+ tablename, fieldname = m.groups()
append(db[tablename][fieldname])
else:
- append(Expression(db,lambda item=item:item))
+ append(Expression(db, lambda item=item: item))
else:
append(item)
# ## if no fields specified take them all from the requested tables
@@ -1678,7 +1701,7 @@ class BaseAdapter(ConnectionPool):
if isinstance(field, basestring):
m = self.REGEX_TABLE_DOT_FIELD.match(field)
if m:
- tn,fn = m.groups()
+ tn, fn = m.groups()
field = self.db[tn][fn]
for tablename in tables(field):
if not tablename in tablenames:
@@ -1686,13 +1709,17 @@ class BaseAdapter(ConnectionPool):
if len(tablenames) < 1:
raise SyntaxError('Set: no tables selected')
+
def colexpand(field):
return self.expand(field, colnames=True)
+
self._colnames = map(colexpand, fields)
+
def geoexpand(field):
- if isinstance(field.type,str) and field.type.startswith('geometry') and isinstance(field, Field):
+ if isinstance(field.type, str) and field.type.startswith('geo') and isinstance(field, Field):
field = field.st_astext()
return self.expand(field)
+
sql_f = ', '.join(map(geoexpand, fields))
sql_o = ''
sql_s = ''
@@ -1716,7 +1743,7 @@ class BaseAdapter(ConnectionPool):
if not isinstance(inner_join, (tuple, list)):
inner_join = [inner_join]
ijoint = [t._tablename for t in inner_join
- if not isinstance(t,Expression)]
+ if not isinstance(t, Expression)]
ijoinon = [t for t in inner_join if isinstance(t, Expression)]
itables_to_merge={} #issue 490
[itables_to_merge.update(
@@ -1750,26 +1777,25 @@ class BaseAdapter(ConnectionPool):
excluded = tablenames
if use_common_filters(query):
- query = self.common_filter(query,tablenames_for_common_filters)
+ query = self.common_filter(query, tablenames_for_common_filters)
sql_w = ' WHERE ' + self.expand(query) if query else ''
if inner_join and not left:
- sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \
- itables_to_merge.keys()])
+ sql_t = ', '.join([self.table_alias(t)
+ for t in iexcluded + itables_to_merge.keys()])
for t in ijoinon:
sql_t += ' %s %s' % (icommand, t)
elif not inner_join and left:
- sql_t = ', '.join([self.table_alias(t) for t in excluded + \
- tables_to_merge.keys()])
+ sql_t = ', '.join([self.table_alias(t)
+ for t in excluded + tables_to_merge.keys()])
if joint:
sql_t += ' %s %s' % (command,
','.join([t for t in joint]))
for t in joinon:
sql_t += ' %s %s' % (command, t)
elif inner_join and left:
- all_tables_in_query = set(important_tablenames + \
- iimportant_tablenames + \
- tablenames)
+ all_tables_in_query = set(important_tablenames +
+ iimportant_tablenames + tablenames)
tables_in_joinon = set(joinont + ijoinont)
tables_not_in_joinon = \
all_tables_in_query.difference(tables_in_joinon)
@@ -1798,8 +1824,8 @@ class BaseAdapter(ConnectionPool):
sql_o += ' ORDER BY %s' % self.expand(orderby)
if (limitby and not groupby and tablenames and orderby_on_limitby and not orderby):
sql_o += ' ORDER BY %s' % ', '.join(
- [self.db[t][x].sqlsafe for t in tablenames for x in (
- hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey
+ [self.db[t].sqlsafe + '.' + self.db[t][x].sqlsafe_name for t in tablenames for x in (
+ hasattr(self.db[t], '_primarykey') and self.db[t]._primarykey
or ['_id']
)
]
@@ -1820,7 +1846,7 @@ class BaseAdapter(ConnectionPool):
def _fetchall(self):
return self.cursor.fetchall()
- def _select_aux(self,sql,fields,attributes):
+ def _select_aux(self, sql, fields, attributes):
args_get = attributes.get
cache = args_get('cache',None)
if not cache:
@@ -1833,14 +1859,14 @@ class BaseAdapter(ConnectionPool):
def _select_aux2():
self.execute(sql)
return self._fetchall()
- rows = cache_model(key,_select_aux2,time_expire)
- if isinstance(rows,tuple):
+ rows = cache_model(key, _select_aux2, time_expire)
+ if isinstance(rows, tuple):
rows = list(rows)
- limitby = args_get('limitby', None) or (0,)
- rows = self.rowslice(rows,limitby[0],None)
- processor = args_get('processor',self.parse)
- cacheable = args_get('cacheable',False)
- return processor(rows,fields,self._colnames,cacheable=cacheable)
+ limitby = args_get('limitby', None) or (0, )
+ rows = self.rowslice(rows, limitby[0], None)
+ processor = args_get('processor', self.parse)
+ cacheable = args_get('cacheable', False)
+ return processor(rows, fields, self._colnames, cacheable=cacheable)
def select(self, query, fields, attributes):
"""
@@ -1848,18 +1874,18 @@ class BaseAdapter(ConnectionPool):
"""
sql = self._select(query, fields, attributes)
cache = attributes.get('cache', None)
- if cache and attributes.get('cacheable',False):
+ if cache and attributes.get('cacheable', False):
del attributes['cache']
(cache_model, time_expire) = cache
key = self.uri + '/' + sql
- if len(key)>200: key = hashlib_md5(key).hexdigest()
- args = (sql,fields,attributes)
+ if len(key) > 200: key = hashlib_md5(key).hexdigest()
+ args = (sql, fields, attributes)
return cache_model(
key,
- lambda self=self,args=args:self._select_aux(*args),
+ lambda self=self, args=args: self._select_aux(*args),
time_expire)
else:
- return self._select_aux(sql,fields,attributes)
+ return self._select_aux(sql, fields, attributes)
def _count(self, query, distinct=None):
tablenames = self.tables(query)
@@ -1871,7 +1897,7 @@ class BaseAdapter(ConnectionPool):
sql_w = ''
sql_t = ','.join(self.table_alias(t) for t in tablenames)
if distinct:
- if isinstance(distinct,(list, tuple)):
+ if isinstance(distinct, (list, tuple)):
distinct = xorify(distinct)
sql_d = self.expand(distinct)
return 'SELECT count(DISTINCT %s) FROM %s%s;' % \
@@ -1924,24 +1950,23 @@ class BaseAdapter(ConnectionPool):
return ', ADD '
def constraint_name(self, table, fieldname):
- return '%s_%s__constraint' % (table,fieldname)
+ return '%s_%s__constraint' % (table, fieldname)
def create_sequence_and_triggers(self, query, table, **args):
self.execute(query)
-
def log_execute(self, *a, **b):
if not self.connection: raise ValueError(a[0])
if not self.connection: return None
command = a[0]
- if hasattr(self,'filter_sql_command'):
+ if hasattr(self, 'filter_sql_command'):
command = self.filter_sql_command(command)
if self.db._debug:
LOGGER.debug('SQL: %s' % command)
self.db._lastsql = command
t0 = time.time()
ret = self.cursor.execute(command, *a[1:], **b)
- self.db._timings.append((command,time.time()-t0))
+ self.db._timings.append((command, time.time()-t0))
del self.db._timings[:-TIMINGSSIZE]
return ret
@@ -1954,7 +1979,7 @@ class BaseAdapter(ConnectionPool):
obj = obj()
if isinstance(fieldtype, SQLCustomType):
value = fieldtype.encoder(obj)
- if fieldtype.type in ('string','text', 'json'):
+ if fieldtype.type in ('string', 'text', 'json'):
return self.adapt(value)
return value
if isinstance(obj, (Expression, Field)):
@@ -1965,9 +1990,9 @@ class BaseAdapter(ConnectionPool):
elif not isinstance(obj, (list, tuple)):
obj = [obj]
if field_is_type('list:string'):
- obj = map(str,obj)
+ obj = map(str, obj)
else:
- obj = map(int,[o for o in obj if o != ''])
+ obj = map(int, [o for o in obj if o != ''])
# we don't want to bar_encode json objects
if isinstance(obj, (list, tuple)) and (not fieldtype == "json"):
obj = bar_encode(obj)
@@ -2033,7 +2058,7 @@ class BaseAdapter(ConnectionPool):
obj = simplejson.dumps(obj)
else:
raise RuntimeError("missing simplejson")
- if not isinstance(obj,bytes):
+ if not isinstance(obj, bytes):
obj = bytes(obj)
try:
obj.decode(self.db_codec)
@@ -2074,7 +2099,7 @@ class BaseAdapter(ConnectionPool):
return value
else:
key = REGEX_TYPE.match(field_type).group(0)
- return self.parsemap[key](value,field_type)
+ return self.parsemap[key](value, field_type)
def parse_reference(self, value, field_type):
referee = field_type[10:].strip()
@@ -2089,14 +2114,14 @@ class BaseAdapter(ConnectionPool):
def parse_date(self, value, field_type):
if isinstance(value, datetime.datetime):
return value.date()
- if not isinstance(value, (datetime.date,datetime.datetime)):
+ if not isinstance(value, (datetime.date, datetime.datetime)):
(y, m, d) = map(int, str(value)[:10].strip().split('-'))
value = datetime.date(y, m, d)
return value
def parse_time(self, value, field_type):
if not isinstance(value, datetime.time):
- time_items = map(int,str(value)[:8].strip().split(':')[:3])
+ time_items = map(int, str(value)[:8].strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
@@ -2107,21 +2132,21 @@ class BaseAdapter(ConnectionPool):
def parse_datetime(self, value, field_type):
if not isinstance(value, datetime.datetime):
value = str(value)
- date_part,time_part,timezone = value[:10],value[11:19],value[19:]
+ date_part, time_part, timezone = value[:10], value[11:19], value[19:]
if '+' in timezone:
- ms,tz = timezone.split('+')
- h,m = tz.split(':')
+ ms, tz = timezone.split('+')
+ h, m = tz.split(':')
dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
elif '-' in timezone:
- ms,tz = timezone.split('-')
- h,m = tz.split(':')
+ ms, tz = timezone.split('-')
+ h, m = tz.split(':')
dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
else:
dt = None
- (y, m, d) = map(int,date_part.split('-'))
- time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
+ (y, m, d) = map(int, date_part.split('-'))
+ time_parts = time_part and time_part.split(':')[:3] or (0, 0, 0)
while len(time_parts)<3: time_parts.append(0)
- time_items = map(int,time_parts)
+ time_items = map(int, time_parts)
(h, mi, s) = time_items
value = datetime.datetime(y, m, d, h, mi, s)
if dt:
@@ -2178,27 +2203,26 @@ class BaseAdapter(ConnectionPool):
return value
def build_parsemap(self):
- self.parsemap = {
- 'id':self.parse_id,
- 'integer':self.parse_integer,
- 'bigint':self.parse_integer,
- 'float':self.parse_double,
- 'double':self.parse_double,
- 'reference':self.parse_reference,
- 'boolean':self.parse_boolean,
- 'date':self.parse_date,
- 'time':self.parse_time,
- 'datetime':self.parse_datetime,
- 'blob':self.parse_blob,
- 'decimal':self.parse_decimal,
- 'json':self.parse_json,
- 'list:integer':self.parse_list_integers,
- 'list:reference':self.parse_list_references,
- 'list:string':self.parse_list_strings,
- }
+ self.parsemap = {'id': self.parse_id,
+ 'integer': self.parse_integer,
+ 'bigint': self.parse_integer,
+ 'float': self.parse_double,
+ 'double': self.parse_double,
+ 'reference': self.parse_reference,
+ 'boolean': self.parse_boolean,
+ 'date': self.parse_date,
+ 'time': self.parse_time,
+ 'datetime': self.parse_datetime,
+ 'blob': self.parse_blob,
+ 'decimal': self.parse_decimal,
+ 'json': self.parse_json,
+ 'list:integer': self.parse_list_integers,
+ 'list:reference': self.parse_list_references,
+ 'list:string': self.parse_list_strings,
+ }
def parse(self, rows, fields, colnames, blob_decode=True,
- cacheable = False):
+ cacheable=False):
db = self.db
virtualtables = []
new_rows = []
@@ -2213,26 +2237,26 @@ class BaseAdapter(ConnectionPool):
field = table[fieldname]
ft = field.type
tmps.append((tablename, fieldname, table, field, ft))
- for (i,row) in enumerate(rows):
+ for (i, row) in enumerate(rows):
new_row = Row()
- for (j,colname) in enumerate(colnames):
+ for (j, colname) in enumerate(colnames):
value = row[j]
tmp = tmps[j]
if tmp:
- (tablename,fieldname,table,field,ft) = tmp
+ (tablename, fieldname, table, field, ft) = tmp
colset = new_row.get(tablename, None)
if colset is None:
colset = new_row[tablename] = Row()
if tablename not in virtualtables:
virtualtables.append(tablename)
- value = self.parse_value(value,ft,blob_decode)
+ value = self.parse_value(value, ft, blob_decode)
if field.filter_out:
value = field.filter_out(value)
colset[fieldname] = value
# for backward compatibility
- if ft=='id' and fieldname!='id' and \
- not 'id' in table.fields:
+ if ft == 'id' and fieldname != 'id' \
+ and not 'id' in table.fields:
colset['id'] = value
if ft == 'id' and not cacheable:
@@ -2245,50 +2269,49 @@ class BaseAdapter(ConnectionPool):
colset.gae_item = value
else:
id = value
- colset.update_record = RecordUpdater(colset,table,id)
- colset.delete_record = RecordDeleter(table,id)
+ colset.update_record = RecordUpdater(colset, table, id)
+ colset.delete_record = RecordDeleter(table, id)
if table._db._lazy_tables:
colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id)
for rfield in table._referenced_by:
referee_link = db._referee_name and \
db._referee_name % dict(
- table=rfield.tablename,field=rfield.name)
+ table=rfield.tablename, field=rfield.name)
if referee_link and not referee_link in colset:
- colset[referee_link] = LazySet(rfield,id)
+ colset[referee_link] = LazySet(rfield, id)
else:
if not '_extra' in new_row:
new_row['_extra'] = Row()
new_row['_extra'][colname] = \
self.parse_value(value,
- fields[j].type,blob_decode)
+ fields[j].type, blob_decode)
new_column_name = \
REGEX_SELECT_AS_PARSER.search(colname)
if not new_column_name is None:
column_name = new_column_name.groups(0)
- setattr(new_row,column_name[0],value)
+ setattr(new_row, column_name[0], value)
new_rows.append(new_row)
rowsobj = Rows(db, new_rows, colnames, rawrows=rows)
-
for tablename in virtualtables:
table = db[tablename]
- fields_virtual = [(f,v) for (f,v) in table.iteritems()
- if isinstance(v,FieldVirtual)]
- fields_lazy = [(f,v) for (f,v) in table.iteritems()
- if isinstance(v,FieldMethod)]
+ fields_virtual = [(f, v) for (f, v) in table.iteritems()
+ if isinstance(v, FieldVirtual)]
+ fields_lazy = [(f, v) for (f, v) in table.iteritems()
+ if isinstance(v, FieldMethod)]
if fields_virtual or fields_lazy:
for row in rowsobj.records:
box = row[tablename]
- for f,v in fields_virtual:
+ for f, v in fields_virtual:
try:
box[f] = v.f(row)
except AttributeError:
- pass # not enough fields to define virtual field
- for f,v in fields_lazy:
+ pass # not enough fields to define virtual field
+ for f, v in fields_lazy:
try:
- box[f] = (v.handler or VirtualCommand)(v.f,row)
+ box[f] = (v.handler or VirtualCommand)(v.f, row)
except AttributeError:
- pass # not enough fields to define virtual field
+ pass # not enough fields to define virtual field
### old style virtual fields
for item in table.virtualfields:
@@ -2320,14 +2343,14 @@ class BaseAdapter(ConnectionPool):
query = query & newquery
return query
- def CASE(self,query,t,f):
+ def CASE(self, query, t, f):
def represent(x):
- types = {type(True):'boolean',type(0):'integer',type(1.0):'double'}
+ types = {type(True):'boolean', type(0):'integer', type(1.0):'double'}
if x is None: return 'NULL'
- elif isinstance(x,Expression): return str(x)
- else: return self.represent(x,types.get(type(x),'string'))
- return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \
- (self.expand(query),represent(t),represent(f)))
+ elif isinstance(x, Expression): return str(x)
+ else: return self.represent(x, types.get(type(x), 'string'))
+ return Expression(self.db, 'CASE WHEN %s THEN %s ELSE %s END' % \
+ (self.expand(query), represent(t), represent(f)))
def sqlsafe_table(self, tablename, ot=None):
if ot is not None:
@@ -2337,28 +2360,27 @@ class BaseAdapter(ConnectionPool):
def sqlsafe_field(self, fieldname):
return self.QUOTE_TEMPLATE % fieldname
+
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################
-
class SQLiteAdapter(BaseAdapter):
- drivers = ('sqlite2','sqlite3')
+ drivers = ('sqlite2', 'sqlite3')
can_select_for_update = None # support ourselves with BEGIN TRANSACTION
- def EXTRACT(self,field,what):
+ def EXTRACT(self, field, what):
return "web2py_extract('%s',%s)" % (what, self.expand(field))
@staticmethod
def web2py_extract(lookup, s):
- table = {
- 'year': (0, 4),
- 'month': (5, 7),
- 'day': (8, 10),
- 'hour': (11, 13),
- 'minute': (14, 16),
- 'second': (17, 19),
- }
+ table = {'year': (0, 4),
+ 'month': (5, 7),
+ 'day': (8, 10),
+ 'hour': (11, 13),
+ 'minute': (14, 16),
+ 'second': (17, 19),
+ }
try:
if lookup != 'epoch':
(i, j) = table[lookup]
@@ -2372,7 +2394,7 @@ class SQLiteAdapter(BaseAdapter):
def web2py_regexp(expression, item):
return re.compile(expression).search(item) is not None
- def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
@@ -2390,7 +2412,7 @@ class SQLiteAdapter(BaseAdapter):
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
- self.dbpath = uri.split('://',1)[1]
+ self.dbpath = uri.split('://', 1)[1]
if self.dbpath[0] != '/':
if PYTHON_VERSION[0] == 2:
self.dbpath = pjoin(
@@ -2401,8 +2423,10 @@ class SQLiteAdapter(BaseAdapter):
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
+
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -2412,7 +2436,7 @@ class SQLiteAdapter(BaseAdapter):
self.connection.create_function("REGEXP", 2,
SQLiteAdapter.web2py_regexp)
- if self.adapter_args.get('foreign_keys',True):
+ if self.adapter_args.get('foreign_keys', True):
self.execute('PRAGMA foreign_keys=ON;')
def _truncate(self, table, mode=''):
@@ -2423,13 +2447,13 @@ class SQLiteAdapter(BaseAdapter):
def lastrowid(self, table):
return self.cursor.lastrowid
- def REGEXP(self,first,second):
+ def REGEXP(self, first, second):
return '(%s REGEXP %s)' % (self.expand(first),
- self.expand(second,'string'))
+ self.expand(second, 'string'))
def select(self, query, fields, attributes):
"""
- Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
+ Simulate `SELECT ... FOR UPDATE` with `BEGIN IMMEDIATE TRANSACTION`.
Note that the entire database, rather than one record, is locked
(it will be locked eventually anyway by the following UPDATE).
"""
@@ -2437,13 +2461,14 @@ class SQLiteAdapter(BaseAdapter):
self.execute('BEGIN IMMEDIATE TRANSACTION;')
return super(SQLiteAdapter, self).select(query, fields, attributes)
+
class SpatiaLiteAdapter(SQLiteAdapter):
- drivers = ('sqlite3','sqlite2')
+ drivers = ('sqlite3', 'sqlite2')
types = copy.copy(BaseAdapter.types)
types.update(geometry='GEOMETRY')
- def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326, after_connection=None):
self.db = db
@@ -2461,7 +2486,7 @@ class SpatiaLiteAdapter(SQLiteAdapter):
if uri.startswith('spatialite:memory'):
self.dbpath = ':memory:'
else:
- self.dbpath = uri.split('://',1)[1]
+ self.dbpath = uri.split('://', 1)[1]
if self.dbpath[0] != '/':
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
@@ -2469,8 +2494,10 @@ class SpatiaLiteAdapter(SQLiteAdapter):
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
+
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -2494,60 +2521,60 @@ class SpatiaLiteAdapter(SQLiteAdapter):
second['precision'], second['options'])
def ST_ASTEXT(self, first):
- return 'AsText(%s)' %(self.expand(first))
+ return 'AsText(%s)' % (self.expand(first))
def ST_CONTAINS(self, first, second):
- return 'Contains(%s,%s)' %(self.expand(first),
+ return 'Contains(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
- return 'Distance(%s,%s)' %(self.expand(first),
+ return 'Distance(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_EQUALS(self, first, second):
- return 'Equals(%s,%s)' %(self.expand(first),
+ return 'Equals(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
- return 'Intersects(%s,%s)' %(self.expand(first),
+ return 'Intersects(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
- return 'Overlaps(%s,%s)' %(self.expand(first),
+ return 'Overlaps(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
- return 'Simplify(%s,%s)' %(self.expand(first),
+ return 'Simplify(%s,%s)' % (self.expand(first),
self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
- return 'Touches(%s,%s)' %(self.expand(first),
+ return 'Touches(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def ST_WITHIN(self, first, second):
- return 'Within(%s,%s)' %(self.expand(first),
+ return 'Within(%s,%s)' % (self.expand(first),
self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geo'):
- srid = 4326 # Spatialite default srid for geometry
+ srid = 4326 # Spatialite default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
parms = parms.split(',')
if len(parms) >= 2:
schema, srid = parms[:2]
-# if field_is_type('geometry'):
- value = "ST_GeomFromText('%s',%s)" %(obj, srid)
-# elif field_is_type('geography'):
-# value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
-# else:
-# raise SyntaxError, 'Invalid field type %s' %fieldtype
+ # if field_is_type('geometry'):
+ value = "ST_GeomFromText('%s',%s)" % (obj, srid)
+ # elif field_is_type('geography'):
+ # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
+ # else:
+ # raise SyntaxError, 'Invalid field type %s' %fieldtype
return value
return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
- drivers = ('zxJDBC_sqlite',)
+ drivers = ('zxJDBC_sqlite', )
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
@@ -2566,14 +2593,16 @@ class JDBCSQLiteAdapter(SQLiteAdapter):
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
- self.dbpath = uri.split('://',1)[1]
+ self.dbpath = uri.split('://', 1)[1]
if self.dbpath[0] != '/':
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
- def connector(dbpath=self.dbpath,driver_args=driver_args):
+
+ def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.connect(
self.driver.getConnection('jdbc:sqlite:'+dbpath),
**driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -2587,45 +2616,44 @@ class JDBCSQLiteAdapter(SQLiteAdapter):
class MySQLAdapter(BaseAdapter):
- drivers = ('MySQLdb','pymysql', 'mysqlconnector')
+ drivers = ('MySQLdb', 'pymysql', 'mysqlconnector')
commit_on_alter_table = True
support_distributed_transaction = True
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'LONGTEXT',
- 'json': 'LONGTEXT',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'LONGBLOB',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'DOUBLE',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'DATETIME',
- 'id': 'INT AUTO_INCREMENT NOT NULL',
- 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'LONGTEXT',
- 'list:string': 'LONGTEXT',
- 'list:reference': 'LONGTEXT',
- 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
- 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'LONGTEXT',
+ 'json': 'LONGTEXT',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'LONGBLOB',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'DOUBLE',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'DATETIME',
+ 'id': 'INT AUTO_INCREMENT NOT NULL',
+ 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'LONGTEXT',
+ 'list:string': 'LONGTEXT',
+ 'list:reference': 'LONGTEXT',
+ 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
+ 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
QUOTE_TEMPLATE = "`%s`"
- def varquote(self,name):
- return varquote_aux(name,'`%s`')
+ def varquote(self, name):
+ return varquote_aux(name, '`%s`')
def RANDOM(self):
return 'RAND()'
- def SUBSTRING(self,field,parameters):
+ def SUBSTRING(self, field, parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
parameters[0], parameters[1])
@@ -2633,13 +2661,17 @@ class MySQLAdapter(BaseAdapter):
return "UNIX_TIMESTAMP(%s)" % self.expand(first)
def CONCAT(self, *items):
- return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
+ return 'CONCAT(%s)' % ','.join(self.expand(x, 'string') for x in items)
- def REGEXP(self,first,second):
+ def REGEXP(self, first, second):
return '(%s REGEXP %s)' % (self.expand(first),
- self.expand(second,'string'))
+ self.expand(second, 'string'))
- def _drop(self,table,mode):
+ def CAST(self, first, second):
+ if second=='LONGTEXT': second = 'CHAR'
+ return 'CAST(%s AS %s)' % (first, second)
+
+ def _drop(self, table, mode):
# breaks db integrity but without this mysql does not drop table
table_rname = table.sqlsafe
return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
@@ -2648,14 +2680,14 @@ class MySQLAdapter(BaseAdapter):
def _insert_empty(self, table):
return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)
- def distributed_transaction_begin(self,key):
+ def distributed_transaction_begin(self, key):
self.execute('XA START;')
- def prepare(self,key):
+ def prepare(self, key):
self.execute("XA END;")
self.execute("XA PREPARE;")
- def commit_prepared(self,ley):
+ def commit_prepared(self,key):
self.execute("XA COMMIT;")
def rollback_prepared(self,key):
@@ -2663,19 +2695,19 @@ class MySQLAdapter(BaseAdapter):
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
@@ -2701,7 +2733,6 @@ class MySQLAdapter(BaseAdapter):
port=port,
charset=charset)
-
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
@@ -2711,60 +2742,57 @@ class MySQLAdapter(BaseAdapter):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
- def lastrowid(self,table):
+ def lastrowid(self, table):
self.execute('select last_insert_id();')
return int(self.cursor.fetchone()[0])
class PostgreSQLAdapter(BaseAdapter):
- drivers = ('psycopg2','pg8000')
+ drivers = ('psycopg2', 'pg8000')
QUOTE_TEMPLATE = '"%s"'
support_distributed_transaction = True
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BYTEA',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT8',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'SERIAL PRIMARY KEY',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'TEXT',
- 'list:string': 'TEXT',
- 'list:reference': 'TEXT',
- 'geometry': 'GEOMETRY',
- 'geography': 'GEOGRAPHY',
- 'big-id': 'BIGSERIAL PRIMARY KEY',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'TEXT',
+ 'json': 'TEXT',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BYTEA',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INTEGER',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT8',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'SERIAL PRIMARY KEY',
+ 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'TEXT',
+ 'list:string': 'TEXT',
+ 'list:reference': 'TEXT',
+ 'geometry': 'GEOMETRY',
+ 'geography': 'GEOGRAPHY',
+ 'big-id': 'BIGSERIAL PRIMARY KEY',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
- }
+ def varquote(self, name):
+ return varquote_aux(name, '"%s"')
-
- def varquote(self,name):
- return varquote_aux(name,'"%s"')
-
- def adapt(self,obj):
+ def adapt(self, obj):
if self.driver_name == 'psycopg2':
return psycopg2_adapt(obj).getquoted()
elif self.driver_name == 'pg8000':
- return "'%s'" % str(obj).replace("%","%%").replace("'","''")
+ return "'%s'" % str(obj).replace("%", "%%").replace("'", "''")
else:
- return "'%s'" % str(obj).replace("'","''")
+ return "'%s'" % str(obj).replace("'", "''")
- def sequence_name(self,table):
+ def sequence_name(self, table):
return self.QUOTE_TEMPLATE % (table + '_id_seq')
def RANDOM(self):
@@ -2772,21 +2800,21 @@ class PostgreSQLAdapter(BaseAdapter):
def ADD(self, first, second):
t = first.type
- if t in ('text','string','password', 'json', 'upload','blob'):
+ if t in ('text', 'string', 'password', 'json', 'upload', 'blob'):
return '(%s || %s)' % (self.expand(first), self.expand(second, t))
else:
return '(%s + %s)' % (self.expand(first), self.expand(second, t))
- def distributed_transaction_begin(self,key):
+ def distributed_transaction_begin(self, key):
return
- def prepare(self,key):
+ def prepare(self, key):
self.execute("PREPARE TRANSACTION '%s';" % key)
- def commit_prepared(self,key):
+ def commit_prepared(self, key):
self.execute("COMMIT PREPARED '%s';" % key)
- def rollback_prepared(self,key):
+ def rollback_prepared(self, key):
self.execute("ROLLBACK PREPARED '%s';" % key)
def create_sequence_and_triggers(self, query, table, **args):
@@ -2798,21 +2826,21 @@ class PostgreSQLAdapter(BaseAdapter):
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "postgres"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
@@ -2844,8 +2872,10 @@ class PostgreSQLAdapter(BaseAdapter):
self.driver.__version__)
else:
self.__version__ = None
- def connector(msg=msg,driver_args=driver_args):
- return self.driver.connect(msg,**driver_args)
+
+ def connector(msg=msg, driver_args=driver_args):
+ return self.driver.connect(msg, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -2854,7 +2884,7 @@ class PostgreSQLAdapter(BaseAdapter):
self.execute("SET standard_conforming_strings=on;")
self.try_json()
- def lastrowid(self,table = None):
+ def lastrowid(self, table=None):
self.execute("select lastval()")
return int(self.cursor.fetchone()[0])
@@ -2874,33 +2904,33 @@ class PostgreSQLAdapter(BaseAdapter):
self.native_json = True
else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")
- def LIKE(self,first,second):
- args = (self.expand(first), self.expand(second,'string'))
+ def LIKE(self, first, second):
+ args = (self.expand(first), self.expand(second, 'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s LIKE %s)' % args
- def ILIKE(self,first,second):
- args = (self.expand(first), self.expand(second,'string'))
+ def ILIKE(self, first, second):
+ args = (self.expand(first), self.expand(second, 'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s ILIKE %s)' % args
- def REGEXP(self,first,second):
+ def REGEXP(self, first, second):
return '(%s ~ %s)' % (self.expand(first),
- self.expand(second,'string'))
+ self.expand(second, 'string'))
- def STARTSWITH(self,first,second):
- return '(%s ILIKE %s)' % (self.expand(first),
- self.expand(second+'%','string'))
+ def STARTSWITH(self, first, second):
+ return '(%s LIKE %s)' % (self.expand(first),
+ self.expand(second+'%', 'string'))
- def ENDSWITH(self,first,second):
- return '(%s ILIKE %s)' % (self.expand(first),
- self.expand('%'+second,'string'))
+ def ENDSWITH(self, first, second):
+ return '(%s LIKE %s)' % (self.expand(first),
+ self.expand('%'+second, 'string'))
# GIS functions
@@ -2908,82 +2938,92 @@ class PostgreSQLAdapter(BaseAdapter):
"""
http://postgis.org/docs/ST_AsGeoJSON.html
"""
- return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
- self.expand(first), second['precision'], second['options'])
+ return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (second['version'],
+ self.expand(first),
+ second['precision'],
+ second['options'])
def ST_ASTEXT(self, first):
"""
http://postgis.org/docs/ST_AsText.html
"""
- return 'ST_AsText(%s)' %(self.expand(first))
+ return 'ST_AsText(%s)' % (self.expand(first))
def ST_X(self, first):
"""
http://postgis.org/docs/ST_X.html
"""
- return 'ST_X(%s)' %(self.expand(first))
+ return 'ST_X(%s)' % (self.expand(first))
def ST_Y(self, first):
"""
http://postgis.org/docs/ST_Y.html
"""
- return 'ST_Y(%s)' %(self.expand(first))
+ return 'ST_Y(%s)' % (self.expand(first))
def ST_CONTAINS(self, first, second):
"""
http://postgis.org/docs/ST_Contains.html
"""
- return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Contains(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
"""
http://postgis.org/docs/ST_Distance.html
"""
- return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Distance(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_EQUALS(self, first, second):
"""
http://postgis.org/docs/ST_Equals.html
"""
- return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Equals(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
"""
http://postgis.org/docs/ST_Intersects.html
"""
- return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Intersects(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
"""
http://postgis.org/docs/ST_Overlaps.html
"""
- return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Overlaps(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
"""
http://postgis.org/docs/ST_Simplify.html
"""
- return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
+ return 'ST_Simplify(%s,%s)' % (self.expand(first),
+ self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
"""
http://postgis.org/docs/ST_Touches.html
"""
- return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Touches(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_WITHIN(self, first, second):
"""
http://postgis.org/docs/ST_Within.html
"""
- return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
+ return 'ST_Within(%s,%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_DWITHIN(self, first, (second, third)):
"""
- http://postgis.org/docs/ST_Within.html
+ http://postgis.org/docs/ST_DWithin.html
"""
- return 'ST_DWithin(%s,%s,%s)' %(self.expand(first),
- self.expand(second, first.type),
- self.expand(third, 'double'))
+ return 'ST_DWithin(%s,%s,%s)' % (self.expand(first),
+ self.expand(second, first.type),
+ self.expand(third, 'double'))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
@@ -2994,9 +3034,9 @@ class PostgreSQLAdapter(BaseAdapter):
if len(parms) >= 2:
schema, srid = parms[:2]
if field_is_type('geometry'):
- value = "ST_GeomFromText('%s',%s)" %(obj, srid)
+ value = "ST_GeomFromText('%s',%s)" % (obj, srid)
elif field_is_type('geography'):
- value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
+ value = "ST_GeogFromText('SRID=%s;%s')" % (srid, obj)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return value
@@ -3007,35 +3047,35 @@ class PostgreSQLAdapter(BaseAdapter):
raise ValueError('Invalid mode: %s' % mode)
return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';']
-class NewPostgreSQLAdapter(PostgreSQLAdapter):
- drivers = ('psycopg2','pg8000')
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BYTEA',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT8',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'SERIAL PRIMARY KEY',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'BIGINT[]',
- 'list:string': 'TEXT[]',
- 'list:reference': 'BIGINT[]',
- 'geometry': 'GEOMETRY',
- 'geography': 'GEOGRAPHY',
- 'big-id': 'BIGSERIAL PRIMARY KEY',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
+class NewPostgreSQLAdapter(PostgreSQLAdapter):
+ drivers = ('psycopg2', 'pg8000')
+
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'TEXT',
+ 'json': 'TEXT',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BYTEA',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INTEGER',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT8',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'SERIAL PRIMARY KEY',
+ 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'BIGINT[]',
+ 'list:string': 'TEXT[]',
+ 'list:reference': 'BIGINT[]',
+ 'geometry': 'GEOMETRY',
+ 'geography': 'GEOGRAPHY',
+ 'big-id': 'BIGSERIAL PRIMARY KEY',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
def parse_list_integers(self, value, field_type):
return value
@@ -3054,31 +3094,31 @@ class NewPostgreSQLAdapter(PostgreSQLAdapter):
elif not isinstance(obj, (list, tuple)):
obj = [obj]
if field_is_type('list:string'):
- obj = map(str,obj)
+ obj = map(str, obj)
else:
- obj = map(int,obj)
+ obj = map(int, obj)
return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
return BaseAdapter.represent(self, obj, fieldtype)
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
- drivers = ('zxJDBC',)
+ drivers = ('zxJDBC', )
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
- adapter_args={}, do_connect=True, after_connection=None ):
+ adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "postgres"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
@@ -3096,8 +3136,10 @@ class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
raise SyntaxError('Database name required')
port = m.group('port') or '5432'
msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
- def connector(msg=msg,driver_args=driver_args):
- return self.driver.connect(*msg,**driver_args)
+
+ def connector(msg=msg, driver_args=driver_args):
+ return self.driver.connect(*msg, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -3109,38 +3151,36 @@ class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
class OracleAdapter(BaseAdapter):
- drivers = ('cx_Oracle',)
+ drivers = ('cx_Oracle', )
commit_on_alter_table = False
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR2(%(length)s)',
- 'text': 'CLOB',
- 'json': 'CLOB',
- 'password': 'VARCHAR2(%(length)s)',
- 'blob': 'CLOB',
- 'upload': 'VARCHAR2(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'NUMBER',
- 'float': 'FLOAT',
- 'double': 'BINARY_DOUBLE',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'CHAR(8)',
- 'datetime': 'DATE',
- 'id': 'NUMBER PRIMARY KEY',
- 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'CLOB',
- 'list:string': 'CLOB',
- 'list:reference': 'CLOB',
- 'big-id': 'NUMBER PRIMARY KEY',
- 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR2(%(length)s)',
+ 'text': 'CLOB',
+ 'json': 'CLOB',
+ 'password': 'VARCHAR2(%(length)s)',
+ 'blob': 'CLOB',
+ 'upload': 'VARCHAR2(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'NUMBER',
+ 'float': 'FLOAT',
+ 'double': 'BINARY_DOUBLE',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'CHAR(8)',
+ 'datetime': 'DATE',
+ 'id': 'NUMBER PRIMARY KEY',
+ 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'CLOB',
+ 'list:string': 'CLOB',
+ 'list:reference': 'CLOB',
+ 'big-id': 'NUMBER PRIMARY KEY',
+ 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
-
- def trigger_name(self,tablename):
+ def trigger_name(self, tablename):
return '%s_trigger' % tablename
def LEFT_JOIN(self):
@@ -3149,10 +3189,14 @@ class OracleAdapter(BaseAdapter):
def RANDOM(self):
return 'dbms_random.value'
- def NOT_NULL(self,default,field_type):
- return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
+ def NOT_NULL(self, default, field_type):
+ return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)
- def _drop(self,table,mode):
+ def REGEXP(self, first, second):
+ return 'REGEXP_LIKE(%s, %s)' % (self.expand(first),
+ self.expand(second, 'string'))
+
+ def _drop(self, table, mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name]
@@ -3184,7 +3228,7 @@ class OracleAdapter(BaseAdapter):
return "to_date('%s','yyyy-mm-dd')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
- obj = obj.isoformat()[:19].replace('T',' ')
+ obj = obj.isoformat()[:19].replace('T', ' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
@@ -3192,7 +3236,7 @@ class OracleAdapter(BaseAdapter):
return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
return None
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
@@ -3204,11 +3248,13 @@ class OracleAdapter(BaseAdapter):
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
if not 'threaded' in driver_args:
- driver_args['threaded']=True
- def connector(uri=ruri,driver_args=driver_args):
- return self.driver.connect(uri,**driver_args)
+ driver_args['threaded'] = True
+
+ def connector(uri=ruri, driver_args=driver_args):
+ return self.driver.connect(uri, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -3228,7 +3274,7 @@ class OracleAdapter(BaseAdapter):
command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
args.append(m.group('clob')[6:-2].replace("''", "'"))
i += 1
- if command[-1:]==';':
+ if command[-1:] == ';':
command = command[:-1]
return self.log_execute(command, args)
@@ -3258,14 +3304,14 @@ class OracleAdapter(BaseAdapter):
SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename,
- sequence_name=sequence_name,id=id_name))
+ sequence_name=sequence_name, id=id_name))
- def lastrowid(self,table):
+ def lastrowid(self, table):
sequence_name = table._sequence_name
self.execute('SELECT %s.currval FROM dual;' % sequence_name)
return long(self.cursor.fetchone()[0])
- #def parse_value(self, value, field_type, blob_decode=True):
+ # def parse_value(self, value, field_type, blob_decode=True):
# if blob_decode and isinstance(value, cx_Oracle.LOB):
# try:
# value = value.read()
@@ -3275,61 +3321,60 @@ class OracleAdapter(BaseAdapter):
# return BaseAdapter.parse_value(self, value, field_type, blob_decode)
def _fetchall(self):
- if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description):
- return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
- for c in r]) for r in self.cursor]
+ if any(x[1] == cx_Oracle.CLOB for x in self.cursor.description):
+ return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c)
+ for c in r]) for r in self.cursor]
else:
return self.cursor.fetchall()
def sqlsafe_table(self, tablename, ot=None):
if ot is not None:
- return (self.QUOTE_TEMPLATE + ' ' \
- + self.QUOTE_TEMPLATE) % (ot, tablename)
+ return (self.QUOTE_TEMPLATE + ' ' + self.QUOTE_TEMPLATE) \
+ % (ot, tablename)
return self.QUOTE_TEMPLATE % tablename
class MSSQLAdapter(BaseAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
T_SEP = 'T'
- QUOTE_TEMPLATE = "[%s]"
+ QUOTE_TEMPLATE = '"%s"'
- types = {
- 'boolean': 'BIT',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'IMAGE',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATETIME',
- 'time': 'CHAR(8)',
- 'datetime': 'DATETIME',
- 'id': 'INT IDENTITY PRIMARY KEY',
- 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'TEXT',
- 'list:string': 'TEXT',
- 'list:reference': 'TEXT',
- 'geometry': 'geometry',
- 'geography': 'geography',
- 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
- 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'BIT',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'TEXT',
+ 'json': 'TEXT',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'IMAGE',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATETIME',
+ 'time': 'CHAR(8)',
+ 'datetime': 'DATETIME',
+ 'id': 'INT IDENTITY PRIMARY KEY',
+ 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'TEXT',
+ 'list:string': 'TEXT',
+ 'list:reference': 'TEXT',
+ 'geometry': 'geometry',
+ 'geography': 'geography',
+ 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
+ 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
- def concat_add(self,tablename):
+ def concat_add(self, tablename):
return '; ALTER TABLE %s ADD ' % tablename
- def varquote(self,name):
- return varquote_aux(name,'[%s]')
+ def varquote(self, name):
+ return varquote_aux(name, '[%s]')
- def EXTRACT(self,field,what):
+ def EXTRACT(self, field, what):
return "DATEPART(%s,%s)" % (what, self.expand(field))
def LEFT_JOIN(self):
@@ -3344,7 +3389,7 @@ class MSSQLAdapter(BaseAdapter):
def CAST(self, first, second):
return first # apparently no cast necessary in MSSQL
- def SUBSTRING(self,field,parameters):
+ def SUBSTRING(self, field, parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
def PRIMARY_KEY(self,key):
@@ -3369,14 +3414,14 @@ class MSSQLAdapter(BaseAdapter):
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "mssql"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
@@ -3384,7 +3429,7 @@ class MSSQLAdapter(BaseAdapter):
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
@@ -3421,24 +3466,26 @@ class MSSQLAdapter(BaseAdapter):
# Parse the optional url name-value arg pairs after the '?'
# (in the form of arg1=value1&arg2=value2&...)
# Default values (drivers like FreeTDS insist on uppercase parameter keys)
- argsdict = { 'DRIVER':'{SQL Server}' }
+ argsdict = {'DRIVER':'{SQL Server}'}
urlargs = m.group('urlargs') or ''
for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
- def connector(cnxn=cnxn,driver_args=driver_args):
- return self.driver.connect(cnxn,**driver_args)
+
+ def connector(cnxn=cnxn, driver_args=driver_args):
+ return self.driver.connect(cnxn, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
- def lastrowid(self,table):
+ def lastrowid(self, table):
#self.execute('SELECT @@IDENTITY;')
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
- def rowslice(self,rows,minimum=0,maximum=None):
+ def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
@@ -3447,37 +3494,44 @@ class MSSQLAdapter(BaseAdapter):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
def CONCAT(self, *items):
- return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
+ return '(%s)' % ' + '.join(self.expand(x, 'string') for x in items)
# GIS Spatial Extensions
# No STAsGeoJSON in MSSQL
def ST_ASTEXT(self, first):
- return '%s.STAsText()' %(self.expand(first))
+ return '%s.STAsText()' % (self.expand(first))
def ST_CONTAINS(self, first, second):
- return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STContains(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
- return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STDistance(%s)' % (self.expand(first),
+ self.expand(second, first.type))
def ST_EQUALS(self, first, second):
- return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STEquals(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
- return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STIntersects(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
- return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STOverlaps(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
# no STSimplify in MSSQL
def ST_TOUCHES(self, first, second):
- return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STTouches(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
def ST_WITHIN(self, first, second):
- return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
+ return '%s.STWithin(%s)=1' % (self.expand(first),
+ self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
@@ -3486,13 +3540,13 @@ class MSSQLAdapter(BaseAdapter):
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
- return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
+ return "geometry::STGeomFromText('%s',%s)" % (obj, srid)
elif fieldtype == 'geography':
srid = 4326 # MS SQL default srid for geography
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
- return "geography::STGeomFromText('%s',%s)" %(obj, srid)
+ return "geography::STGeomFromText('%s',%s)" % (obj, srid)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
@@ -3500,7 +3554,10 @@ class MSSQLAdapter(BaseAdapter):
class MSSQL3Adapter(MSSQLAdapter):
- """ experimental support for pagination in MSSQL"""
+ """Experimental support for pagination in MSSQL
+
+ Requires MSSQL >= 2005, uses `ROW_NUMBER()`
+ """
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
@@ -3515,13 +3572,47 @@ class MSSQL3Adapter(MSSQLAdapter):
sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
sql_f_iproxy = ', '.join(sql_f_iproxy)
sql_f_oproxy = ', '.join(sql_f_outer)
- return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
- return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
- def rowslice(self,rows,minimum=0,maximum=None):
+ return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s, sql_f_oproxy, sql_s, sql_f, sql_f_iproxy, sql_t, sql_w, sql_g_inner, lmin, lmax)
+ return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
+
+ def rowslice(self, rows, minimum=0, maximum=None):
return rows
+
class MSSQL4Adapter(MSSQLAdapter):
- """ support for true pagination in MSSQL >= 2012"""
+ """Support for "native" pagination
+
+ Requires MSSQL >= 2012, uses `OFFSET ... ROWS ... FETCH NEXT ... ROWS ONLY`
+ """
+
+ types = {
+ 'boolean': 'BIT',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'VARCHAR(MAX)',
+ 'json': 'VARCHAR(MAX)',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'IMAGE',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATETIME',
+ 'time': 'TIME(7)',
+ 'datetime': 'DATETIME',
+ 'id': 'INT IDENTITY PRIMARY KEY',
+ 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'VARCHAR(MAX)',
+ 'list:string': 'VARCHAR(MAX)',
+ 'list:reference': 'VARCHAR(MAX)',
+ 'geometry': 'geometry',
+ 'geography': 'geography',
+ 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
+ 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
@@ -3540,76 +3631,74 @@ class MSSQL4Adapter(MSSQLAdapter):
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
- def rowslice(self,rows,minimum=0,maximum=None):
+ def rowslice(self, rows, minimum=0, maximum=None):
return rows
-class MSSQL2Adapter(MSSQLAdapter):
- drivers = ('pyodbc',)
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'NVARCHAR(%(length)s)',
- 'text': 'NTEXT',
- 'json': 'NTEXT',
- 'password': 'NVARCHAR(%(length)s)',
- 'blob': 'IMAGE',
- 'upload': 'NVARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATETIME',
- 'time': 'CHAR(8)',
- 'datetime': 'DATETIME',
- 'id': 'INT IDENTITY PRIMARY KEY',
- 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'NTEXT',
- 'list:string': 'NTEXT',
- 'list:reference': 'NTEXT',
- 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
- 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
- }
+class MSSQL2Adapter(MSSQLAdapter):
+ drivers = ('pyodbc', )
+
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'NVARCHAR(%(length)s)',
+ 'text': 'NTEXT',
+ 'json': 'NTEXT',
+ 'password': 'NVARCHAR(%(length)s)',
+ 'blob': 'IMAGE',
+ 'upload': 'NVARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATETIME',
+ 'time': 'CHAR(8)',
+ 'datetime': 'DATETIME',
+ 'id': 'INT IDENTITY PRIMARY KEY',
+ 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'NTEXT',
+ 'list:string': 'NTEXT',
+ 'list:reference': 'NTEXT',
+ 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
+ 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
def represent(self, obj, fieldtype):
value = BaseAdapter.represent(self, obj, fieldtype)
- if fieldtype in ('string','text', 'json') and value[:1]=="'":
+ if fieldtype in ('string', 'text', 'json') and value[:1] == "'":
value = 'N'+value
return value
- def execute(self,a):
+ def execute(self, a):
return self.log_execute(a.decode('utf8'))
class VerticaAdapter(MSSQLAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
T_SEP = ' '
- types = {
- 'boolean': 'BOOLEAN',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'BYTEA',
- 'json': 'VARCHAR(%(length)s)',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BYTEA',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'DOUBLE PRECISION',
- 'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'DATETIME',
- 'id': 'IDENTITY',
- 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'BYTEA',
- 'list:string': 'BYTEA',
- 'list:reference': 'BYTEA',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
-
+ types = {'boolean': 'BOOLEAN',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'BYTEA',
+ 'json': 'VARCHAR(%(length)s)',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BYTEA',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'DOUBLE PRECISION',
+ 'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'DATETIME',
+ 'id': 'IDENTITY',
+ 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'BYTEA',
+ 'list:string': 'BYTEA',
+ 'list:reference': 'BYTEA',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
def EXTRACT(self, first, what):
return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
@@ -3625,54 +3714,53 @@ class VerticaAdapter(MSSQLAdapter):
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
- def lastrowid(self,table):
+ def lastrowid(self, table):
self.execute('SELECT LAST_INSERT_ID();')
return long(self.cursor.fetchone()[0])
def execute(self, a):
return self.log_execute(a)
+
class SybaseAdapter(MSSQLAdapter):
- drivers = ('Sybase',)
+ drivers = ('Sybase', )
- types = {
- 'boolean': 'BIT',
- 'string': 'CHAR VARYING(%(length)s)',
- 'text': 'TEXT',
- 'json': 'TEXT',
- 'password': 'CHAR VARYING(%(length)s)',
- 'blob': 'IMAGE',
- 'upload': 'CHAR VARYING(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATETIME',
- 'time': 'CHAR(8)',
- 'datetime': 'DATETIME',
- 'id': 'INT IDENTITY PRIMARY KEY',
- 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'TEXT',
- 'list:string': 'TEXT',
- 'list:reference': 'TEXT',
- 'geometry': 'geometry',
- 'geography': 'geography',
- 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
- 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'BIT',
+ 'string': 'CHAR VARYING(%(length)s)',
+ 'text': 'TEXT',
+ 'json': 'TEXT',
+ 'password': 'CHAR VARYING(%(length)s)',
+ 'blob': 'IMAGE',
+ 'upload': 'CHAR VARYING(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATETIME',
+ 'time': 'CHAR(8)',
+ 'datetime': 'DATETIME',
+ 'id': 'INT IDENTITY PRIMARY KEY',
+ 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'TEXT',
+ 'list:string': 'TEXT',
+ 'list:reference': 'TEXT',
+ 'geometry': 'geometry',
+ 'geography': 'geography',
+ 'big-id': 'BIGINT IDENTITY PRIMARY KEY',
+ 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
-
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "sybase"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
@@ -3680,7 +3768,7 @@ class SybaseAdapter(MSSQLAdapter):
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
@@ -3713,51 +3801,51 @@ class SybaseAdapter(MSSQLAdapter):
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
- dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)
+ dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)
driver_args.update(user = credential_decoder(user),
password = credential_decoder(password))
- def connector(dsn=dsn,driver_args=driver_args):
- return self.driver.connect(dsn,**driver_args)
+ def connector(dsn=dsn, driver_args=driver_args):
+ return self.driver.connect(dsn, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
class FireBirdAdapter(BaseAdapter):
- drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
+ drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')
commit_on_alter_table = False
support_distributed_transaction = True
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'BLOB SUB_TYPE 1',
- 'json': 'BLOB SUB_TYPE 1',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BLOB SUB_TYPE 0',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'DOUBLE PRECISION',
- 'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'INTEGER PRIMARY KEY',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'BLOB SUB_TYPE 1',
- 'list:string': 'BLOB SUB_TYPE 1',
- 'list:reference': 'BLOB SUB_TYPE 1',
- 'big-id': 'BIGINT PRIMARY KEY',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'BLOB SUB_TYPE 1',
+ 'json': 'BLOB SUB_TYPE 1',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BLOB SUB_TYPE 0',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INTEGER',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'DOUBLE PRECISION',
+ 'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'INTEGER PRIMARY KEY',
+ 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'BLOB SUB_TYPE 1',
+ 'list:string': 'BLOB SUB_TYPE 1',
+ 'list:reference': 'BLOB SUB_TYPE 1',
+ 'big-id': 'BIGINT PRIMARY KEY',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
- def sequence_name(self,tablename):
+ def sequence_name(self, tablename):
return ('genid_' + self.QUOTE_TEMPLATE) % tablename
- def trigger_name(self,tablename):
+ def trigger_name(self, tablename):
return 'trg_id_%s' % tablename
def RANDOM(self):
@@ -3766,23 +3854,23 @@ class FireBirdAdapter(BaseAdapter):
def EPOCH(self, first):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
- def NOT_NULL(self,default,field_type):
- return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
+ def NOT_NULL(self, default, field_type):
+ return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)
- def SUBSTRING(self,field,parameters):
+ def SUBSTRING(self, field, parameters):
return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
def LENGTH(self, first):
return "CHAR_LENGTH(%s)" % self.expand(first)
- def CONTAINS(self,first,second,case_sensitive=False):
+ def CONTAINS(self, first, second, case_sensitive=False):
if first.type.startswith('list:'):
- second = Expression(None,self.CONCAT('|',Expression(
- None,self.REPLACE(second,('|','||'))),'|'))
+ second = Expression(None, self.CONCAT('|', Expression(
+ None, self.REPLACE(second, ('|', '||'))), '|'))
return '(%s CONTAINING %s)' % (self.expand(first),
self.expand(second, 'string'))
- def _drop(self,table,mode):
+ def _drop(self, table, mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP GENERATOR %s;' % sequence_name]
@@ -3798,19 +3886,19 @@ class FireBirdAdapter(BaseAdapter):
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
@@ -3828,13 +3916,14 @@ class FireBirdAdapter(BaseAdapter):
if not db:
raise SyntaxError('Database name required')
charset = m.group('charset') or 'UTF8'
- driver_args.update(dsn='%s/%s:%s' % (host,port,db),
- user = credential_decoder(user),
- password = credential_decoder(password),
- charset = charset)
+ driver_args.update(dsn='%s/%s:%s' % (host, port, db),
+ user=credential_decoder(user),
+ password=credential_decoder(password),
+ charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -3847,30 +3936,30 @@ class FireBirdAdapter(BaseAdapter):
self.execute('set generator %s to 0;' % sequence_name)
self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
- def lastrowid(self,table):
+ def lastrowid(self, table):
sequence_name = table._sequence_name
self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
return long(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
- drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
+ drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
@@ -3896,44 +3985,45 @@ class FireBirdEmbeddedAdapter(FireBirdAdapter):
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
-class InformixAdapter(BaseAdapter):
- drivers = ('informixdb',)
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'BLOB SUB_TYPE 1',
- 'json': 'BLOB SUB_TYPE 1',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BLOB SUB_TYPE 0',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INTEGER',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'DOUBLE PRECISION',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'CHAR(8)',
- 'datetime': 'DATETIME',
- 'id': 'SERIAL',
- 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'BLOB SUB_TYPE 1',
- 'list:string': 'BLOB SUB_TYPE 1',
- 'list:reference': 'BLOB SUB_TYPE 1',
- 'big-id': 'BIGSERIAL',
- 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
- 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
- }
+class InformixAdapter(BaseAdapter):
+ drivers = ('informixdb', )
+
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'BLOB SUB_TYPE 1',
+ 'json': 'BLOB SUB_TYPE 1',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BLOB SUB_TYPE 0',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INTEGER',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'DOUBLE PRECISION',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'CHAR(8)',
+ 'datetime': 'DATETIME',
+ 'id': 'SERIAL',
+ 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'BLOB SUB_TYPE 1',
+ 'list:string': 'BLOB SUB_TYPE 1',
+ 'list:reference': 'BLOB SUB_TYPE 1',
+ 'big-id': 'BIGSERIAL',
+ 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
+ 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
+ }
def RANDOM(self):
return 'Random()'
- def NOT_NULL(self,default,field_type):
- return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
+ def NOT_NULL(self, default, field_type):
+ return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
@@ -3957,7 +4047,7 @@ class InformixAdapter(BaseAdapter):
return "to_date('%s','%%Y-%%m-%%d')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
- obj = obj.isoformat()[:19].replace('T',' ')
+ obj = obj.isoformat()[:19].replace('T', ' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
@@ -3967,7 +4057,7 @@ class InformixAdapter(BaseAdapter):
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
@@ -3979,7 +4069,7 @@ class InformixAdapter(BaseAdapter):
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
@@ -3998,21 +4088,24 @@ class InformixAdapter(BaseAdapter):
raise SyntaxError('Database name required')
user = credential_decoder(user)
password = credential_decoder(password)
- dsn = '%s@%s' % (db,host)
- driver_args.update(user=user,password=password,autocommit=True)
- def connector(dsn=dsn,driver_args=driver_args):
- return self.driver.connect(dsn,**driver_args)
+ dsn = '%s@%s' % (db, host)
+ driver_args.update(user=user, password=password, autocommit=True)
+
+ def connector(dsn=dsn, driver_args=driver_args):
+ return self.driver.connect(dsn, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
- def execute(self,command):
- if command[-1:]==';':
+ def execute(self, command):
+ if command[-1:] == ';':
command = command[:-1]
return self.log_execute(command)
- def lastrowid(self,table):
+ def lastrowid(self, table):
return self.cursor.sqlerrd[1]
+
class InformixSEAdapter(InformixAdapter):
""" work in progress """
@@ -4020,40 +4113,39 @@ class InformixSEAdapter(InformixAdapter):
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
- def rowslice(self,rows,minimum=0,maximum=None):
+ def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class DB2Adapter(BaseAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'CLOB',
- 'json': 'CLOB',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BLOB',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'REAL',
- 'double': 'DOUBLE',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
- 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'CLOB',
- 'list:string': 'CLOB',
- 'list:reference': 'CLOB',
- 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
- 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'CLOB',
+ 'json': 'CLOB',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BLOB',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'REAL',
+ 'double': 'DOUBLE',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
+ 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'CLOB',
+ 'list:string': 'CLOB',
+ 'list:reference': 'CLOB',
+ 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
+ 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
+ }
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
@@ -4073,96 +4165,99 @@ class DB2Adapter(BaseAdapter):
return "BLOB('%s')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
- obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
+ obj = obj.isoformat()[:19].replace('T', '-').replace(':', '.')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+'-00.00.00'
return "'%s'" % obj
return None
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "db2"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
- def connector(cnxn=ruri,driver_args=driver_args):
- return self.driver.connect(cnxn,**driver_args)
+
+ def connector(cnxn=ruri, driver_args=driver_args):
+ return self.driver.connect(cnxn, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
- def execute(self,command):
+ def execute(self, command):
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command)
- def lastrowid(self,table):
+ def lastrowid(self, table):
self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
return long(self.cursor.fetchone()[0])
- def rowslice(self,rows,minimum=0,maximum=None):
+ def rowslice(self, rows, minimum=0, maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
class TeradataAdapter(BaseAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'VARCHAR(2000)',
- 'json': 'VARCHAR(4000)',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'BLOB',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'REAL',
- 'double': 'DOUBLE',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- # Modified Constraint syntax for Teradata.
- # Teradata does not support ON DELETE.
- 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
- 'reference': 'INT',
- 'list:integer': 'VARCHAR(4000)',
- 'list:string': 'VARCHAR(4000)',
- 'list:reference': 'VARCHAR(4000)',
- 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
- 'big-reference': 'BIGINT',
- 'reference FK': ' REFERENCES %(foreign_key)s',
- 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'VARCHAR(2000)',
+ 'json': 'VARCHAR(4000)',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'BLOB',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'REAL',
+ 'double': 'DOUBLE',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ # Modified Constraint syntax for Teradata.
+ # Teradata does not support ON DELETE.
+ 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
+ 'reference': 'INT',
+ 'list:integer': 'VARCHAR(4000)',
+ 'list:string': 'VARCHAR(4000)',
+ 'list:reference': 'VARCHAR(4000)',
+ 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
+ 'big-reference': 'BIGINT',
+ 'reference FK': ' REFERENCES %(foreign_key)s',
+ 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
+ }
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "teradata"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
- def connector(cnxn=ruri,driver_args=driver_args):
- return self.driver.connect(cnxn,**driver_args)
+
+ def connector(cnxn=ruri, driver_args=driver_args):
+ return self.driver.connect(cnxn, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
- def close(self,action='commit',really=True):
+ def close(self, action='commit', really=True):
# Teradata does not implicitly close off the cursor
# leading to SQL_ACTIVE_STATEMENTS limit errors
self.cursor.close()
@@ -4182,39 +4277,39 @@ class TeradataAdapter(BaseAdapter):
tablename = table._tablename
return ['DELETE FROM %s ALL;' % (tablename)]
-INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
- # (ANSI-SQL wants this form of name
- # to be a delimited identifier)
+INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
+ # (ANSI-SQL wants this form of name
+ # to be a delimited identifier)
+
class IngresAdapter(BaseAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'CLOB',
- 'json': 'CLOB',
- 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
- 'blob': 'BLOB',
- 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
- 'integer': 'INTEGER4', # or int8...
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT8',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'ANSIDATE',
- 'time': 'TIME WITHOUT TIME ZONE',
- 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
- 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
- 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'CLOB',
- 'list:string': 'CLOB',
- 'list:reference': 'CLOB',
- 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
- 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'CLOB',
+ 'json': 'CLOB',
+ 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
+ 'blob': 'BLOB',
+ 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
+ 'integer': 'INTEGER4', # or int8...
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT8',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'ANSIDATE',
+ 'time': 'TIME WITHOUT TIME ZONE',
+ 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
+ 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
+ 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'CLOB',
+ 'list:string': 'CLOB',
+ 'list:reference': 'CLOB',
+ 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
+ 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
+ }
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
@@ -4233,14 +4328,14 @@ class IngresAdapter(BaseAdapter):
sql_o += ' OFFSET %d' % (lmin, )
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "ingres"
self._driver = pyodbc
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
@@ -4261,8 +4356,9 @@ class IngresAdapter(BaseAdapter):
vnode = '(local)'
servertype = 'ingres'
ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
- def connector(cnxn=ruri,driver_args=driver_args):
- return self.driver.connect(cnxn,**driver_args)
+
+ def connector(cnxn=ruri, driver_args=driver_args):
+ return self.driver.connect(cnxn, **driver_args)
self.connector = connector
@@ -4273,7 +4369,7 @@ class IngresAdapter(BaseAdapter):
# post create table auto inc code (if needed)
# modify table to btree for performance....
# Older Ingres releases could use rule/trigger like Oracle above.
- if hasattr(table,'_primarykey'):
+ if hasattr(table, '_primarykey'):
modify_tbl_sql = 'modify %s to btree unique on %s' % \
(table._tablename,
', '.join(["'%s'" % x for x in table.primarykey]))
@@ -4285,8 +4381,7 @@ class IngresAdapter(BaseAdapter):
self.execute(query)
self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
-
- def lastrowid(self,table):
+ def lastrowid(self, table):
tmp_seqname='%s_iisq' % table
self.execute('select current value for %s' % tmp_seqname)
return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
@@ -4294,65 +4389,64 @@ class IngresAdapter(BaseAdapter):
class IngresUnicodeAdapter(IngresAdapter):
- drivers = ('pyodbc',)
+ drivers = ('pyodbc', )
+
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'NVARCHAR(%(length)s)',
+ 'text': 'NCLOB',
+ 'json': 'NCLOB',
+ 'password': 'NVARCHAR(%(length)s)', # Not sure what this contains utf8 or nvarchar. Or even bytes?
+ 'blob': 'BLOB',
+ 'upload': 'VARCHAR(%(length)s)', # FIXME utf8 or nvarchar... or blob? what is this type?
+ 'integer': 'INTEGER4', # or int8...
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'FLOAT8',
+ 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
+ 'date': 'ANSIDATE',
+ 'time': 'TIME WITHOUT TIME ZONE',
+ 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
+ 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
+ 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'NCLOB',
+ 'list:string': 'NCLOB',
+ 'list:reference': 'NCLOB',
+ 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
+ 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
+ }
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'NVARCHAR(%(length)s)',
- 'text': 'NCLOB',
- 'json': 'NCLOB',
- 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
- 'blob': 'BLOB',
- 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
- 'integer': 'INTEGER4', # or int8...
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'FLOAT8',
- 'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
- 'date': 'ANSIDATE',
- 'time': 'TIME WITHOUT TIME ZONE',
- 'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
- 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
- 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'NCLOB',
- 'list:string': 'NCLOB',
- 'list:reference': 'NCLOB',
- 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
- 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
- }
class SAPDBAdapter(BaseAdapter):
- drivers = ('sapdb',)
+ drivers = ('sapdb', )
support_distributed_transaction = False
- types = {
- 'boolean': 'CHAR(1)',
- 'string': 'VARCHAR(%(length)s)',
- 'text': 'LONG',
- 'json': 'LONG',
- 'password': 'VARCHAR(%(length)s)',
- 'blob': 'LONG',
- 'upload': 'VARCHAR(%(length)s)',
- 'integer': 'INT',
- 'bigint': 'BIGINT',
- 'float': 'FLOAT',
- 'double': 'DOUBLE PRECISION',
- 'decimal': 'FIXED(%(precision)s,%(scale)s)',
- 'date': 'DATE',
- 'time': 'TIME',
- 'datetime': 'TIMESTAMP',
- 'id': 'INT PRIMARY KEY',
- 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- 'list:integer': 'LONG',
- 'list:string': 'LONG',
- 'list:reference': 'LONG',
- 'big-id': 'BIGINT PRIMARY KEY',
- 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
- }
+ types = {'boolean': 'CHAR(1)',
+ 'string': 'VARCHAR(%(length)s)',
+ 'text': 'LONG',
+ 'json': 'LONG',
+ 'password': 'VARCHAR(%(length)s)',
+ 'blob': 'LONG',
+ 'upload': 'VARCHAR(%(length)s)',
+ 'integer': 'INT',
+ 'bigint': 'BIGINT',
+ 'float': 'FLOAT',
+ 'double': 'DOUBLE PRECISION',
+ 'decimal': 'FIXED(%(precision)s,%(scale)s)',
+ 'date': 'DATE',
+ 'time': 'TIME',
+ 'datetime': 'TIMESTAMP',
+ 'id': 'INT PRIMARY KEY',
+ 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ 'list:integer': 'LONG',
+ 'list:string': 'LONG',
+ 'list:reference': 'LONG',
+ 'big-id': 'BIGINT PRIMARY KEY',
+ 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
+ }
- def sequence_name(self,table):
+ def sequence_name(self, table):
return (self.QUOTE_TEMPLATE + '_id_Seq') % table
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
@@ -4368,26 +4462,26 @@ class SAPDBAdapter(BaseAdapter):
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
- self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
- % (table._tablename, table._id.name, table._sequence_name))
+ self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');"
+ % (table._tablename, table._id.name, table._sequence_name))
self.execute(query)
REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:@]+)(\:(?P[0-9]+))?/(?P[^\?]+)(\?sslmode=(?P.+))?$')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sapdb"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
@@ -4403,19 +4497,21 @@ class SAPDBAdapter(BaseAdapter):
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
+
def connector(user=user, password=password, database=db,
- host=host, driver_args=driver_args):
+ host=host, driver_args=driver_args):
return self.driver.Connection(user, password, database,
host, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
- def lastrowid(self,table):
+ def lastrowid(self, table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
+
class CubridAdapter(MySQLAdapter):
- drivers = ('cubriddb',)
+ drivers = ('cubriddb', )
REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P[^?]+)(\?set_encoding=(?P\w+))?$')
@@ -4425,13 +4521,13 @@ class CubridAdapter(MySQLAdapter):
self.db = db
self.dbengine = "cubrid"
self.uri = uri
- if do_connect: self.find_driver(adapter_args,uri)
+ if do_connect: self.find_driver(adapter_args, uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
- ruri = uri.split('://',1)[1]
+ ruri = uri.split('://', 1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
@@ -4449,12 +4545,13 @@ class CubridAdapter(MySQLAdapter):
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '30000')
- charset = m.group('charset') or 'utf8'
user = credential_decoder(user)
passwd = credential_decoder(password)
- def connector(host=host,port=port,db=db,
- user=user,passwd=password,driver_args=driver_args):
- return self.driver.connect(host,port,db,user,passwd,**driver_args)
+
+ def connector(host=host, port=port, db=db,
+ user=user, passwd=passwd, driver_args=driver_args):
+ return self.driver.connect(host, port, db, user, passwd, **driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -4464,15 +4561,14 @@ class CubridAdapter(MySQLAdapter):
######## GAE MySQL ##########
-
class DatabaseStoredFile:
web2py_filesystem = False
- def escape(self,obj):
+ def escape(self, obj):
return self.db._adapter.escape(obj)
- def __init__(self,db,filename,mode):
+ def __init__(self, db, filename, mode):
if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
self.db = db
@@ -4485,11 +4581,11 @@ class DatabaseStoredFile:
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
self.db.executesql(sql)
DatabaseStoredFile.web2py_filesystem = True
- self.p=0
+ self.p = 0
self.data = ''
- if mode in ('r','rw','a'):
+ if mode in ('r', 'rw', 'a'):
query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
- % filename
+ % filename
rows = self.db.executesql(query)
if rows:
self.data = rows[0][0]
@@ -4499,7 +4595,7 @@ class DatabaseStoredFile:
self.data = datafile.read()
finally:
datafile.close()
- elif mode in ('r','rw'):
+ elif mode in ('r', 'rw'):
raise RuntimeError("File %s does not exist" % filename)
def read(self, bytes):
@@ -4508,22 +4604,22 @@ class DatabaseStoredFile:
return data
def readline(self):
- i = self.data.find('\n',self.p)+1
+ i = self.data.find('\n', self.p)+1
if i>0:
data, self.p = self.data[self.p:i], i
else:
data, self.p = self.data[self.p:], len(self.data)
return data
- def write(self,data):
+ def write(self, data):
self.data += data
def close_connection(self):
if self.db is not None:
self.db.executesql(
"DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
- query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
- % (self.filename, self.data.replace("'","''"))
+ query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" \
+ % (self.filename, self.data.replace("'", "''"))
self.db.executesql(query)
self.db.commit()
self.db = None
@@ -4552,20 +4648,21 @@ class DatabaseStoredFile:
class UseDatabaseStoredFile:
def file_exists(self, filename):
- return DatabaseStoredFile.exists(self.db,filename)
+ return DatabaseStoredFile.exists(self.db, filename)
def file_open(self, filename, mode='rb', lock=True):
- return DatabaseStoredFile(self.db,filename,mode)
+ return DatabaseStoredFile(self.db, filename, mode)
def file_close(self, fileobj):
fileobj.close_connection()
- def file_delete(self,filename):
+ def file_delete(self, filename):
query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
self.db.executesql(query)
self.db.commit()
-class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
+
+class GoogleSQLAdapter(UseDatabaseStoredFile, MySQLAdapter):
uploads_in_blob = True
REGEX_URI = re.compile('^(?P.*)/(?P.*)$')
@@ -4582,8 +4679,7 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
self.db_codec = db_codec
self._after_connection = after_connection
if do_connect: self.find_driver(adapter_args, uri)
- self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
- os.sep+'applications'+os.sep,1)[1])
+ self.folder = folder or pjoin('$HOME', THREAD_LOCAL.folder.split(os.sep+'applications'+os.sep, 1)[1])
ruri = uri.split("://")[1]
m = self.REGEX_URI.match(ruri)
if not m:
@@ -4593,11 +4689,13 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
driver_args['instance'] = instance
if not 'charset' in driver_args:
driver_args['charset'] = 'utf8'
- self.createdb = createdb = adapter_args.get('createdb',True)
+ self.createdb = createdb = adapter_args.get('createdb', True)
if not createdb:
driver_args['database'] = db
+
def connector(driver_args=driver_args):
return rdbms.connect(**driver_args)
+
self.connector = connector
if do_connect: self.reconnect()
@@ -4612,10 +4710,11 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
def execute(self, command, *a, **b):
return self.log_execute(command.decode('utf8'), *a, **b)
- def find_driver(self,adapter_args,uri=None):
+ def find_driver(self, adapter_args, uri=None):
self.adapter_args = adapter_args
self.driver = "google"
+
class NoSQLAdapter(BaseAdapter):
can_select_for_update = False
QUOTE_TEMPLATE = '%s'
@@ -4642,7 +4741,7 @@ class NoSQLAdapter(BaseAdapter):
if self.dbengine == 'google:datastore':
if isinstance(fieldtype, gae.Property):
return obj
- is_string = isinstance(fieldtype,str)
+ is_string = isinstance(fieldtype, str)
is_list = is_string and field_is_type('list:')
if is_list:
if not obj:
@@ -4650,12 +4749,12 @@ class NoSQLAdapter(BaseAdapter):
if not isinstance(obj, (list, tuple)):
obj = [obj]
if obj == '' and not \
- (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
+ (is_string and fieldtype[:2] in ['st', 'te', 'pa', 'up']):
return None
if not obj is None:
if isinstance(obj, list) and not is_list:
obj = [self.represent(o, fieldtype) for o in obj]
- elif fieldtype in ('integer','bigint','id'):
+ elif fieldtype in ('integer', 'bigint', 'id'):
obj = long(obj)
elif fieldtype == 'double':
obj = float(obj)
@@ -4670,14 +4769,14 @@ class NoSQLAdapter(BaseAdapter):
obj = False
elif fieldtype == 'date':
if not isinstance(obj, datetime.date):
- (y, m, d) = map(int,str(obj).strip().split('-'))
+ (y, m, d) = map(int, str(obj).strip().split('-'))
obj = datetime.date(y, m, d)
- elif isinstance(obj,datetime.datetime):
+ elif isinstance(obj, datetime.datetime):
(y, m, d) = (obj.year, obj.month, obj.day)
obj = datetime.date(y, m, d)
elif fieldtype == 'time':
if not isinstance(obj, datetime.time):
- time_items = map(int,str(obj).strip().split(':')[:3])
+ time_items = map(int, str(obj).strip().split(':')[:3])
if len(time_items) == 3:
(h, mi, s) = time_items
else:
@@ -4685,8 +4784,8 @@ class NoSQLAdapter(BaseAdapter):
obj = datetime.time(h, mi, s)
elif fieldtype == 'datetime':
if not isinstance(obj, datetime.datetime):
- (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
- time_items = map(int,str(obj)[11:].strip().split(':')[:3])
+ (y, m, d) = map(int, str(obj)[:10].strip().split('-'))
+ time_items = map(int, str(obj)[11:].strip().split(':')[:3])
while len(time_items)<3:
time_items.append(0)
(h, mi, s) = time_items
@@ -4703,28 +4802,28 @@ class NoSQLAdapter(BaseAdapter):
else:
raise RuntimeError("missing simplejson")
elif is_string and field_is_type('list:string'):
- return map(self.to_unicode,obj)
+ return map(self.to_unicode, obj)
elif is_list:
- return map(int,obj)
+ return map(int, obj)
else:
obj = self.to_unicode(obj)
return obj
- def _insert(self,table,fields):
+ def _insert(self, table, fields):
return 'insert %s in %s' % (fields, table)
- def _count(self,query,distinct=None):
+ def _count(self, query, distinct=None):
return 'count %s' % repr(query)
- def _select(self,query,fields,attributes):
+ def _select(self, query, fields, attributes):
return 'select %s where %s' % (repr(fields), repr(query))
- def _delete(self,tablename, query):
- return 'delete %s where %s' % (repr(tablename),repr(query))
+ def _delete(self, tablename, query):
+ return 'delete %s where %s' % (repr(tablename), repr(query))
- def _update(self,tablename,query,fields):
+ def _update(self, tablename, query, fields):
return 'update %s (%s) where %s' % (repr(tablename),
- repr(fields),repr(query))
+ repr(fields), repr(query))
def commit(self):
"""
@@ -4744,47 +4843,80 @@ class NoSQLAdapter(BaseAdapter):
"""
pass
-
# these functions should never be called!
- def OR(self,first,second): raise SyntaxError("Not supported")
- def AND(self,first,second): raise SyntaxError("Not supported")
- def AS(self,first,second): raise SyntaxError("Not supported")
- def ON(self,first,second): raise SyntaxError("Not supported")
- def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
- def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
- def ADD(self,first,second): raise SyntaxError("Not supported")
- def SUB(self,first,second): raise SyntaxError("Not supported")
- def MUL(self,first,second): raise SyntaxError("Not supported")
- def DIV(self,first,second): raise SyntaxError("Not supported")
+ def OR(self, first, second): raise SyntaxError("Not supported")
+
+ def AND(self, first, second): raise SyntaxError("Not supported")
+
+ def AS(self, first, second): raise SyntaxError("Not supported")
+
+ def ON(self, first, second): raise SyntaxError("Not supported")
+
+ def STARTSWITH(self, first, second=None): raise SyntaxError("Not supported")
+
+ def ENDSWITH(self, first, second=None): raise SyntaxError("Not supported")
+
+ def ADD(self, first, second): raise SyntaxError("Not supported")
+
+ def SUB(self, first, second): raise SyntaxError("Not supported")
+
+ def MUL(self, first, second): raise SyntaxError("Not supported")
+
+ def DIV(self, first, second): raise SyntaxError("Not supported")
+
def LOWER(self,first): raise SyntaxError("Not supported")
+
def UPPER(self,first): raise SyntaxError("Not supported")
+
def EXTRACT(self,first,what): raise SyntaxError("Not supported")
+
def LENGTH(self, first): raise SyntaxError("Not supported")
+
def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
+
def LEFT_JOIN(self): raise SyntaxError("Not supported")
+
def RANDOM(self): raise SyntaxError("Not supported")
- def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
- def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
- def ILIKE(self,first,second): raise SyntaxError("Not supported")
- def drop(self,table,mode): raise SyntaxError("Not supported")
- def alias(self,table,alias): raise SyntaxError("Not supported")
- def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
- def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
- def prepare(self,key): raise SyntaxError("Not supported")
- def commit_prepared(self,key): raise SyntaxError("Not supported")
- def rollback_prepared(self,key): raise SyntaxError("Not supported")
- def concat_add(self,table): raise SyntaxError("Not supported")
+
+ def SUBSTRING(self, field, parameters): raise SyntaxError("Not supported")
+
+ def PRIMARY_KEY(self, key): raise SyntaxError("Not supported")
+
+ def ILIKE(self, first, second): raise SyntaxError("Not supported")
+
+ def drop(self, table, mode): raise SyntaxError("Not supported")
+
+ def alias(self, table, alias): raise SyntaxError("Not supported")
+
+ def migrate_table(self, *a, **b): raise SyntaxError("Not supported")
+
+ def distributed_transaction_begin(self, key): raise SyntaxError("Not supported")
+
+ def prepare(self, key): raise SyntaxError("Not supported")
+
+ def commit_prepared(self, key): raise SyntaxError("Not supported")
+
+ def rollback_prepared(self, key): raise SyntaxError("Not supported")
+
+ def concat_add(self, table): raise SyntaxError("Not supported")
+
def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
+
def create_sequence_and_triggers(self, query, table, **args): pass
- def log_execute(self,*a,**b): raise SyntaxError("Not supported")
- def execute(self,*a,**b): raise SyntaxError("Not supported")
+
+ def log_execute(self, *a, **b): raise SyntaxError("Not supported")
+
+ def execute(self, *a, **b): raise SyntaxError("Not supported")
+
def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
- def lastrowid(self,table): raise SyntaxError("Not supported")
- def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
+
+ def lastrowid(self, table): raise SyntaxError("Not supported")
+
+ def rowslice(self, rows, minimum=0, maximum=None): raise SyntaxError("Not supported")
class GAEF(object):
- def __init__(self,name,op,value,apply):
+ def __init__(self, name, op, value, apply):
self.name=name=='id' and '__key__' or name
self.op=op
self.value=value
@@ -4796,75 +4928,80 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
"""
NDB:
- You can enable NDB by using adapter_args:
+ You can enable NDB by using adapter_args::
- db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})
+ db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})
ndb_settings is optional and can be used for per model caching settings.
- It must be a dict in this form:
- ndb_settings = {:{:}}
+ It must be a dict in this form::
+
+ ndb_settings = {:{:}}
+
See: https://developers.google.com/appengine/docs/python/ndb/cache
"""
+ MAX_FETCH_LIMIT = 1000000
uploads_in_blob = True
types = {}
+ # reconnect is not required for Datastore dbs
+ reconnect = lambda *args, **kwargs: None
def file_exists(self, filename): pass
+
def file_open(self, filename, mode='rb', lock=True): pass
+
def file_close(self, fileobj): pass
REGEX_NAMESPACE = re.compile('.*://(?P.+)')
- def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
- self.use_ndb = ('use_ndb' in adapter_args) and adapter_args['use_ndb']
+ self.use_ndb = adapter_args.get('use_ndb', uri.startswith('google:datastore+ndb'))
if self.use_ndb is True:
- self.types.update({
- 'boolean': ndb.BooleanProperty,
- 'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
- 'text': ndb.TextProperty,
- 'json': ndb.TextProperty,
- 'password': ndb.StringProperty,
- 'blob': ndb.BlobProperty,
- 'upload': ndb.StringProperty,
- 'integer': ndb.IntegerProperty,
- 'bigint': ndb.IntegerProperty,
- 'float': ndb.FloatProperty,
- 'double': ndb.FloatProperty,
- 'decimal': NDBDecimalProperty,
- 'date': ndb.DateProperty,
- 'time': ndb.TimeProperty,
- 'datetime': ndb.DateTimeProperty,
- 'id': None,
- 'reference': ndb.IntegerProperty,
- 'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
- 'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
- 'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
- })
+ self.types.update({'boolean': ndb.BooleanProperty,
+ 'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
+ 'text': ndb.TextProperty,
+ 'json': ndb.TextProperty,
+ 'password': ndb.StringProperty,
+ 'blob': ndb.BlobProperty,
+ 'upload': ndb.StringProperty,
+ 'integer': ndb.IntegerProperty,
+ 'bigint': ndb.IntegerProperty,
+ 'float': ndb.FloatProperty,
+ 'double': ndb.FloatProperty,
+ 'decimal': NDBDecimalProperty,
+ 'date': ndb.DateProperty,
+ 'time': ndb.TimeProperty,
+ 'datetime': ndb.DateTimeProperty,
+ 'id': None,
+ 'reference': ndb.IntegerProperty,
+ 'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
+ 'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
+ 'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
+ })
else:
- self.types.update({
- 'boolean': gae.BooleanProperty,
- 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
- 'text': gae.TextProperty,
- 'json': gae.TextProperty,
- 'password': gae.StringProperty,
- 'blob': gae.BlobProperty,
- 'upload': gae.StringProperty,
- 'integer': gae.IntegerProperty,
- 'bigint': gae.IntegerProperty,
- 'float': gae.FloatProperty,
- 'double': gae.FloatProperty,
- 'decimal': GAEDecimalProperty,
- 'date': gae.DateProperty,
- 'time': gae.TimeProperty,
- 'datetime': gae.DateTimeProperty,
- 'id': None,
- 'reference': gae.IntegerProperty,
- 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
- 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
- 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
- })
+ self.types.update({'boolean': gae.BooleanProperty,
+ 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
+ 'text': gae.TextProperty,
+ 'json': gae.TextProperty,
+ 'password': gae.StringProperty,
+ 'blob': gae.BlobProperty,
+ 'upload': gae.StringProperty,
+ 'integer': gae.IntegerProperty,
+ 'bigint': gae.IntegerProperty,
+ 'float': gae.FloatProperty,
+ 'double': gae.FloatProperty,
+ 'decimal': GAEDecimalProperty,
+ 'date': gae.DateProperty,
+ 'time': gae.TimeProperty,
+ 'datetime': gae.DateTimeProperty,
+ 'id': None,
+ 'reference': gae.IntegerProperty,
+ 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
+ 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
+ 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
+ })
self.db = db
self.uri = uri
self.dbengine = 'google:datastore'
@@ -4885,10 +5022,10 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
def parse_id(self, value, field_type):
return value
- def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
+ def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
myfields = {}
for field in table:
- if isinstance(polymodel,Table) and field.name in polymodel.fields():
+ if isinstance(polymodel, Table) and field.name in polymodel.fields():
continue
attr = {}
if isinstance(field.custom_qualifier, dict):
@@ -4917,8 +5054,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
ftype = self.types[field_type[:14]](**attr)
elif field_type.startswith('list:'):
ftype = self.types[field_type](**attr)
- elif not field_type in self.types\
- or not self.types[field_type]:
+ elif not field_type in self.types or not self.types[field_type]:
raise SyntaxError('Field: unknown field type: %s' % field_type)
else:
ftype = self.types[field_type](**attr)
@@ -4934,14 +5070,14 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
elif polymodel==True:
pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel
table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
- elif isinstance(polymodel,Table):
+ elif isinstance(polymodel, Table):
table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
else:
raise SyntaxError("polymodel must be None, True, a table or a tablename")
return None
- def expand(self,expression,field_type=None):
- if isinstance(expression,Field):
+ def expand(self, expression, field_type=None):
+ if isinstance(expression, Field):
if expression.type in ('text', 'blob', 'json'):
raise SyntaxError('AppEngine does not index by: %s' % expression.type)
return expression.name
@@ -4953,125 +5089,126 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
else:
return expression.op()
elif field_type:
- return self.represent(expression,field_type)
- elif isinstance(expression,(list,tuple)):
- return ','.join([self.represent(item,field_type) for item in expression])
+ return self.represent(expression, field_type)
+ elif isinstance(expression, (list, tuple)):
+ return ','.join([self.represent(item, field_type) for item in expression])
else:
return str(expression)
### TODO from gql.py Expression
- def AND(self,first,second):
+ def AND(self, first, second):
a = self.expand(first)
b = self.expand(second)
if b[0].name=='__key__' and a[0].name!='__key__':
return b+a
return a+b
- def EQ(self,first,second=None):
+ def EQ(self, first, second=None):
if isinstance(second, Key):
- return [GAEF(first.name,'=',second,lambda a,b:a==b)]
- return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
+ return [GAEF(first.name, '=', second, lambda a, b:a==b)]
+ return [GAEF(first.name, '=', self.represent(second, first.type), lambda a, b:a==b)]
- def NE(self,first,second=None):
+ def NE(self, first, second=None):
if first.type != 'id':
- return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
+ return [GAEF(first.name, '!=', self.represent(second, first.type), lambda a, b:a!=b)]
else:
if not second is None:
second = Key.from_path(first._tablename, long(second))
- return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
+ return [GAEF(first.name, '!=', second, lambda a, b:a!=b)]
- def LT(self,first,second=None):
+ def LT(self, first, second=None):
if first.type != 'id':
- return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a',self.represent(second,first.type),lambda a,b:a>b)]
+ return [GAEF(first.name, '>', self.represent(second, first.type), lambda a, b:a>b)]
else:
second = Key.from_path(first._tablename, long(second))
- return [GAEF(first.name,'>',second,lambda a,b:a>b)]
+ return [GAEF(first.name, '>', second, lambda a, b:a>b)]
- def GE(self,first,second=None):
+ def GE(self, first, second=None):
if first.type != 'id':
- return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
+ return [GAEF(first.name, '>=', self.represent(second, first.type), lambda a, b:a>=b)]
else:
second = Key.from_path(first._tablename, long(second))
- return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
+ return [GAEF(first.name, '>=', second, lambda a, b:a>=b)]
- def INVERT(self,first):
+ def INVERT(self, first):
return '-%s' % first.name
- def COMMA(self,first,second):
- return '%s, %s' % (self.expand(first),self.expand(second))
+ def COMMA(self, first, second):
+ return '%s, %s' % (self.expand(first), self.expand(second))
- def BELONGS(self,first,second=None):
- if not isinstance(second,(list, tuple, set)):
+ def BELONGS(self, first, second=None):
+ if not isinstance(second, (list, tuple, set)):
raise SyntaxError("Not supported")
if not self.use_ndb:
- if isinstance(second,set):
+ if isinstance(second, set):
second = list(second)
if first.type == 'id':
second = [Key.from_path(first._tablename, int(i)) for i in second]
- return [GAEF(first.name,'in',second,lambda a,b:a in b)]
+ return [GAEF(first.name, 'in', second, lambda a, b:a in b)]
- def CONTAINS(self,first,second,case_sensitive=False):
+ def CONTAINS(self, first, second, case_sensitive=False):
# silently ignoring: GAE can only do case sensitive matches!
if not first.type.startswith('list:'):
raise SyntaxError("Not supported")
- return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
+ return [GAEF(first.name, '=', self.expand(second, first.type[5:]), lambda a, b:b in a)]
- def NOT(self,first):
- nops = { self.EQ: self.NE,
- self.NE: self.EQ,
- self.LT: self.GE,
- self.GT: self.LE,
- self.LE: self.GT,
- self.GE: self.LT}
- if not isinstance(first,Query):
+ def NOT(self, first):
+ nops = {self.EQ: self.NE,
+ self.NE: self.EQ,
+ self.LT: self.GE,
+ self.GT: self.LE,
+ self.LE: self.GT,
+ self.GE: self.LT}
+ if not isinstance(first, Query):
raise SyntaxError("Not suported")
- nop = nops.get(first.op,None)
+ nop = nops.get(first.op, None)
if not nop:
raise SyntaxError("Not suported %s" % first.op.__name__)
first.op = nop
return self.expand(first)
- def truncate(self,table,mode):
+ def truncate(self, table, mode):
self.db(self.db._adapter.id_query(table)).delete()
- GAE_FILTER_OPTIONS = {
- '=': lambda q, t, p, v: q.filter(getattr(t,p) == v),
- '>': lambda q, t, p, v: q.filter(getattr(t,p) > v),
- '<': lambda q, t, p, v: q.filter(getattr(t,p) < v),
- '<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v),
- '>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v),
- '!=': lambda q, t, p, v: q.filter(getattr(t,p) != v),
- 'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)),
- }
+ GAE_FILTER_OPTIONS = {'=': lambda q, t, p, v: q.filter(getattr(t, p) == v),
+ '>': lambda q, t, p, v: q.filter(getattr(t, p) > v),
+ '<': lambda q, t, p, v: q.filter(getattr(t, p) < v),
+ '<=': lambda q, t, p, v: q.filter(getattr(t, p) <= v),
+ '>=': lambda q, t, p, v: q.filter(getattr(t, p) >= v),
+ '!=': lambda q, t, p, v: q.filter(getattr(t, p) != v),
+ 'in': lambda q, t, p, v: q.filter(getattr(t, p).IN(v)),
+ }
def filter(self, query, tableobj, prop, op, value):
return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value)
- def select_raw(self,query,fields=None,attributes=None):
+ def select_raw(self, query, fields=None, attributes=None, count_only=False):
db = self.db
fields = fields or []
attributes = attributes or {}
args_get = attributes.get
new_fields = []
+
for item in fields:
- if isinstance(item,SQLALL):
+ if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
+
fields = new_fields
if query:
tablename = self.get_table(query)
@@ -5083,15 +5220,15 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
if query:
if use_common_filters(query):
- query = self.common_filter(query,[tablename])
+ query = self.common_filter(query, [tablename])
- #tableobj is a GAE/NDB Model class (or subclass)
+ # tableobj is a GAE/NDB Model class (or subclass)
tableobj = db[tablename]._tableobj
filters = self.expand(query)
projection = None
if len(db[tablename].fields) == len(fields):
- #getting all fields, not a projection query
+ # getting all fields, not a projection query
projection = None
elif args_get('projection') == True:
projection = []
@@ -5101,7 +5238,8 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
"text and blob field types not allowed in projection queries")
else:
projection.append(f.name)
- elif args_get('filterfields') == True:
+
+ elif args_get('filterfields') is True:
projection = []
for f in fields:
projection.append(f.name)
@@ -5114,55 +5252,52 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
args_get('projection') == True\
else None
- cursor = None
- if isinstance(args_get('reusecursor'), str):
- cursor = args_get('reusecursor')
+ cursor = args_get('reusecursor')
+ cursor = cursor if isinstance(cursor, str) else None
if self.use_ndb:
qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)
items = tableobj.query(default_options=qo)
else:
- items = gae.Query(tableobj, projection=query_projection,
- cursor=cursor)
+ items = gae.Query(tableobj, projection=query_projection, cursor=cursor)
for filter in filters:
- if args_get('projection') == True and \
- filter.name in query_projection and \
- filter.op in ['=', '<=', '>=']:
- raise SyntaxError(
- "projection fields cannot have equality filters")
- if filter.name=='__key__' and filter.op=='>' and filter.value==0:
+ if (args_get('projection') == True and
+ filter.name in query_projection and
+ filter.op in ('=', '<=', '>=')):
+ raise SyntaxError("projection fields cannot have equality filters")
+ if filter.name == '__key__' and filter.op == '>' and filter.value == 0:
continue
- elif filter.name=='__key__' and filter.op=='=':
- if filter.value==0:
+ elif filter.name == '__key__' and filter.op == '=':
+ if filter.value == 0:
items = []
elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key):
# key qeuries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = filter.value.get() if self.use_ndb else tableobj.get(filter.value)
- items = (item and [item]) or []
+ items = [item] if item else []
else:
# key qeuries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = tableobj.get_by_id(filter.value)
- items = (item and [item]) or []
- elif isinstance(items,list): # i.e. there is a single record!
- items = [i for i in items if filter.apply(
- getattr(item,filter.name),filter.value)]
+ items = [item] if item else []
+ elif isinstance(items, list): # i.e. there is a single record!
+ items = [i for i in items if filter.apply(getattr(item,
+ filter.name),
+ filter.value)]
else:
- if filter.name=='__key__' and filter.op != 'in':
- if self.use_ndb:
- items.order(tableobj._key)
- else:
- items.order('__key__')
- items = self.filter(items, tableobj, filter.name,
- filter.op, filter.value) \
- if self.use_ndb else \
- items.filter('%s %s' % (filter.name,filter.op),
- filter.value)
+ if filter.name == '__key__' and filter.op != 'in':
+ items.order(tableobj._key) if self.use_ndb else items.order('__key__')
+ if self.use_ndb:
+ items = self.filter(items, tableobj, filter.name, filter.op, filter.value)
+ else:
+ items = items.filter('%s %s' % (filter.name, filter.op), filter.value)
- if not isinstance(items,list):
+ if count_only:
+ items = [len(items) if isinstance(items, list) else items.count()]
+ elif not isinstance(items, list):
+ query = items
if args_get('left', None):
raise SyntaxError('Set: no left join in appengine')
if args_get('groupby', None):
@@ -5172,7 +5307,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
### THIS REALLY NEEDS IMPROVEMENT !!!
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
- if isinstance(orderby,Expression):
+ if isinstance(orderby, Expression):
orderby = self.expand(orderby)
orders = orderby.split(', ')
for order in orders:
@@ -5182,60 +5317,68 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
s = str(o)
desc = s[0] == '-'
s = (desc and s[1:]) or s
- return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
- _order = {'-id':-tableobj._key,'id':tableobj._key}.get(order)
+ return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
+ _order = {'-id': -tableobj._key, 'id': tableobj._key}.get(order)
if _order is None:
_order = make_order(order)
- items = items.order(_order)
+ query = query.order(_order)
else:
- order={'-id':'-__key__','id':'__key__'}.get(order,order)
- items = items.order(order)
+ order = {'-id': '-__key__', 'id': '__key__'}.get(order, order)
+ query = query.order(order)
+
if args_get('limitby', None):
(lmin, lmax) = attributes['limitby']
- (limit, offset) = (lmax - lmin, lmin)
+ limit, fetch_args = lmax-lmin, {'offset': lmin, 'keys_only': True}
+
if self.use_ndb:
- rows, cursor, more = items.fetch_page(limit,offset=offset)
+ keys, cursor, more = query.fetch_page(limit, **fetch_args)
+ items = ndb.get_multi(keys)
else:
- rows = items.fetch(limit,offset=offset)
- #cursor is only useful if there was a limit and we didn't return
+ keys = query.fetch(limit, **fetch_args)
+ items = gae.get(keys)
+ cursor = query.cursor()
+ # cursor is only useful if there was a limit and we didn't return
# all results
if args_get('reusecursor'):
- db['_lastcursor'] = cursor if self.use_ndb else items.cursor()
- items = rows
+ db['_lastcursor'] = cursor
+ else:
+ # if a limit is not specified, always return an iterator
+ rows = query
+
return (items, tablename, projection or db[tablename].fields)
- def select(self,query,fields,attributes):
+ def select(self, query, fields, attributes):
"""
- This is the GAE version of select. some notes to consider:
- - db['_lastsql'] is not set because there is not SQL statement string
- for a GAE query
- - 'nativeRef' is a magical fieldname used for self references on GAE
- - optional attribute 'projection' when set to True will trigger
- use of the GAE projection queries. note that there are rules for
- what is accepted imposed by GAE: each field must be indexed,
- projection queries cannot contain blob or text fields, and you
- cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
- - optional attribute 'filterfields' when set to True web2py will only
- parse the explicitly listed fields into the Rows object, even though
- all fields are returned in the query. This can be used to reduce
- memory usage in cases where true projection queries are not
- usable.
- - optional attribute 'reusecursor' allows use of cursor with queries
- that have the limitby attribute. Set the attribute to True for the
- first query, set it to the value of db['_lastcursor'] to continue
- a previous query. The user must save the cursor value between
- requests, and the filters must be identical. It is up to the user
- to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
+ This is the GAE version of select. Some notes to consider:
+        - db['_lastsql'] is not set because there is no SQL statement string
+ for a GAE query
+ - 'nativeRef' is a magical fieldname used for self references on GAE
+ - optional attribute 'projection' when set to True will trigger
+ use of the GAE projection queries. note that there are rules for
+ what is accepted imposed by GAE: each field must be indexed,
+ projection queries cannot contain blob or text fields, and you
+ cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
+ - optional attribute 'filterfields' when set to True web2py will only
+ parse the explicitly listed fields into the Rows object, even though
+ all fields are returned in the query. This can be used to reduce
+ memory usage in cases where true projection queries are not
+ usable.
+ - optional attribute 'reusecursor' allows use of cursor with queries
+ that have the limitby attribute. Set the attribute to True for the
+ first query, set it to the value of db['_lastcursor'] to continue
+ a previous query. The user must save the cursor value between
+ requests, and the filters must be identical. It is up to the user
+ to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
"""
- (items, tablename, fields) = self.select_raw(query,fields,attributes)
- # self.db['_lastsql'] = self._select(query,fields,attributes)
+ (items, tablename, fields) = self.select_raw(query, fields, attributes)
+ # self.db['_lastsql'] = self._select(query, fields, attributes)
rows = [[(t==self.db[tablename]._id.name and item) or \
(t=='nativeRef' and item) or getattr(item, t) \
for t in fields] for item in items]
colnames = ['%s.%s' % (tablename, t) for t in fields]
- processor = attributes.get('processor',self.parse)
- return processor(rows,fields,colnames,False)
+ processor = attributes.get('processor', self.parse)
+ return processor(rows, fields, colnames, False)
def parse_list_integers(self, value, field_type):
return value[:] if self.use_ndb else value
@@ -5243,26 +5386,22 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
def parse_list_strings(self, value, field_type):
return value[:] if self.use_ndb else value
- def count(self,query,distinct=None,limit=None):
+ def count(self, query, distinct=None, limit=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
- (items, tablename, fields) = self.select_raw(query)
- # self.db['_lastsql'] = self._count(query)
- try:
- return len(items)
- except TypeError:
- return items.count(limit=limit)
+ (items, tablename, fields) = self.select_raw(query, count_only=True)
+ return items[0]
- def delete(self,tablename, query):
+ def delete(self, tablename, query):
"""
This function was changed on 2010-05-04 because according to
http://code.google.com/p/googleappengine/issues/detail?id=3119
GAE no longer supports deleting more than 1000 records.
"""
- # self.db['_lastsql'] = self._delete(tablename,query)
+ # self.db['_lastsql'] = self._delete(tablename, query)
(items, tablename, fields) = self.select_raw(query)
# items can be one item or a query
- if not isinstance(items,list):
+ if not isinstance(items, list):
#use a keys_only query to ensure that this runs as a datastore
# small operations
leftitems = items.fetch(1000, keys_only=True)
@@ -5282,21 +5421,21 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
gae.delete(items)
return counter
- def update(self,tablename,query,update_fields):
- # self.db['_lastsql'] = self._update(tablename,query,update_fields)
+ def update(self, tablename, query, update_fields):
+ # self.db['_lastsql'] = self._update(tablename, query, update_fields)
(items, tablename, fields) = self.select_raw(query)
counter = 0
for item in items:
for field, value in update_fields:
- setattr(item, field.name, self.represent(value,field.type))
+ setattr(item, field.name, self.represent(value, field.type))
item.put()
counter += 1
LOGGER.info(str(counter))
return counter
- def insert(self,table,fields):
- dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
- # table._db['_lastsql'] = self._insert(table,fields)
+ def insert(self, table, fields):
+ dfields = dict((f.name, self.represent(v, f.type)) for f, v in fields)
+ # table._db['_lastsql'] = self._insert(table, fields)
tmp = table._tableobj(**dfields)
tmp.put()
key = tmp.key if self.use_ndb else tmp.key()
@@ -5304,10 +5443,10 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
(rid._table, rid._record, rid._gaekey) = (table, None, key)
return rid
- def bulk_insert(self,table,items):
+ def bulk_insert(self, table, items):
parsed_items = []
for item in items:
- dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
+ dfields = dict((f.name, self.represent(v, f.type)) for f, v in item)
parsed_items.append(table._tableobj(**dfields))
if self.use_ndb:
ndb.put_multi(parsed_items)
@@ -5315,78 +5454,81 @@ class GoogleDatastoreAdapter(NoSQLAdapter):
gae.put(parsed_items)
return True
+
def uuid2int(uuidv):
return uuid.UUID(uuidv).int
+
def int2uuid(n):
return str(uuid.UUID(int=n))
+
class CouchDBAdapter(NoSQLAdapter):
- drivers = ('couchdb',)
+ drivers = ('couchdb', )
uploads_in_blob = True
- types = {
- 'boolean': bool,
- 'string': str,
- 'text': str,
- 'json': str,
- 'password': str,
- 'blob': str,
- 'upload': str,
- 'integer': long,
- 'bigint': long,
- 'float': float,
- 'double': float,
- 'date': datetime.date,
- 'time': datetime.time,
- 'datetime': datetime.datetime,
- 'id': long,
- 'reference': long,
- 'list:string': list,
- 'list:integer': list,
- 'list:reference': list,
- }
+ types = {'boolean': bool,
+ 'string': str,
+ 'text': str,
+ 'json': str,
+ 'password': str,
+ 'blob': str,
+ 'upload': str,
+ 'integer': long,
+ 'bigint': long,
+ 'float': float,
+ 'double': float,
+ 'date': datetime.date,
+ 'time': datetime.time,
+ 'datetime': datetime.datetime,
+ 'id': long,
+ 'reference': long,
+ 'list:string': list,
+ 'list:integer': list,
+ 'list:reference': list,
+ }
def file_exists(self, filename): pass
+
def file_open(self, filename, mode='rb', lock=True): pass
+
def file_close(self, fileobj): pass
- def expand(self,expression,field_type=None):
- if isinstance(expression,Field):
- if expression.type=='id':
+ def expand(self, expression, field_type=None):
+ if isinstance(expression, Field):
+ if expression.type == 'id':
return "%s._id" % expression.tablename
- return BaseAdapter.expand(self,expression,field_type)
+ return BaseAdapter.expand(self, expression, field_type)
- def AND(self,first,second):
- return '(%s && %s)' % (self.expand(first),self.expand(second))
+ def AND(self, first, second):
+ return '(%s && %s)' % (self.expand(first), self.expand(second))
- def OR(self,first,second):
- return '(%s || %s)' % (self.expand(first),self.expand(second))
+ def OR(self, first, second):
+ return '(%s || %s)' % (self.expand(first), self.expand(second))
- def EQ(self,first,second):
+ def EQ(self, first, second):
if second is None:
return '(%s == null)' % self.expand(first)
- return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
+ return '(%s == %s)' % (self.expand(first), self.expand(second, first.type))
- def NE(self,first,second):
+ def NE(self, first, second):
if second is None:
return '(%s != null)' % self.expand(first)
- return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
+ return '(%s != %s)' % (self.expand(first), self.expand(second, first.type))
- def COMMA(self,first,second):
- return '%s + %s' % (self.expand(first),self.expand(second))
+ def COMMA(self, first, second):
+ return '%s + %s' % (self.expand(first), self.expand(second))
def represent(self, obj, fieldtype):
value = NoSQLAdapter.represent(self, obj, fieldtype)
- if fieldtype=='id':
+ if fieldtype == 'id':
return repr(str(long(value)))
- elif fieldtype in ('date','time','datetime','boolean'):
+ elif fieldtype in ('date', 'time', 'datetime', 'boolean'):
return serializers.json(value)
- return repr(not isinstance(value,unicode) and value \
- or value and value.encode('utf8'))
+ return repr(not isinstance(value, unicode) and value or value and value.encode('utf8'))
- def __init__(self,db,uri='couchdb://127.0.0.1:5984',
- pool_size=0,folder=None,db_codec ='UTF-8',
+ def __init__(self, db, uri='couchdb://127.0.0.1:5984',
+ pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
@@ -5399,10 +5541,12 @@ class CouchDBAdapter(NoSQLAdapter):
self._after_connection = after_connection
self.pool_size = pool_size
- url='http://'+uri[10:]
- def connector(url=url,driver_args=driver_args):
- return self.driver.Server(url,**driver_args)
- self.reconnect(connector,cursor=False)
+ url = 'http://'+uri[10:]
+
+ def connector(url=url, driver_args=driver_args):
+ return self.driver.Server(url, **driver_args)
+
+ self.reconnect(connector, cursor=False)
def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
if migrate:
@@ -5411,55 +5555,58 @@ class CouchDBAdapter(NoSQLAdapter):
except:
pass
- def insert(self,table,fields):
+ def insert(self, table, fields):
id = uuid2int(web2py_uuid())
ctable = self.connection[table._tablename]
- values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
+ values = dict((k.name, self.represent(v, k.type)) for k, v in fields)
values['_id'] = str(id)
ctable.save(values)
return id
- def _select(self,query,fields,attributes):
- if not isinstance(query,Query):
+ def _select(self, query, fields, attributes):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
for key in set(attributes.keys())-SELECT_ARGS:
raise SyntaxError('invalid select attribute: %s' % key)
new_fields=[]
for item in fields:
- if isinstance(item,SQLALL):
+ if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
+
def uid(fd):
- return fd=='id' and '_id' or fd
- def get(row,fd):
- return fd=='id' and long(row['_id']) or row.get(fd,None)
+ return fd == 'id' and '_id' or fd
+
+ def get(row, fd):
+ return fd=='id' and long(row['_id']) or row.get(fd, None)
+
fields = new_fields
tablename = self.get_table(query)
fieldnames = [f.name for f in (fields or self.db[tablename])]
- colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
- fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
- fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
- dict(t=tablename,
- query=self.expand(query),
- order='%s._id' % tablename,
- fields=fields)
+ colnames = ['%s.%s' % (tablename, k) for k in fieldnames]
+ fields = ','.join(['%s.%s' % (tablename, uid(f)) for f in fieldnames])
+ fn = "(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" \
+ % dict(t=tablename,
+ query=self.expand(query),
+ order='%s._id' % tablename,
+ fields=fields)
return fn, colnames
- def select(self,query,fields,attributes):
- if not isinstance(query,Query):
+ def select(self, query, fields, attributes):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
- fn, colnames = self._select(query,fields,attributes)
+ fn, colnames = self._select(query, fields, attributes)
tablename = colnames[0].split('.')[0]
ctable = self.connection[tablename]
rows = [cols['value'] for cols in ctable.query(fn)]
- processor = attributes.get('processor',self.parse)
- return processor(rows,fields,colnames,False)
+ processor = attributes.get('processor', self.parse)
+ return processor(rows, fields, colnames, False)
- def delete(self,tablename,query):
- if not isinstance(query,Query):
+ def delete(self, tablename, query):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
- if query.first.type=='id' and query.op==self.EQ:
+ if query.first.type == 'id' and query.op == self.EQ:
id = query.second
tablename = query.first.tablename
assert(tablename == query.first.tablename)
@@ -5471,89 +5618,90 @@ class CouchDBAdapter(NoSQLAdapter):
return 0
else:
tablename = self.get_table(query)
- rows = self.select(query,[self.db[tablename]._id],{})
+ rows = self.select(query, [self.db[tablename]._id], {})
ctable = self.connection[tablename]
for row in rows:
del ctable[str(row.id)]
return len(rows)
- def update(self,tablename,query,fields):
- if not isinstance(query,Query):
+ def update(self, tablename, query, fields):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
- if query.first.type=='id' and query.op==self.EQ:
+ if query.first.type == 'id' and query.op == self.EQ:
id = query.second
tablename = query.first.tablename
ctable = self.connection[tablename]
try:
doc = ctable[str(id)]
- for key,value in fields:
- doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
+ for key, value in fields:
+ doc[key.name] = self.represent(value, self.db[tablename][key.name].type)
ctable.save(doc)
return 1
except couchdb.http.ResourceNotFound:
return 0
else:
tablename = self.get_table(query)
- rows = self.select(query,[self.db[tablename]._id],{})
+ rows = self.select(query, [self.db[tablename]._id], {})
ctable = self.connection[tablename]
table = self.db[tablename]
for row in rows:
doc = ctable[str(row.id)]
- for key,value in fields:
- doc[key.name] = self.represent(value,table[key.name].type)
+ for key, value in fields:
+ doc[key.name] = self.represent(value, table[key.name].type)
ctable.save(doc)
return len(rows)
- def count(self,query,distinct=None):
+ def count(self, query, distinct=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
- if not isinstance(query,Query):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
- rows = self.select(query,[self.db[tablename]._id],{})
+ rows = self.select(query, [self.db[tablename]._id], {})
return len(rows)
+
def cleanup(text):
"""
- validates that the given text is clean: only contains [0-9a-zA-Z_]
+ Validates that the given text is clean: only contains [0-9a-zA-Z_]
"""
- #if not REGEX_ALPHANUMERIC.match(text):
- # raise SyntaxError('invalid table or field name: %s' % text)
+ # if not REGEX_ALPHANUMERIC.match(text):
+ # raise SyntaxError('invalid table or field name: %s' % text)
return text
+
class MongoDBAdapter(NoSQLAdapter):
native_json = True
- drivers = ('pymongo',)
+ drivers = ('pymongo', )
uploads_in_blob = False
- types = {
- 'boolean': bool,
- 'string': str,
- 'text': str,
- 'json': str,
- 'password': str,
- 'blob': str,
- 'upload': str,
- 'integer': long,
- 'bigint': long,
- 'float': float,
- 'double': float,
- 'date': datetime.date,
- 'time': datetime.time,
- 'datetime': datetime.datetime,
- 'id': long,
- 'reference': long,
- 'list:string': list,
- 'list:integer': list,
- 'list:reference': list,
- }
+ types = {'boolean': bool,
+ 'string': str,
+ 'text': str,
+ 'json': str,
+ 'password': str,
+ 'blob': str,
+ 'upload': str,
+ 'integer': long,
+ 'bigint': long,
+ 'float': float,
+ 'double': float,
+ 'date': datetime.date,
+ 'time': datetime.time,
+ 'datetime': datetime.datetime,
+ 'id': long,
+ 'reference': long,
+ 'list:string': list,
+ 'list:integer': list,
+ 'list:reference': list,
+ }
error_messages = {"javascript_needed": "This must yet be replaced" +
" with javascript in order to work."}
- def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
- pool_size=0, folder=None, db_codec ='UTF-8',
+ def __init__(self, db, uri='mongodb://127.0.0.1:5984/db',
+ pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
@@ -5579,21 +5727,21 @@ class MongoDBAdapter(NoSQLAdapter):
self.pool_size = pool_size
#this is the minimum amount of replicates that it should wait
# for on insert/update
- self.minimumreplication = adapter_args.get('minimumreplication',0)
+ self.minimumreplication = adapter_args.get('minimumreplication', 0)
# by default all inserts and selects are performand asynchronous,
# but now the default is
# synchronous, except when overruled by either this default or
# function parameter
- self.safe = adapter_args.get('safe',True)
+ self.safe = adapter_args.get('safe', True)
# load user setting for uploads in blob storage
self.uploads_in_blob = adapter_args.get('uploads_in_blob', False)
- if isinstance(m,tuple):
+ if isinstance(m, tuple):
m = {"database" : m[1]}
- if m.get('database')==None:
+ if m.get('database') is None:
raise SyntaxError("Database is required!")
- def connector(uri=self.uri,m=m):
+ def connector(uri=self.uri, m=m):
# Connection() is deprecated
if hasattr(self.driver, "MongoClient"):
Connection = self.driver.MongoClient
@@ -5601,7 +5749,7 @@ class MongoDBAdapter(NoSQLAdapter):
Connection = self.driver.Connection
return Connection(uri)[m.get('database')]
- self.reconnect(connector,cursor=False)
+ self.reconnect(connector, cursor=False)
def object_id(self, arg=None):
""" Convert input to a valid Mongodb ObjectId instance
@@ -5658,8 +5806,8 @@ class MongoDBAdapter(NoSQLAdapter):
else:
value = NoSQLAdapter.represent(self, obj, fieldtype)
# reference types must be convert to ObjectID
- if fieldtype =='date':
- if value == None:
+ if fieldtype == 'date':
+ if value is None:
return value
# this piece of data can be stripped off based on the fieldtype
t = datetime.time(0, 0, 0)
@@ -5667,7 +5815,7 @@ class MongoDBAdapter(NoSQLAdapter):
# string or integer
return datetime.datetime.combine(value, t)
elif fieldtype == 'time':
- if value == None:
+ if value is None:
return value
# this piece of data can be stripped of based on the fieldtype
d = datetime.date(2000, 1, 1)
@@ -5675,7 +5823,7 @@ class MongoDBAdapter(NoSQLAdapter):
# string or integer
return datetime.datetime.combine(d, value)
elif fieldtype == "blob":
- if value== None:
+ if value is None:
return value
from bson import Binary
if not isinstance(value, Binary):
@@ -5693,7 +5841,7 @@ class MongoDBAdapter(NoSQLAdapter):
return value
elif ((isinstance(fieldtype, basestring) and
fieldtype.startswith("reference")) or
- (isinstance(fieldtype, Table)) or fieldtype=="id"):
+ (isinstance(fieldtype, Table)) or fieldtype == "id"):
value = self.object_id(value)
return value
@@ -5705,11 +5853,11 @@ class MongoDBAdapter(NoSQLAdapter):
def count(self, query, distinct=None, snapshot=True):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
- if not isinstance(query,Query):
+ if not isinstance(query, Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
- return long(self.select(query,[self.db[tablename]._id], {},
- count=True,snapshot=snapshot)['count'])
+ return long(self.select(query, [self.db[tablename]._id], {},
+ count=True, snapshot=snapshot)['count'])
# Maybe it would be faster if we just implemented the pymongo
# .count() function which is probably quicker?
# therefor call __select() connection[table].find(query).count()
@@ -5722,7 +5870,7 @@ class MongoDBAdapter(NoSQLAdapter):
# convert second arg to an objectid field
# (if its not already)
# if second arg is 0 convert to objectid
- if isinstance(expression.first,Field) and \
+ if isinstance(expression.first, Field) and \
((expression.first.type == 'id') or \
("reference" in expression.first.type)):
if expression.first.type == 'id':
@@ -5736,10 +5884,10 @@ class MongoDBAdapter(NoSQLAdapter):
result = expression.op(expression.first, expression.second)
if isinstance(expression, Field):
- if expression.type=='id':
+ if expression.type =='id':
result = "_id"
else:
- result = expression.name
+ result = expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
result = expression.op(expression.first, expression.second)
@@ -5750,10 +5898,10 @@ class MongoDBAdapter(NoSQLAdapter):
else:
result = expression.op
elif field_type:
- result = self.represent(expression,field_type)
- elif isinstance(expression,(list,tuple)):
- result = ','.join(self.represent(item,field_type) for
- item in expression)
+ result = self.represent(expression, field_type)
+ elif isinstance(expression, (list, tuple)):
+ result = [self.represent(item, field_type) for
+ item in expression]
else:
result = expression
return result
@@ -5768,45 +5916,41 @@ class MongoDBAdapter(NoSQLAdapter):
ctable = self.connection[table._tablename]
ctable.remove(None, safe=True)
- def _select(self, query, fields, attributes):
- if 'for_update' in attributes:
- logging.warn('mongodb does not support for_update')
- for key in set(attributes.keys())-set(('limitby',
- 'orderby','for_update')):
- if attributes[key]!=None:
- logging.warn('select attribute not implemented: %s' % key)
-
- new_fields=[]
- mongosort_list = []
-
+ def select(self, query, fields, attributes, count=False,
+ snapshot=False):
+ mongofields_dict = self.SON()
+ mongoqry_dict = {}
+ new_fields, mongosort_list = [], []
# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
# distinct = attributes.get('distinct', False)
+ if 'for_update' in attributes:
+ logging.warn('mongodb does not support for_update')
+ for key in set(attributes.keys())-set(('limitby',
+ 'orderby', 'for_update')):
+ if attributes[key] is not None:
+ logging.warn('select attribute not implemented: %s' % key)
+ if limitby:
+ limitby_skip, limitby_limit = limitby[0], int(limitby[1])
+ else:
+ limitby_skip = limitby_limit = 0
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
-
# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:], -1))
else:
mongosort_list.append((f, 1))
- if limitby:
- limitby_skip, limitby_limit = limitby[0], int(limitby[1])
- else:
- limitby_skip = limitby_limit = 0
-
- mongofields_dict = self.SON()
- mongoqry_dict = {}
for item in fields:
if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
- if isinstance(query,Query):
+ if isinstance(query, Query):
tablename = self.get_table(query)
elif len(fields) != 0:
tablename = fields[0].tablename
@@ -5817,28 +5961,18 @@ class MongoDBAdapter(NoSQLAdapter):
fields = fields or self.db[tablename]
for field in fields:
mongofields_dict[field.name] = 1
-
- return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
- limitby_limit, limitby_skip
-
- def select(self, query, fields, attributes, count=False,
- snapshot=False):
- # TODO: support joins
- tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
- limitby_limit, limitby_skip = self._select(query, fields, attributes)
ctable = self.connection[tablename]
-
if count:
- return {'count' : ctable.find(
+ return {'count': ctable.find(
mongoqry_dict, mongofields_dict,
skip=limitby_skip, limit=limitby_limit,
sort=mongosort_list, snapshot=snapshot).count()}
else:
# pymongo cursor object
mongo_list_dicts = ctable.find(mongoqry_dict,
- mongofields_dict, skip=limitby_skip,
- limit=limitby_limit, sort=mongosort_list,
- snapshot=snapshot)
+ mongofields_dict, skip=limitby_skip,
+ limit=limitby_limit, sort=mongosort_list,
+ snapshot=snapshot)
rows = []
# populate row in proper order
# Here we replace ._id with .id to follow the standard naming
@@ -5854,7 +5988,7 @@ class MongoDBAdapter(NoSQLAdapter):
newnames.append(".".join((tablename, field.name)))
for record in mongo_list_dicts:
- row=[]
+ row = []
for colname in colnames:
tablename, fieldname = colname.split(".")
# switch to Mongo _id uuids for retrieving
@@ -5866,33 +6000,36 @@ class MongoDBAdapter(NoSQLAdapter):
value = None
row.append(value)
rows.append(row)
-
processor = attributes.get('processor', self.parse)
result = processor(rows, fields, newnames, False)
return result
- def _insert(self, table, fields):
+ def insert(self, table, fields, safe=None):
+ """Safe determines whether a asynchronous request is done or a
+ synchronous action is done
+ For safety, we use by default synchronous requests"""
+
values = dict()
+ if safe is None:
+ safe = self.safe
+ ctable = self.connection[table._tablename]
for k, v in fields:
if not k.name in ["id", "safe"]:
fieldname = k.name
fieldtype = table[k.name].type
values[fieldname] = self.represent(v, fieldtype)
- return values
- # Safe determines whether a asynchronious request is done or a
- # synchronious action is done
- # For safety, we use by default synchronous requests
- def insert(self, table, fields, safe=None):
- if safe==None:
- safe = self.safe
- ctable = self.connection[table._tablename]
- values = self._insert(table, fields)
ctable.insert(values, safe=safe)
return long(str(values['_id']), 16)
- #this function returns a dict with the where clause and update fields
- def _update(self, tablename, query, fields):
+ def update(self, tablename, query, fields, safe=None):
+ if safe is None:
+ safe = self.safe
+ # return amount of adjusted rows or zero, but no exceptions
+ # @ related not finding the result
+ if not isinstance(query, Query):
+ raise RuntimeError("Not implemented")
+ amount = self.count(query, False)
if not isinstance(query, Query):
raise SyntaxError("Not Supported")
filter = None
@@ -5901,17 +6038,6 @@ class MongoDBAdapter(NoSQLAdapter):
# do not try to update id fields to avoid backend errors
modify = {'$set': dict((k.name, self.represent(v, k.type)) for
k, v in fields if (not k.name in ("_id", "id")))}
- return modify, filter
-
- def update(self, tablename, query, fields, safe=None):
- if safe == None:
- safe = self.safe
- # return amount of adjusted rows or zero, but no exceptions
- # @ related not finding the result
- if not isinstance(query, Query):
- raise RuntimeError("Not implemented")
- amount = self.count(query, False)
- modify, filter = self._update(tablename, query, fields)
try:
result = self.connection[tablename].update(filter,
modify, multi=True, safe=safe)
@@ -5927,23 +6053,19 @@ class MongoDBAdapter(NoSQLAdapter):
# TODO Reverse update query to verifiy that the query succeded
raise RuntimeError("uncaught exception when updating rows: %s" % e)
- def _delete(self, tablename, query):
- if not isinstance(query, Query):
- raise RuntimeError("query type %s is not supported" % \
- type(query))
- return self.expand(query)
-
def delete(self, tablename, query, safe=None):
if safe is None:
safe = self.safe
amount = 0
amount = self.count(query, False)
- filter = self._delete(tablename, query)
+ if not isinstance(query, Query):
+ raise RuntimeError("query type %s is not supported" % type(query))
+ filter = self.expand(query)
self.connection[tablename].remove(filter, safe=safe)
return amount
def bulk_insert(self, table, items):
- return [self.insert(table,item) for item in items]
+ return [self.insert(table, item) for item in items]
## OPERATORS
def INVERT(self, first):
@@ -5954,23 +6076,23 @@ class MongoDBAdapter(NoSQLAdapter):
def NOT(self, first):
return {'$not': self.expand(first)}
- def AND(self,first,second):
+ def AND(self, first, second):
# pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]})
- return {'$and': [self.expand(first),self.expand(second)]}
+ return {'$and': [self.expand(first), self.expand(second)]}
- def OR(self,first,second):
+ def OR(self, first, second):
# pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
- return {'$or': [self.expand(first),self.expand(second)]}
+ return {'$or': [self.expand(first), self.expand(second)]}
def BELONGS(self, first, second):
if isinstance(second, str):
- return {self.expand(first) : {"$in" : [ second[:-1]]} }
- elif second==[] or second==() or second==set():
- return {1:0}
+ return {self.expand(first): {"$in": [second[:-1]]}}
+ elif second == [] or second == () or second == set():
+ return {1: 0}
items = [self.expand(item, first.type) for item in second]
- return {self.expand(first) : {"$in" : items} }
+ return {self.expand(first): {"$in": items}}
- def EQ(self,first,second=None):
+ def EQ(self, first, second=None):
result = {}
result[self.expand(first)] = self.expand(second)
return result
@@ -5980,26 +6102,26 @@ class MongoDBAdapter(NoSQLAdapter):
result[self.expand(first)] = {'$ne': self.expand(second)}
return result
- def LT(self,first,second=None):
+ def LT(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
result = {}
result[self.expand(first)] = {'$lt': self.expand(second)}
return result
- def LE(self,first,second=None):
+ def LE(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
result = {}
result[self.expand(first)] = {'$lte': self.expand(second)}
return result
- def GT(self,first,second):
+ def GT(self, first, second):
result = {}
result[self.expand(first)] = {'$gt': self.expand(second)}
return result
- def GE(self,first,second=None):
+ def GE(self, first, second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
result = {}
@@ -6050,40 +6172,37 @@ class MongoDBAdapter(NoSQLAdapter):
return '%s, %s' % (self.expand(first), self.expand(second))
def LIKE(self, first, second):
- #escaping regex operators?
- return {self.expand(first): ('%s' % \
- self.expand(second, 'string').replace('%','/'))}
+ # escaping regex operators?
+ return {self.expand(first): ('%s' % self.expand(second, 'string').replace('%', '/'))}
def ILIKE(self, first, second):
- val = second if isinstance(second,self.ObjectId) else {
+ val = second if isinstance(second, self.ObjectId) else {
'$regex': second.replace('%', ''), '$options': 'i'}
return {self.expand(first): val}
def STARTSWITH(self, first, second):
#escaping regex operators?
- return {self.expand(first): ('/^%s/' % \
- self.expand(second, 'string'))}
+ return {self.expand(first): ('/^%s/' % self.expand(second, 'string'))}
def ENDSWITH(self, first, second):
#escaping regex operators?
- return {self.expand(first): ('/%s^/' % \
- self.expand(second, 'string'))}
+ return {self.expand(first): ('/%s^/' % self.expand(second, 'string'))}
def CONTAINS(self, first, second, case_sensitive=False):
# silently ignore, only case sensitive
# There is a technical difference, but mongodb doesn't support
# that, but the result will be the same
- val = second if isinstance(second,self.ObjectId) else \
- {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
- return {self.expand(first) : val}
+ val = second if isinstance(second, self.ObjectId) else \
+ {'$regex': ".*" + re.escape(self.expand(second, 'string')) + ".*"}
+ return {self.expand(first): val}
def LIKE(self, first, second):
import re
return {self.expand(first): {'$regex': \
re.escape(self.expand(second,
- 'string')).replace('%','.*')}}
+ 'string')).replace('%', '.*')}}
- #TODO verify full compatibilty with official SQL Like operator
+ # TODO verify full compatibilty with official SQL Like operator
def STARTSWITH(self, first, second):
#TODO Solve almost the same problem as with endswith
import re
@@ -6091,9 +6210,9 @@ class MongoDBAdapter(NoSQLAdapter):
re.escape(self.expand(second,
'string'))}}
- #TODO verify full compatibilty with official SQL Like operator
+ # TODO verify full compatibilty with official SQL Like operator
def ENDSWITH(self, first, second):
- #escaping regex operators?
+ # escaping regex operators?
#TODO if searched for a name like zsa_corbitt and the function
# is endswith('a') then this is also returned.
# Aldo it end with a t
@@ -6101,22 +6220,22 @@ class MongoDBAdapter(NoSQLAdapter):
return {self.expand(first): {'$regex': \
re.escape(self.expand(second, 'string')) + '$'}}
- #TODO verify full compatibilty with official oracle contains operator
+ # TODO verify full compatibilty with official oracle contains operator
def CONTAINS(self, first, second, case_sensitive=False):
# silently ignore, only case sensitive
- #There is a technical difference, but mongodb doesn't support
+ # There is a technical difference, but mongodb doesn't support
# that, but the result will be the same
- #TODO contains operators need to be transformed to Regex
+ # TODO contains operators need to be transformed to Regex
return {self.expand(first) : {'$regex': \
".*" + re.escape(self.expand(second, 'string')) + ".*"}}
class IMAPAdapter(NoSQLAdapter):
- drivers = ('imaplib',)
+ drivers = ('imaplib', )
""" IMAP server adapter
- This class is intended as an interface with
+ This class is intended as an interface with
email IMAP servers to perform simple queries in the
web2py DAL query syntax, so email read, search and
other related IMAP mail services (as those implemented
@@ -6177,81 +6296,82 @@ class IMAPAdapter(NoSQLAdapter):
To avoid this sequence numbers issues, it is recommended the use
of uid fields in query references (although the update and delete
in separate actions rule still applies).
+ ::
- # This is the code recommended to start imap support
- # at the app's model:
+ # This is the code recommended to start imap support
+ # at the app's model:
- imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
- imapdb.define_tables()
+ imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl
+ imapdb.define_tables()
- Here is an (incomplete) list of possible imap commands:
+ Here is an (incomplete) list of possible imap commands::
- # Count today's unseen messages
- # smaller than 6000 octets from the
- # inbox mailbox
+ # Count today's unseen messages
+ # smaller than 6000 octets from the
+ # inbox mailbox
- q = imapdb.INBOX.seen == False
- q &= imapdb.INBOX.created == datetime.date.today()
- q &= imapdb.INBOX.size < 6000
- unread = imapdb(q).count()
+ q = imapdb.INBOX.seen == False
+ q &= imapdb.INBOX.created == datetime.date.today()
+ q &= imapdb.INBOX.size < 6000
+ unread = imapdb(q).count()
- # Fetch last query messages
- rows = imapdb(q).select()
+ # Fetch last query messages
+ rows = imapdb(q).select()
- # it is also possible to filter query select results with limitby and
- # sequences of mailbox fields
+ # it is also possible to filter query select results with limitby and
+ # sequences of mailbox fields
- set.select(, limitby=(, ))
+ set.select(, limitby=(, ))
- # Mark last query messages as seen
- messages = [row.uid for row in rows]
- seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
+ # Mark last query messages as seen
+ messages = [row.uid for row in rows]
+ seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True)
- # Delete messages in the imap database that have mails from mr. Gumby
+ # Delete messages in the imap database that have mails from mr. Gumby
- deleted = 0
- for mailbox in imapdb.tables
- deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
+ deleted = 0
+ for mailbox in imapdb.tables
+ deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete()
- # It is possible also to mark messages for deletion instead of ereasing them
- # directly with set.update(deleted=True)
+ # It is possible also to mark messages for deletion instead of ereasing them
+ # directly with set.update(deleted=True)
- # This object give access
- # to the adapter auto mailbox
- # mapped names (which native
- # mailbox has what table name)
+ # This object give access
+ # to the adapter auto mailbox
+ # mapped names (which native
+ # mailbox has what table name)
- imapdb.mailboxes # tablename, server native name pairs
+ imapdb.mailboxes # tablename, server native name pairs
- # To retrieve a table native mailbox name use:
- imapdb..mailbox
+ # To retrieve a table native mailbox name use:
+ imapdb..mailbox
- ### New features v2.4.1:
+ ### New features v2.4.1:
- # Declare mailboxes statically with tablename, name pairs
- # This avoids the extra server names retrieval
+ # Declare mailboxes statically with tablename, name pairs
+ # This avoids the extra server names retrieval
- imapdb.define_tables({"inbox": "INBOX"})
+ imapdb.define_tables({"inbox": "INBOX"})
- # Selects without content/attachments/email columns will only
- # fetch header and flags
+ # Selects without content/attachments/email columns will only
+ # fetch header and flags
+
+ imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
- imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject)
"""
- types = {
- 'string': str,
- 'text': str,
- 'date': datetime.date,
- 'datetime': datetime.datetime,
- 'id': long,
- 'boolean': bool,
- 'integer': int,
- 'bigint': long,
- 'blob': str,
- 'list:string': str,
- }
+ types = {'string': str,
+ 'text': str,
+ 'date': datetime.date,
+ 'datetime': datetime.datetime,
+ 'id': long,
+ 'boolean': bool,
+ 'integer': int,
+ 'bigint': long,
+ 'blob': str,
+ 'list:string': str
+ }
dbengine = 'imap'
@@ -6262,7 +6382,7 @@ class IMAPAdapter(NoSQLAdapter):
uri,
pool_size=0,
folder=None,
- db_codec ='UTF-8',
+ db_codec='UTF-8',
credential_decoder=IDENTITY,
driver_args={},
adapter_args={},
@@ -6275,7 +6395,7 @@ class IMAPAdapter(NoSQLAdapter):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
- self.pool_size=pool_size
+ self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
@@ -6294,18 +6414,17 @@ class IMAPAdapter(NoSQLAdapter):
self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft',
'flagged': '\\Flagged', 'recent': '\\Recent',
'seen': '\\Seen', 'answered': '\\Answered'}
- self.search_fields = {
- 'id': 'MESSAGE', 'created': 'DATE',
- 'uid': 'UID', 'sender': 'FROM',
- 'to': 'TO', 'cc': 'CC',
- 'bcc': 'BCC', 'content': 'TEXT',
- 'size': 'SIZE', 'deleted': '\\Deleted',
- 'draft': '\\Draft', 'flagged': '\\Flagged',
- 'recent': '\\Recent', 'seen': '\\Seen',
- 'subject': 'SUBJECT', 'answered': '\\Answered',
- 'mime': None, 'email': None,
- 'attachments': None
- }
+ self.search_fields = {'id': 'MESSAGE', 'created': 'DATE',
+ 'uid': 'UID', 'sender': 'FROM',
+ 'to': 'TO', 'cc': 'CC',
+ 'bcc': 'BCC', 'content': 'TEXT',
+ 'size': 'SIZE', 'deleted': '\\Deleted',
+ 'draft': '\\Draft', 'flagged': '\\Flagged',
+ 'recent': '\\Recent', 'seen': '\\Seen',
+ 'subject': 'SUBJECT', 'answered': '\\Answered',
+ 'mime': None, 'email': None,
+ 'attachments': None
+ }
db['_lastsql'] = ''
@@ -6315,10 +6434,11 @@ class IMAPAdapter(NoSQLAdapter):
host = m.group('host')
port = int(m.group('port'))
over_ssl = False
- if port==993:
+ if port == 993:
over_ssl = True
- driver_args.update(host=host,port=port, password=password, user=user)
+ driver_args.update(host=host, port=port, password=password, user=user)
+
def connector(driver_args=driver_args):
# it is assumed sucessful authentication alLways
# TODO: support direct connection and login tests
@@ -6333,7 +6453,7 @@ class IMAPAdapter(NoSQLAdapter):
connection.mailbox_names = None
# dummy cursor function
- connection.cursor = lambda : True
+ connection.cursor = lambda: True
return connection
@@ -6351,7 +6471,7 @@ class IMAPAdapter(NoSQLAdapter):
closing
"""
- if getattr(self,'connection',None) != None:
+ if getattr(self, 'connection', None) is not None:
return
if f is None:
f = self.connector
@@ -6426,8 +6546,8 @@ class IMAPAdapter(NoSQLAdapter):
add adds to the date object
"""
- months = [None, "JAN","FEB","MAR","APR","MAY","JUN",
- "JUL", "AUG","SEP","OCT","NOV","DEC"]
+ months = [None, "JAN", "FEB", "MAR", "APR", "MAY", "JUN",
+ "JUL", "AUG", "SEP", "OCT", "NOV", "DEC"]
if isinstance(date, basestring):
# Prevent unexpected date response format
try:
@@ -6496,12 +6616,12 @@ class IMAPAdapter(NoSQLAdapter):
if not "NOSELECT" in item.upper():
sub_items = item.split("\"")
sub_items = [sub_item for sub_item in sub_items \
- if len(sub_item.strip()) > 0]
+ if len(sub_item.strip()) > 0]
# mailbox = sub_items[len(sub_items) -1]
mailbox = sub_items[-1].strip()
# remove unwanted characters and store original names
# Don't allow leading non alphabetic characters
- mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox)))
+ mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]', '', re.sub('[/ ]', '_', mailbox)))
mailboxes.append(mailbox_name)
self.connection.mailbox_names[mailbox_name] = mailbox
@@ -6554,26 +6674,28 @@ class IMAPAdapter(NoSQLAdapter):
for name in names:
self.db.define_table("%s" % name,
- Field("uid", writable=False),
- Field("created", "datetime", writable=False),
- Field("content", "text", writable=False),
- Field("to", writable=False),
- Field("cc", writable=False),
- Field("bcc", writable=False),
- Field("sender", writable=False),
- Field("size", "integer", writable=False),
- Field("subject", writable=False),
- Field("mime", writable=False),
- Field("email", "text", writable=False, readable=False),
- Field("attachments", "text", writable=False, readable=False),
- Field("encoding", writable=False),
- Field("answered", "boolean"),
- Field("deleted", "boolean"),
- Field("draft", "boolean"),
- Field("flagged", "boolean"),
- Field("recent", "boolean", writable=False),
- Field("seen", "boolean")
- )
+ Field("uid", writable=False),
+ Field("created", "datetime", writable=False),
+ Field("content", "text", writable=False),
+ Field("to", writable=False),
+ Field("cc", writable=False),
+ Field("bcc", writable=False),
+ Field("sender", writable=False),
+ Field("size", "integer", writable=False),
+ Field("subject", writable=False),
+ Field("mime", writable=False),
+ Field("email", "text", writable=False,
+ readable=False),
+ Field("attachments", "text", writable=False,
+ readable=False),
+ Field("encoding", writable=False),
+ Field("answered", "boolean"),
+ Field("deleted", "boolean"),
+ Field("draft", "boolean"),
+ Field("flagged", "boolean"),
+ Field("recent", "boolean", writable=False),
+ Field("seen", "boolean")
+ )
# Set a special _mailbox attribute for storing
# native mailbox names
@@ -6594,13 +6716,8 @@ class IMAPAdapter(NoSQLAdapter):
# but required by DAL
pass
- def _select(self, query, fields, attributes):
- if use_common_filters(query):
- query = self.common_filter(query, [self.get_query_mailbox(query),])
- return str(query)
-
def select(self, query, fields, attributes):
- """ Search and Fetch records and return web2py rows
+ """ Searches and Fetches records and return web2py rows
"""
# move this statement elsewhere (upper-level)
if use_common_filters(query):
@@ -6788,7 +6905,7 @@ class IMAPAdapter(NoSQLAdapter):
elif (("text" in maintype) and
("%s.content" % tablename in colnames)):
values.update({"text": self.encode_text(payload,
- self.get_charset(part))})
+ self.get_charset(part))})
content.append(values)
if "%s.size" % tablename in colnames:
@@ -6809,10 +6926,10 @@ class IMAPAdapter(NoSQLAdapter):
# parse result and return a rows object
colnames = colnames
- processor = attributes.get('processor',self.parse)
+ processor = attributes.get('processor', self.parse)
return processor(imapqry_array, fields, colnames)
- def _insert(self, table, fields):
+ def insert(self, table, fields):
def add_payload(message, obj):
payload = Message()
encoding = obj.get("encoding", "utf-8")
@@ -6842,8 +6959,8 @@ class IMAPAdapter(NoSQLAdapter):
attachments = d.get("attachments", [])
content = d.get("content", [])
flags = " ".join(["\\%s" % flag.capitalize() for flag in
- ("answered", "deleted", "draft", "flagged",
- "recent", "seen") if d.get(flag, False)])
+ ("answered", "deleted", "draft", "flagged",
+ "recent", "seen") if d.get(flag, False)])
if not message:
from email.message import Message
mime = d.get("mime", None)
@@ -6862,11 +6979,9 @@ class IMAPAdapter(NoSQLAdapter):
if isinstance(value, basestring):
message[item] = value
else:
- message[item] = ";".join([i for i in
- value])
+ message[item] = ";".join([i for i in value])
if (not message.is_multipart() and
- (not message.get_content_type().startswith(
- "multipart"))):
+ (not message.get_content_type().startswith("multipart"))):
if isinstance(content, basestring):
message.set_payload(content)
elif len(content) > 0:
@@ -6875,22 +6990,20 @@ class IMAPAdapter(NoSQLAdapter):
[add_payload(message, c) for c in content]
[add_payload(message, a) for a in attachments]
message = message.as_string()
- return (mailbox, flags, struct_time, message)
+
+ result, data = self.connection.append(mailbox, flags, struct_time, message)
+ if result == "OK":
+ uid = int(re.findall("\d+", str(data))[-1])
+ return self.db(table.uid == uid).select(table.id).first().id
+ else:
+ raise Exception("IMAP message append failed: %s" % data)
else:
raise NotImplementedError("IMAP empty insert is not implemented")
- def insert(self, table, fields):
- values = self._insert(table, fields)
- result, data = self.connection.append(*values)
- if result == "OK":
- uid = int(re.findall("\d+", str(data))[-1])
- return self.db(table.uid==uid).select(table.id).first().id
- else:
- raise Exception("IMAP message append failed: %s" % data)
-
- def _update(self, tablename, query, fields, commit=False):
+ def update(self, tablename, query, fields):
# TODO: the adapter should implement an .expand method
commands = list()
+ rowcount = 0
if use_common_filters(query):
query = self.common_filter(query, [tablename,])
mark = []
@@ -6920,11 +7033,7 @@ class IMAPAdapter(NoSQLAdapter):
commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
if len(unmark) > 0:
commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
- return commands
- def update(self, tablename, query, fields):
- rowcount = 0
- commands = self._update(tablename, query, fields)
for command in commands:
result, data = self.connection.store(*command)
if result == "OK":
@@ -6933,10 +7042,7 @@ class IMAPAdapter(NoSQLAdapter):
raise Exception("IMAP storing error: %s" % data)
return rowcount
- def _count(self, query, distinct=None):
- raise NotImplementedError()
-
- def count(self,query,distinct=None):
+ def count(self, query, distinct=None):
counter = 0
tablename = self.get_query_mailbox(query)
if query and tablename is not None:
@@ -7109,7 +7215,7 @@ class IMAPAdapter(NoSQLAdapter):
result = result.replace("NOT NOT", "").strip()
return result
- def EQ(self,first,second):
+ def EQ(self, first, second):
name = self.search_fields[first.name]
result = None
if name is not None:
@@ -7148,44 +7254,45 @@ class IMAPAdapter(NoSQLAdapter):
# end of adapters
########################################################################
-ADAPTERS = {
- 'sqlite': SQLiteAdapter,
- 'spatialite': SpatiaLiteAdapter,
- 'sqlite:memory': SQLiteAdapter,
- 'spatialite:memory': SpatiaLiteAdapter,
- 'mysql': MySQLAdapter,
- 'postgres': PostgreSQLAdapter,
- 'postgres:psycopg2': PostgreSQLAdapter,
- 'postgres:pg8000': PostgreSQLAdapter,
- 'postgres2:psycopg2': NewPostgreSQLAdapter,
- 'postgres2:pg8000': NewPostgreSQLAdapter,
- 'oracle': OracleAdapter,
- 'mssql': MSSQLAdapter,
- 'mssql2': MSSQL2Adapter,
- 'mssql3': MSSQL3Adapter,
- 'mssql4' : MSSQL4Adapter,
- 'vertica': VerticaAdapter,
- 'sybase': SybaseAdapter,
- 'db2': DB2Adapter,
- 'teradata': TeradataAdapter,
- 'informix': InformixAdapter,
- 'informix-se': InformixSEAdapter,
- 'firebird': FireBirdAdapter,
- 'firebird_embedded': FireBirdAdapter,
- 'ingres': IngresAdapter,
- 'ingresu': IngresUnicodeAdapter,
- 'sapdb': SAPDBAdapter,
- 'cubrid': CubridAdapter,
- 'jdbc:sqlite': JDBCSQLiteAdapter,
- 'jdbc:sqlite:memory': JDBCSQLiteAdapter,
- 'jdbc:postgres': JDBCPostgreSQLAdapter,
- 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
- 'google:datastore': GoogleDatastoreAdapter,
- 'google:sql': GoogleSQLAdapter,
- 'couchdb': CouchDBAdapter,
- 'mongodb': MongoDBAdapter,
- 'imap': IMAPAdapter
-}
+ADAPTERS = {'sqlite': SQLiteAdapter,
+ 'spatialite': SpatiaLiteAdapter,
+ 'sqlite:memory': SQLiteAdapter,
+ 'spatialite:memory': SpatiaLiteAdapter,
+ 'mysql': MySQLAdapter,
+ 'postgres': PostgreSQLAdapter,
+ 'postgres:psycopg2': PostgreSQLAdapter,
+ 'postgres:pg8000': PostgreSQLAdapter,
+ 'postgres2:psycopg2': NewPostgreSQLAdapter,
+ 'postgres2:pg8000': NewPostgreSQLAdapter,
+ 'oracle': OracleAdapter,
+ 'mssql': MSSQLAdapter,
+ 'mssql2': MSSQL2Adapter,
+ 'mssql3': MSSQL3Adapter,
+ 'mssql4' : MSSQL4Adapter,
+ 'vertica': VerticaAdapter,
+ 'sybase': SybaseAdapter,
+ 'db2': DB2Adapter,
+ 'teradata': TeradataAdapter,
+ 'informix': InformixAdapter,
+ 'informix-se': InformixSEAdapter,
+ 'firebird': FireBirdAdapter,
+ 'firebird_embedded': FireBirdAdapter,
+ 'ingres': IngresAdapter,
+ 'ingresu': IngresUnicodeAdapter,
+ 'sapdb': SAPDBAdapter,
+ 'cubrid': CubridAdapter,
+ 'jdbc:sqlite': JDBCSQLiteAdapter,
+ 'jdbc:sqlite:memory': JDBCSQLiteAdapter,
+ 'jdbc:postgres': JDBCPostgreSQLAdapter,
+ 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
+ 'google:datastore': GoogleDatastoreAdapter,
+ 'google:datastore+ndb': GoogleDatastoreAdapter,
+ 'google:sql': GoogleSQLAdapter,
+ 'couchdb': CouchDBAdapter,
+ 'mongodb': MongoDBAdapter,
+ 'imap': IMAPAdapter
+ }
+
def sqlhtml_validators(field):
"""
@@ -7205,19 +7312,20 @@ def sqlhtml_validators(field):
return field_type.validator
else:
field_type = field_type.type
- elif not isinstance(field_type,str):
+ elif not isinstance(field_type, str):
return []
- requires=[]
- def ff(r,id):
- row=r(id)
+ requires = []
+
+ def ff(r, id):
+ row = r(id)
if not row:
- return id
- elif hasattr(r, '_format') and isinstance(r._format,str):
+ return str(id)
+ elif hasattr(r, '_format') and isinstance(r._format, str):
return r._format % row
elif hasattr(r, '_format') and callable(r._format):
return r._format(row)
else:
- return id
+ return str(id)
if field_type in (('string', 'text', 'password')):
requires.append(validators.IS_LENGTH(field_length))
elif field_type == 'json':
@@ -7240,13 +7348,15 @@ def sqlhtml_validators(field):
field_type.find('.') < 0 and \
field_type[10:] in db.tables:
referenced = db[field_type[10:]]
+
def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
+
field.represent = field.represent or repr_ref
if hasattr(referenced, '_format') and referenced._format:
- requires = validators.IS_IN_DB(db,referenced._id,
+ requires = validators.IS_IN_DB(db, referenced._id,
referenced._format)
if field.unique:
- requires._and = validators.IS_NOT_IN_DB(db,field)
+ requires._and = validators.IS_NOT_IN_DB(db, field)
if field.tablename == field_type[10:]:
return validators.IS_EMPTY_OR(requires)
return requires
@@ -7254,6 +7364,7 @@ def sqlhtml_validators(field):
field_type.find('.') < 0 and \
field_type[15:] in db.tables:
referenced = db[field_type[15:]]
+
def list_ref_repr(ids, row=None, r=referenced, f=ff):
if not ids:
return None
@@ -7262,46 +7373,50 @@ def sqlhtml_validators(field):
if isinstance(db._adapter, GoogleDatastoreAdapter):
def count(values): return db(id.belongs(values)).select(id)
rx = range(0, len(ids), 30)
- refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
+ refs = reduce(lambda a, b:a&b, [count(ids[i:i+30]) for i in rx])
else:
refs = db(id.belongs(ids)).select(id)
- return (refs and ', '.join(f(r,x.id) for x in refs) or '')
+ return (refs and ', '.join(f(r, x.id) for x in refs) or '')
+
field.represent = field.represent or list_ref_repr
if hasattr(referenced, '_format') and referenced._format:
- requires = validators.IS_IN_DB(db,referenced._id,
- referenced._format,multiple=True)
+ requires = validators.IS_IN_DB(db, referenced._id,
+ referenced._format, multiple=True)
else:
- requires = validators.IS_IN_DB(db,referenced._id,
+ requires = validators.IS_IN_DB(db, referenced._id,
multiple=True)
if field.unique:
- requires._and = validators.IS_NOT_IN_DB(db,field)
+ requires._and = validators.IS_NOT_IN_DB(db, field)
if not field.notnull:
requires = validators.IS_EMPTY_OR(requires)
return requires
elif field_type.startswith('list:'):
- def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
+ def repr_list(values, row=None): return', '.join(str(v) for v in (values or []))
field.represent = field.represent or repr_list
if field.unique:
- requires.insert(0,validators.IS_NOT_IN_DB(db,field))
+ requires.append(validators.IS_NOT_IN_DB(db, field))
sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
if field.notnull and not field_type[:2] in sff:
- requires.insert(0, validators.IS_NOT_EMPTY())
+ requires.append(validators.IS_NOT_EMPTY())
elif not field.notnull and field_type[:2] in sff and requires:
- requires[-1] = validators.IS_EMPTY_OR(requires[-1])
+ requires[0] = validators.IS_EMPTY_OR(requires[0])
return requires
def bar_escape(item):
return str(item).replace('|', '||')
+
def bar_encode(items):
return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
+
def bar_decode_integer(value):
- if not hasattr(value,'split') and hasattr(value,'read'):
+ if not hasattr(value, 'split') and hasattr(value, 'read'):
value = value.read()
return [long(x) for x in value.split('|') if x.strip()]
+
def bar_decode_string(value):
return [x.replace('||', '|') for x in
REGEX_UNPACK.split(value[1:-1]) if x.strip()]
@@ -7310,29 +7425,29 @@ def bar_decode_string(value):
class Row(object):
"""
- a dictionary that lets you do d['a'] as well as d.a
- this is only used to store a Row
+ A dictionary that lets you do d['a'] as well as d.a
+ this is only used to store a `Row`
"""
- __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs)
+ __init__ = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs)
def __getitem__(self, k):
if isinstance(k, Table):
try:
return ogetattr(self, k._tablename)
- except (KeyError,AttributeError,TypeError):
+ except (KeyError, AttributeError, TypeError):
pass
elif isinstance(k, Field):
try:
return ogetattr(self, k.name)
- except (KeyError,AttributeError,TypeError):
+ except (KeyError, AttributeError, TypeError):
pass
try:
return ogetattr(ogetattr(self, k.tablename), k.name)
- except (KeyError,AttributeError,TypeError):
+ except (KeyError, AttributeError, TypeError):
pass
- key=str(k)
+ key = str(k)
_extra = ogetattr(self, '__dict__').get('_extra', None)
if _extra is not None:
v = _extra.get(key, DEFAULT)
@@ -7340,20 +7455,20 @@ class Row(object):
return v
try:
return ogetattr(self, key)
- except (KeyError,AttributeError,TypeError):
+ except (KeyError, AttributeError, TypeError):
pass
m = REGEX_TABLE_DOT_FIELD.match(key)
if m:
try:
return ogetattr(self, m.group(1))[m.group(2)]
- except (KeyError,AttributeError,TypeError):
+ except (KeyError, AttributeError, TypeError):
key = m.group(2)
try:
return ogetattr(self, key)
- except (KeyError,AttributeError,TypeError), ae:
+ except (KeyError, AttributeError, TypeError), ae:
try:
- self[key] = ogetattr(self,'__get_lazy_reference__')(key)
+ self[key] = ogetattr(self, '__get_lazy_reference__')(key)
return self[key]
except:
raise ae
@@ -7366,12 +7481,11 @@ class Row(object):
__call__ = __getitem__
-
def get(self, key, default=None):
try:
return self.__getitem__(key)
except(KeyError, AttributeError, TypeError):
- return self.__dict__.get(key,default)
+ return self.__dict__.get(key, default)
has_key = __contains__ = lambda self, key: key in self.__dict__
@@ -7391,9 +7505,9 @@ class Row(object):
__str__ = __repr__ = lambda self: '' % self.as_dict()
- __int__ = lambda self: object.__getattribute__(self,'id')
+ __int__ = lambda self: object.__getattribute__(self, 'id')
- __long__ = lambda self: long(object.__getattribute__(self,'id'))
+ __long__ = lambda self: long(object.__getattribute__(self, 'id'))
__getattr__ = __getitem__
@@ -7406,13 +7520,13 @@ class Row(object):
# except:
# raise ae
- def __eq__(self,other):
+ def __eq__(self, other):
try:
return self.as_dict() == other.as_dict()
except AttributeError:
return False
- def __ne__(self,other):
+ def __ne__(self, other):
return not (self == other)
def __copy__(self):
@@ -7420,7 +7534,7 @@ class Row(object):
def as_dict(self, datetime_to_str=False, custom_types=None):
SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
- if isinstance(custom_types,(list,tuple,set)):
+ if isinstance(custom_types, (list, tuple, set)):
SERIALIZABLE_TYPES += list(custom_types)
elif custom_types:
SERIALIZABLE_TYPES.append(custom_types)
@@ -7429,24 +7543,25 @@ class Row(object):
v=d[k]
if d[k] is None:
continue
- elif isinstance(v,Row):
+ elif isinstance(v, Row):
d[k]=v.as_dict()
- elif isinstance(v,Reference):
+ elif isinstance(v, Reference):
d[k]=long(v)
- elif isinstance(v,decimal.Decimal):
+ elif isinstance(v, decimal.Decimal):
d[k]=float(v)
elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
if datetime_to_str:
- d[k] = v.isoformat().replace('T',' ')[:19]
- elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
+ d[k] = v.isoformat().replace('T', ' ')[:19]
+ elif not isinstance(v, tuple(SERIALIZABLE_TYPES)):
del d[k]
return d
def as_xml(self, row_name="row", colnames=None, indent=' '):
- def f(row,field,indent=' '):
- if isinstance(row,Row):
+
+ def f(row, field, indent=' '):
+ if isinstance(row, Row):
spc = indent+' \n'
- items = [f(row[x],x,indent+' ') for x in row]
+ items = [f(row[x], x, indent+' ') for x in row]
return '%s<%s>\n%s\n%s%s>' % (
indent,
field,
@@ -7455,10 +7570,10 @@ class Row(object):
field)
elif not callable(row):
if REGEX_ALPHANUMERIC.match(field):
- return '%s<%s>%s%s>' % (indent,field,row,field)
+ return '%s<%s>%s%s>' % (indent, field, row, field)
else:
return '%s%s' % \
- (indent,field,row)
+ (indent, field, row)
else:
return None
return f(self, row_name, indent=indent)
@@ -7470,7 +7585,8 @@ class Row(object):
kwargs are passed to .as_dict method
only "object" mode supported
- serialize = False used by Rows.as_json
+ `serialize = False` used by Rows.as_json
+
TODO: return array mode with query column order
mode and colnames are not implemented
@@ -7499,14 +7615,15 @@ class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
-def smart_query(fields,text):
- if not isinstance(fields,(list,tuple)):
+
+def smart_query(fields, text):
+ if not isinstance(fields, (list, tuple)):
fields = [fields]
new_fields = []
for field in fields:
- if isinstance(field,Field):
+ if isinstance(field, Field):
new_fields.append(field)
- elif isinstance(field,Table):
+ elif isinstance(field, Table):
for ofield in field:
new_fields.append(ofield)
else:
@@ -7528,50 +7645,50 @@ def smart_query(fields,text):
text = text[:m.start()]+('#%i' % i)+text[m.end():]
constants[str(i)] = m.group()[1:-1]
i+=1
- text = re.sub('\s+',' ',text).lower()
- for a,b in [('&','and'),
- ('|','or'),
- ('~','not'),
- ('==','='),
- ('<','<'),
- ('>','>'),
- ('<=','<='),
- ('>=','>='),
- ('<>','!='),
- ('=<','<='),
- ('=>','>='),
- ('=','='),
- (' less or equal than ','<='),
- (' greater or equal than ','>='),
- (' equal or less than ','<='),
- (' equal or greater than ','>='),
- (' less or equal ','<='),
- (' greater or equal ','>='),
- (' equal or less ','<='),
- (' equal or greater ','>='),
- (' not equal to ','!='),
- (' not equal ','!='),
- (' equal to ','='),
- (' equal ','='),
- (' equals ','='),
- (' less than ','<'),
- (' greater than ','>'),
- (' starts with ','startswith'),
- (' ends with ','endswith'),
- (' not in ' , 'notbelongs'),
- (' in ' , 'belongs'),
- (' is ','=')]:
+ text = re.sub('\s+', ' ', text).lower()
+ for a, b in [('&', 'and'),
+ ('|', 'or'),
+ ('~', 'not'),
+ ('==', '='),
+ ('<', '<'),
+ ('>', '>'),
+ ('<=', '<='),
+ ('>=', '>='),
+ ('<>', '!='),
+ ('=<', '<='),
+ ('=>', '>='),
+ ('=', '='),
+ (' less or equal than ', '<='),
+ (' greater or equal than ', '>='),
+ (' equal or less than ', '<='),
+ (' equal or greater than ', '>='),
+ (' less or equal ', '<='),
+ (' greater or equal ', '>='),
+ (' equal or less ', '<='),
+ (' equal or greater ', '>='),
+ (' not equal to ', '!='),
+ (' not equal ', '!='),
+ (' equal to ', '='),
+ (' equal ', '='),
+ (' equals ', '='),
+ (' less than ', '<'),
+ (' greater than ', '>'),
+ (' starts with ', 'startswith'),
+ (' ends with ', 'endswith'),
+ (' not in ', 'notbelongs'),
+ (' in ', 'belongs'),
+ (' is ', '=')]:
if a[0]==' ':
- text = text.replace(' is'+a,' %s ' % b)
- text = text.replace(a,' %s ' % b)
- text = re.sub('\s+',' ',text).lower()
- text = re.sub('(?P[\<\>\!\=])\s+(?P[\<\>\!\=])','\g\g',text)
+ text = text.replace(' is'+a, ' %s ' % b)
+ text = text.replace(a, ' %s ' % b)
+ text = re.sub('\s+', ' ', text).lower()
+ text = re.sub('(?P[\<\>\!\=])\s+(?P[\<\>\!\=])', '\g\g', text)
query = field = neg = op = logic = None
for item in text.split():
if field is None:
if item == 'not':
neg = True
- elif not neg and not logic and item in ('and','or'):
+ elif not neg and not logic and item in ('and', 'or'):
logic = item
elif item in field_map:
field = field_map[item]
@@ -7588,12 +7705,12 @@ def smart_query(fields,text):
value = item
if field.type in ('text', 'string', 'json'):
if op == '=': op = 'like'
- if op == '=': new_query = field==value
- elif op == '<': new_query = field': new_query = field>value
- elif op == '<=': new_query = field<=value
- elif op == '>=': new_query = field>=value
- elif op == '!=': new_query = field!=value
+ if op == '=': new_query = field == value
+ elif op == '<': new_query = field < value
+ elif op == '>': new_query = field > value
+ elif op == '<=': new_query = field <= value
+ elif op == '>=': new_query = field >= value
+ elif op == '!=': new_query = field != value
elif op == 'belongs': new_query = field.belongs(value.split(','))
elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
elif field.type in ('text', 'string', 'json'):
@@ -7602,7 +7719,7 @@ def smart_query(fields,text):
elif op == 'startswith': new_query = field.startswith(value)
elif op == 'endswith': new_query = field.endswith(value)
else: raise RuntimeError("Invalid operation")
- elif field._db._adapter.dbengine=='google:datastore' and \
+ elif field._db._adapter.dbengine == 'google:datastore' and \
field.type in ('list:integer', 'list:string', 'list:reference'):
if op == 'contains': new_query = field.contains(value)
else: raise RuntimeError("Invalid operation")
@@ -7617,30 +7734,90 @@ def smart_query(fields,text):
field = op = neg = logic = None
return query
+
class DAL(object):
"""
- an instance of this class represents a database connection
+ An instance of this class represents a database connection
- Example::
+ Args:
+ uri(str): contains information for connecting to a database.
+ Defaults to `'sqlite://dummy.db'`
- db = DAL('sqlite://test.db')
+ Note:
+ experimental: you can specify a dictionary as uri
+ parameter i.e. with::
- or
+ db = DAL({"uri": "sqlite://storage.sqlite",
+ "tables": {...}, ...})
+
+ for an example of dict input you can check the output
+ of the scaffolding db model with
+
+ db.as_dict()
+
+ Note that for compatibility with Python older than
+ version 2.6.5 you should cast your dict input keys
+ to str due to a syntax limitation on kwarg names.
+ for proper DAL dictionary input you can use one of::
+
+ obj = serializers.cast_keys(dict, [encoding="utf-8"])
+ #or else (for parsing json input)
+ obj = serializers.loads_json(data, unicode_keys=False)
+
+ pool_size: How many open connections to make to the database object.
+ folder: where .table files will be created. Automatically set within
+ web2py. Use an explicit path when using DAL outside web2py
+ db_codec: string encoding of the database (default: 'UTF-8')
+ table_hash: database identifier with .tables. If your connection hash
+ changes, you can still use old .tables if they have db_hash
+ as prefix
+ check_reserved: list of adapters to check tablenames and column names
+ against sql/nosql reserved keywords. Defaults to `None`
+
+ - 'common' List of sql keywords that are common to all database
+ types such as "SELECT, INSERT". (recommended)
+ - 'all' Checks against all known SQL keywords
+ - ''' Checks against the specific adapters list of
+ keywords
+ - '_nonreserved' Checks against the specific adapters
+ list of nonreserved keywords. (if available)
+
+ migrate: sets default migrate behavior for all tables
+ fake_migrate: sets default fake_migrate behavior for all tables
+ migrate_enabled: If set to False disables ALL migrations
+ fake_migrate_all: If set to True fake migrates ALL tables
+ attempts: Number of times to attempt connecting
+ auto_import: If set to True, tries import automatically table
+ definitions from the databases folder (works only for simple models)
+ bigint_id: If set, turn on bigint instead of int for id and reference
+ fields
+ lazy_tables: delays table definition until table access
+ after_connection: can be a callable that will be executed after the
+ connection
+
+ Example:
+ Use as::
+
+ db = DAL('sqlite://test.db')
+
+ or::
+
+ db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
+
+ db.define_table('tablename', Field('fieldname1'),
+ Field('fieldname2'))
- db = DAL(**{"uri": ..., "tables": [...]...}) # experimental
- db.define_table('tablename', Field('fieldname1'),
- Field('fieldname2'))
"""
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
- if not hasattr(THREAD_LOCAL,'db_instances'):
+ if not hasattr(THREAD_LOCAL, 'db_instances'):
THREAD_LOCAL.db_instances = {}
- if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
+ if not hasattr(THREAD_LOCAL, 'db_instances_zombie'):
THREAD_LOCAL.db_instances_zombie = {}
if uri == '