From 377a69ced92d9ea21b0a823b1435b10849c57c1e Mon Sep 17 00:00:00 2001 From: BuhtigithuB Date: Sun, 4 May 2014 12:02:08 -0400 Subject: [PATCH 1/2] PEP8 improvements white space here and there I search with regexp ",[^\s-]" to spot missing space after comma. I also, add double break line in front of function or class. I change a couple of comparator in order to add space in front and after " == " for instance. --- gluon/dal.py | 4101 ++++++++++++++++++++++++++------------------------ 1 file changed, 2105 insertions(+), 1996 deletions(-) diff --git a/gluon/dal.py b/gluon/dal.py index 796e7831..047433f6 100644 --- a/gluon/dal.py +++ b/gluon/dal.py @@ -44,11 +44,11 @@ Example of usage:: >>> # from dal import DAL, Field ### create DAL connection (and create DB if it doesn't exist) - >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), + >>> db = DAL(('sqlite://storage.sqlite', 'mysql://a:b@localhost/x'), ... folder=None) ### define a table 'person' (create/alter as necessary) - >>> person = db.define_table('person',Field('name','string')) + >>> person = db.define_table('person', Field('name', 'string')) ### insert a record >>> id = person.insert(name='James') @@ -60,7 +60,7 @@ Example of usage:: >>> james = person(name='James') ### retrieve it by arbitrary query - >>> query = (person.name=='James') & (person.name.startswith('J')) + >>> query = (person.name == 'James') & (person.name.startswith('J')) >>> james = db(query).select(person.ALL)[0] ### update one record @@ -77,7 +77,7 @@ Example of usage:: ### retrieve multiple records (rows) >>> people = db(person).select(orderby=person.name, - ... groupby=person.name, limitby=(0,100)) + ... 
groupby=person.name, limitby=(0, 100)) ### further filter them >>> james = people.find(lambda row: row.name == 'James').first() @@ -147,11 +147,10 @@ DEFAULTLENGTH = {'string':512, 'text':2**15, 'blob':2**31} TIMINGSSIZE = 100 -SPATIALLIBS = { - 'Windows':'libspatialite', - 'Linux':'libspatialite.so', - 'Darwin':'libspatialite.dylib' - } +SPATIALLIBS = {'Windows':'libspatialite', + 'Linux':'libspatialite.so', + 'Darwin':'libspatialite.dylib' + } DEFAULT_URI = 'sqlite://dummy.db' import re @@ -191,7 +190,7 @@ else: from io import StringIO as StringIO import copyreg long = int - hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8')) + hashlib_md5 = lambda s: hashlib.md5(bytes(s, 'utf8')) bytes, unicode = bytes, str if PYTHON_VERSION[:2] < (2, 7): @@ -204,14 +203,13 @@ CALLABLETYPES = (types.LambdaType, types.FunctionType, types.BuiltinFunctionType, types.MethodType, types.BuiltinMethodType) -TABLE_ARGS = set( - ('migrate','primarykey','fake_migrate','format','redefine', - 'singular','plural','trigger_name','sequence_name','fields', - 'common_filter','polymodel','table_class','on_define','rname')) +TABLE_ARGS = set(('migrate', 'primarykey', 'fake_migrate', 'format', 'redefine', + 'singular', 'plural', 'trigger_name', 'sequence_name', 'fields', + 'common_filter', 'polymodel', 'table_class', 'on_define', 'rname')) -SELECT_ARGS = set( - ('orderby', 'groupby', 'limitby','required', 'cache', 'left', - 'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby')) +SELECT_ARGS = set(('orderby', 'groupby', 'limitby', 'required', 'cache', 'left', + 'distinct', 'having', 'join', 'for_update', 'processor', + 'cacheable', 'orderby_on_limitby')) ogetattr = object.__getattribute__ osetattr = object.__setattr__ @@ -225,6 +223,7 @@ try: from gluon.utils import web2py_uuid except (ImportError, SystemError): import uuid + def web2py_uuid(): return str(uuid.uuid4()) try: @@ -247,7 +246,7 @@ except ImportError: simplejson = None LOGGER = 
logging.getLogger("web2py.dal") -DEFAULT = lambda:0 +DEFAULT = lambda: 0 GLOBAL_LOCKER = threading.RLock() THREAD_LOCAL = threading.local() @@ -447,21 +446,21 @@ if not 'google' in DRIVERS: except: LOGGER.debug('no IMAP driver imaplib') -PLURALIZE_RULES = [ - (re.compile('child$'), re.compile('child$'), 'children'), - (re.compile('oot$'), re.compile('oot$'), 'eet'), - (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), - (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), - (re.compile('sis$'), re.compile('sis$'), 'ses'), - (re.compile('man$'), re.compile('man$'), 'men'), - (re.compile('ife$'), re.compile('ife$'), 'ives'), - (re.compile('eau$'), re.compile('eau$'), 'eaux'), - (re.compile('lf$'), re.compile('lf$'), 'lves'), - (re.compile('[sxz]$'), re.compile('$'), 'es'), - (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), - (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), - (re.compile('$'), re.compile('$'), 's'), - ] +PLURALIZE_RULES = [(re.compile('child$'), re.compile('child$'), 'children'), + (re.compile('oot$'), re.compile('oot$'), 'eet'), + (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), + (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), + (re.compile('sis$'), re.compile('sis$'), 'ses'), + (re.compile('man$'), re.compile('man$'), 'men'), + (re.compile('ife$'), re.compile('ife$'), 'ives'), + (re.compile('eau$'), re.compile('eau$'), 'eaux'), + (re.compile('lf$'), re.compile('lf$'), 'lves'), + (re.compile('[sxz]$'), re.compile('$'), 'es'), + (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), + (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), + (re.compile('$'), re.compile('$'), 's'), + ] + def pluralize(singular, rules=PLURALIZE_RULES): for line in rules: @@ -469,25 +468,31 @@ def pluralize(singular, rules=PLURALIZE_RULES): plural = re_search.search(singular) and re_sub.sub(replace, singular) if plural: return plural -def hide_password(uri): - if isinstance(uri,(list,tuple)): - return 
[hide_password(item) for item in uri] - return REGEX_NOPASSWD.sub('******',uri) -def OR(a,b): +def hide_password(uri): + if isinstance(uri, (list, tuple)): + return [hide_password(item) for item in uri] + return REGEX_NOPASSWD.sub('******', uri) + + +def OR(a, b): return a|b -def AND(a,b): + +def AND(a, b): return a&b + def IDENTITY(x): return x -def varquote_aux(name,quotestr='%s'): + +def varquote_aux(name, quotestr='%s'): return name if REGEX_W.match(name) else quotestr % name -def quote_keyword(a,keyword='timestamp'): + +def quote_keyword(a, keyword='timestamp'): regex = re.compile('\.keyword(?=\w)') - a = regex.sub('."%s"' % keyword,a) + a = regex.sub('."%s"' % keyword, a) return a if 'google' in DRIVERS: @@ -527,10 +532,10 @@ if 'google' in DRIVERS: return value elif isinstance(value, basestring): return decimal.Decimal(value) - raise gae.BadValueError("Property %s must be a Decimal or string."\ - % self.name) + raise gae.BadValueError("Property %s must be a Decimal or string." + % self.name) - #TODO Needs more testing + # TODO Needs more testing class NDBDecimalProperty(ndb.StringProperty): """ NDB decimal implementation @@ -560,8 +565,9 @@ if 'google' in DRIVERS: return value elif isinstance(value, basestring): return decimal.Decimal(value) - raise TypeError("Property %s must be a Decimal or string."\ - % self._name) + raise TypeError("Property %s must be a Decimal or string." 
+ % self._name) + ################################################################################### # class that handles connection pooling (all adapters are derived from this one) @@ -578,7 +584,7 @@ class ConnectionPool(object): # ## this allows gluon to commit/rollback all dbs in this thread - def close(self,action='commit',really=True): + def close(self, action='commit', really=True): if action: if callable(action): action(self) @@ -599,20 +605,20 @@ class ConnectionPool(object): @staticmethod def close_all_instances(action): """ to close cleanly databases in a multithreaded environment """ - dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() + dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items() for db_uid, db_group in dbs: for db in db_group: - if hasattr(db,'_adapter'): + if hasattr(db, '_adapter'): db._adapter.close(action) - getattr(THREAD_LOCAL,'db_instances',{}).clear() - getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear() + getattr(THREAD_LOCAL, 'db_instances', {}).clear() + getattr(THREAD_LOCAL, 'db_instances_zombie', {}).clear() if callable(action): action(None) return def find_or_make_work_folder(self): #this actually does not make the folder. 
it has to be there - self.folder = getattr(THREAD_LOCAL,'folder','') + self.folder = getattr(THREAD_LOCAL, 'folder', '') if (os.path.isabs(self.folder) and isinstance(self, UseDatabaseStoredFile) and @@ -641,7 +647,7 @@ class ConnectionPool(object): if the connection is not active (closed by db server) it will loop if not `self.pool_size` or no active connections in pool makes a new one """ - if getattr(self,'connection', None) != None: + if getattr(self, 'connection', None) is not None: return if f is None: f = self.connector @@ -677,6 +683,7 @@ class ConnectionPool(object): break self.after_connection_hook() + ################################################################################### # metaclass to prepare adapter classes static values ################################################################################### @@ -706,10 +713,10 @@ class AdapterMeta(type): return obj + ############################################################################### # this is a generic adapter that does nothing; all others are derived from this ############################################################################### - class BaseAdapter(ConnectionPool): __metaclass__ = AdapterMeta @@ -717,7 +724,7 @@ class BaseAdapter(ConnectionPool): native_json = False driver = None driver_name = None - drivers = () # list of drivers from which to pick + drivers = () # list of drivers from which to pick connection = None commit_on_alter_table = False support_distributed_transaction = False @@ -725,53 +732,51 @@ class BaseAdapter(ConnectionPool): can_select_for_update = True dbpath = None folder = None - connector = lambda *args, **kwargs: None # __init__ should override this + connector = lambda *args, **kwargs: None # __init__ should override this TRUE = 'T' FALSE = 'F' T_SEP = ' ' QUOTE_TEMPLATE = '"%s"' + types = {'boolean': 'CHAR(1)', + 'string': 'CHAR(%(length)s)', + 'text': 'TEXT', + 'json': 'TEXT', + 'password': 'CHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 
'CHAR(%(length)s)', + 'integer': 'INTEGER', + 'bigint': 'INTEGER', + 'float': 'DOUBLE', + 'double': 'DOUBLE', + 'decimal': 'DOUBLE', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' + 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } - types = { - 'boolean': 'CHAR(1)', - 'string': 'CHAR(%(length)s)', - 'text': 'TEXT', - 'json': 'TEXT', - 'password': 'CHAR(%(length)s)', - 'blob': 'BLOB', - 'upload': 'CHAR(%(length)s)', - 'integer': 'INTEGER', - 'bigint': 'INTEGER', - 'float':'DOUBLE', - 'double': 'DOUBLE', - 'decimal': 'DOUBLE', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'INTEGER PRIMARY KEY AUTOINCREMENT', - 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'TEXT', - 'list:string': 'TEXT', - 'list:reference': 'TEXT', - # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference' - 'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } - - def isOperationalError(self,exception): + def isOperationalError(self, exception): if not hasattr(self.driver, "OperationalError"): return None return isinstance(exception, self.driver.OperationalError) - def isProgrammingError(self,exception): + def 
isProgrammingError(self, exception): if not hasattr(self.driver, "ProgrammingError"): return None return isinstance(exception, self.driver.ProgrammingError) def id_query(self, table): - pkeys = getattr(table,'_primarykey',None) + pkeys = getattr(table, '_primarykey', None) if pkeys: return table[pkeys[0]] != None else: @@ -781,20 +786,20 @@ class BaseAdapter(ConnectionPool): return "'%s'" % obj.replace("'", "''") def smart_adapt(self, obj): - if isinstance(obj,(int,float)): + if isinstance(obj, (int, float)): return str(obj) return self.adapt(str(obj)) def file_exists(self, filename): - #to be used ONLY for files that on GAE may not be on filesystem + # to be used ONLY for files that on GAE may not be on filesystem return exists(filename) def file_open(self, filename, mode='rb', lock=True): - #to be used ONLY for files that on GAE may not be on filesystem + # to be used ONLY for files that on GAE may not be on filesystem if have_portalocker and lock: - fileobj = portalocker.LockedFile(filename,mode) + fileobj = portalocker.LockedFile(filename, mode) else: - fileobj = open(filename,mode) + fileobj = open(filename, mode) return fileobj def file_close(self, fileobj): @@ -805,15 +810,15 @@ class BaseAdapter(ConnectionPool): def file_delete(self, filename): os.unlink(filename) - def find_driver(self,adapter_args,uri=None): + def find_driver(self, adapter_args, uri=None): self.adapter_args = adapter_args - if getattr(self,'driver',None) != None: + if getattr(self, 'driver', None) != None: return drivers_available = [driver for driver in self.drivers if driver in globals()] if uri: - items = uri.split('://',1)[0].split(':') - request_driver = items[1] if len(items)>1 else None + items = uri.split('://', 1)[0].split(':') + request_driver = items[1] if len(items) > 1 else None else: request_driver = None request_driver = request_driver or adapter_args.get('driver') @@ -837,7 +842,7 @@ class BaseAdapter(ConnectionPool): """ isabs = None - logfilename = 
self.adapter_args.get('logfile','sql.log') + logfilename = self.adapter_args.get('logfile', 'sql.log') writelog = bool(logfilename) if writelog: isabs = os.path.isabs(logfilename) @@ -851,10 +856,9 @@ class BaseAdapter(ConnectionPool): logfile.write(message) self.file_close(logfile) - - def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, - adapter_args={},do_connect=True, after_connection=None): + adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "None" self.uri = uri @@ -862,21 +866,22 @@ class BaseAdapter(ConnectionPool): self.folder = folder self.db_codec = db_codec self._after_connection = after_connection + class Dummy(object): lastrowid = 1 + def __getattr__(self, value): return lambda *a, **b: [] self.connection = Dummy() self.cursor = Dummy() - - def sequence_name(self,tablename): + def sequence_name(self, tablename): return self.QUOTE_TEMPLATE % ('%s_sequence' % tablename) - def trigger_name(self,tablename): + def trigger_name(self, tablename): return '%s_sequence' % tablename - def varquote(self,name): + def varquote(self, name): return name def create_table(self, table, @@ -897,7 +902,7 @@ class BaseAdapter(ConnectionPool): sortable += 1 field_name = field.name field_type = field.type - if isinstance(field_type,SQLCustomType): + if isinstance(field_type, SQLCustomType): ftype = field_type.native or field_type.type elif field_type.startswith('reference'): referenced = field_type[10:].strip() @@ -906,9 +911,9 @@ class BaseAdapter(ConnectionPool): constraint_name = self.constraint_name(tablename, field_name) # if not '.' 
in referenced \ # and referenced != tablename \ - # and hasattr(table,'_primarykey'): + # and hasattr(table, '_primarykey'): # ftype = types['integer'] - #else: + # else: try: rtable = db[referenced] rfield = rtable._id @@ -917,7 +922,7 @@ class BaseAdapter(ConnectionPool): except (KeyError, ValueError, AttributeError), e: LOGGER.debug('Error: %s' % e) try: - rtablename,rfieldname = referenced.split('.') + rtablename, rfieldname = referenced.split('.') rtable = db[rtablename] rfield = rtable[rfieldname] except Exception, e: @@ -938,7 +943,7 @@ class BaseAdapter(ConnectionPool): else: ftype = ftype + \ types['reference FK'] % dict( - constraint_name = constraint_name, # should be quoted + constraint_name = constraint_name, # should be quoted foreign_key = rtable.sqlsafe + ' (' + rfield.sqlsafe_name + ')', table_name = table.sqlsafe, field_name = field.sqlsafe_name, @@ -951,12 +956,12 @@ class BaseAdapter(ConnectionPool): id_fieldname = table._id.sqlsafe_name else: #make a guess id_fieldname = self.QUOTE_TEMPLATE % 'id' - #gotcha: the referenced table must be defined before - #the referencing one to be able to create the table - #Also if it's not recommended, we can still support - #references to tablenames without rname to make - #migrations and model relationship work also if tables - #are not defined in order + # gotcha: the referenced table must be defined before + # the referencing one to be able to create the table + # Also if it's not recommended, we can still support + # references to tablenames without rname to make + # migrations and model relationship work also if tables + # are not defined in order if referenced == tablename: real_referenced = db[referenced].sqlsafe else: @@ -965,30 +970,29 @@ class BaseAdapter(ConnectionPool): or referenced) rfield = db[referenced]._id ftype = types[field_type[:9]] % dict( - index_name = self.QUOTE_TEMPLATE % (field_name+'__idx'), - field_name = field.sqlsafe_name, - constraint_name = self.QUOTE_TEMPLATE % 
constraint_name, - foreign_key = '%s (%s)' % (real_referenced, rfield.sqlsafe_name), + index_name=self.QUOTE_TEMPLATE % (field_name+'__idx'), + field_name=field.sqlsafe_name, + constraint_name=self.QUOTE_TEMPLATE % constraint_name, + foreign_key='%s (%s)' % (real_referenced, rfield.sqlsafe_name), on_delete_action=field.ondelete) elif field_type.startswith('list:reference'): ftype = types[field_type[:14]] elif field_type.startswith('decimal'): - precision, scale = map(int,field_type[8:-1].split(',')) + precision, scale = map(int, field_type[8:-1].split(',')) ftype = types[field_type[:7]] % \ - dict(precision=precision,scale=scale) + dict(precision=precision, scale=scale) elif field_type.startswith('geo'): - if not hasattr(self,'srid'): + if not hasattr(self, 'srid'): raise RuntimeError('Adapter does not support geometry') srid = self.srid geotype, parms = field_type[:-1].split('(') if not geotype in types: - raise SyntaxError( - 'Field: unknown field type: %s for %s' \ - % (field_type, field_name)) + raise SyntaxError('Field: unknown field type: %s for %s' + % (field_type, field_name)) ftype = types[geotype] if self.dbengine == 'postgres' and geotype == 'geometry': # parameters: schema, srid, dimension - dimension = 2 # GIS.dimension ??? + dimension = 2 # GIS.dimension ??? 
parms = parms.split(',') if len(parms) == 3: schema, srid, dimension = parms @@ -1003,8 +1007,8 @@ class BaseAdapter(ConnectionPool): dimension=dimension) postcreation_fields.append(ftype) elif not field_type in types: - raise SyntaxError('Field: unknown field type: %s for %s' % \ - (field_type, field_name)) + raise SyntaxError('Field: unknown field type: %s for %s' + % (field_type, field_name)) else: ftype = types[field_type]\ % dict(length=field.length) @@ -1040,8 +1044,8 @@ class BaseAdapter(ConnectionPool): sql_fields_aux[field_name] = dict(sql=ftype) # Postgres - PostGIS: # geometry fields are added after the table has been created, not now - if not (self.dbengine == 'postgres' and \ - field_type.startswith('geom')): + if not (self.dbengine == 'postgres' + and field_type.startswith('geom')): fields.append('%s %s' % (field.sqlsafe_name, ftype)) other = ';' @@ -1049,14 +1053,14 @@ class BaseAdapter(ConnectionPool): if self.dbengine == 'mysql': if not hasattr(table, "_primarykey"): fields.append('PRIMARY KEY (%s)' % (self.QUOTE_TEMPLATE % table._id.name)) - engine = self.adapter_args.get('engine','InnoDB') + engine = self.adapter_args.get('engine', 'InnoDB') other = ' ENGINE=%s CHARACTER SET utf8;' % engine fields = ',\n '.join(fields) for rtablename in TFK: rfields = TFK[rtablename] pkeys = [self.QUOTE_TEMPLATE % pk for pk in db[rtablename]._primarykey] - fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys ] + fkeys = [self.QUOTE_TEMPLATE % rfields[k].name for k in pkeys] fields = fields + ',\n ' + \ types['reference TFK'] % dict( table_name = table.sqlsafe, @@ -1067,10 +1071,10 @@ class BaseAdapter(ConnectionPool): table_rname = table.sqlsafe - if getattr(table,'_primarykey',None): + if getattr(table, '_primarykey', None): query = "CREATE TABLE %s(\n %s,\n %s) %s" % \ (table.sqlsafe, fields, - self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in table._primarykey])),other) + self.PRIMARY_KEY(', '.join([self.QUOTE_TEMPLATE % pk for pk in 
table._primarykey])), other) else: query = "CREATE TABLE %s(\n %s\n)%s" % \ (table.sqlsafe, fields, other) @@ -1101,7 +1105,7 @@ class BaseAdapter(ConnectionPool): % (datetime.datetime.today().isoformat(), query), table) if not fake_migrate: - self.create_sequence_and_triggers(query,table) + self.create_sequence_and_triggers(query, table) table._db.commit() # Postgres geom fields are added now, # after the table has been created @@ -1125,41 +1129,40 @@ class BaseAdapter(ConnectionPool): raise RuntimeError('File %s appears corrupted' % table._dbt) self.file_close(tfile) if sql_fields != sql_fields_old: - self.migrate_table( - table, - sql_fields, sql_fields_old, - sql_fields_aux, None, - fake_migrate=fake_migrate - ) + self.migrate_table(table, + sql_fields, sql_fields_old, + sql_fields_aux, None, + fake_migrate=fake_migrate + ) return query - def migrate_table( - self, - table, - sql_fields, - sql_fields_old, - sql_fields_aux, - logfile, - fake_migrate=False, - ): + def migrate_table(self, + table, + sql_fields, + sql_fields_old, + sql_fields_aux, + logfile, + fake_migrate=False, + ): # logfile is deprecated (moved to adapter.log method) db = table._db db._migrated.append(table._tablename) tablename = table._tablename + def fix(item): - k,v=item - if not isinstance(v,dict): - v=dict(type='unknown',sql=v) + k, v = item + if not isinstance(v, dict): + v = dict(type='unknown', sql=v) if self.ignore_field_case is not True: return k, v - return k.lower(),v + return k.lower(), v # make sure all field names are lower case to avoid # migrations because of case cahnge - sql_fields = dict(map(fix,sql_fields.iteritems())) - sql_fields_old = dict(map(fix,sql_fields_old.iteritems())) - sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems())) + sql_fields = dict(map(fix, sql_fields.iteritems())) + sql_fields_old = dict(map(fix, sql_fields_old.iteritems())) + sql_fields_aux = dict(map(fix, sql_fields_aux.iteritems())) if db._debug: - logging.debug('migrating %s to %s' % 
(sql_fields_old,sql_fields)) + logging.debug('migrating %s to %s' % (sql_fields_old, sql_fields)) keys = sql_fields.keys() for key in sql_fields_old: @@ -1176,11 +1179,10 @@ class BaseAdapter(ConnectionPool): if self.dbengine in ('postgres',) and \ sql_fields[key]['type'].startswith('geometry'): # 'sql' == ftype in sql - query = [ sql_fields[key]['sql'] ] + query = [sql_fields[key]['sql']] else: - query = ['ALTER TABLE %s ADD %s %s;' % \ - (table.sqlsafe, key, - sql_fields_aux[key]['sql'].replace(', ', new_add))] + query = ['ALTER TABLE %s ADD %s %s;' % (table.sqlsafe, key, + sql_fields_aux[key]['sql'].replace(', ', new_add))] metadata_change = True elif self.dbengine in ('sqlite', 'spatialite'): if key in sql_fields: @@ -1193,9 +1195,9 @@ class BaseAdapter(ConnectionPool): ftype.startswith('geometry')): geotype, parms = ftype[:-1].split('(') schema = parms.split(',')[0] - query = [ "SELECT DropGeometryColumn ('%(schema)s', "+ + query = ["SELECT DropGeometryColumn ('%(schema)s', "+ "'%(table)s', '%(field)s');" % - dict(schema=schema, table=tablename, field=key,) ] + dict(schema=schema, table=tablename, field=key)] elif self.dbengine in ('firebird',): query = ['ALTER TABLE %s DROP %s;' % (self.QUOTE_TEMPLATE % tablename, self.QUOTE_TEMPLATE % key)] @@ -1239,7 +1241,7 @@ class BaseAdapter(ConnectionPool): self.log(sub_query + '\n', table) if fake_migrate: if db._adapter.commit_on_alter_table: - self.save_dbt(table,sql_fields_current) + self.save_dbt(table, sql_fields_current) self.log('faked!\n', table) else: self.execute(sub_query) @@ -1250,18 +1252,18 @@ class BaseAdapter(ConnectionPool): # update table._dbt after alter table. 
if db._adapter.commit_on_alter_table: db.commit() - self.save_dbt(table,sql_fields_current) + self.save_dbt(table, sql_fields_current) self.log('success!\n', table) elif metadata_change: - self.save_dbt(table,sql_fields_current) + self.save_dbt(table, sql_fields_current) if metadata_change and not (query and db._adapter.commit_on_alter_table): db.commit() - self.save_dbt(table,sql_fields_current) + self.save_dbt(table, sql_fields_current) self.log('success!\n', table) - def save_dbt(self,table, sql_fields_current): + def save_dbt(self, table, sql_fields_current): tfile = self.file_open(table._dbt, 'w') pickle.dump(sql_fields_current, tfile) self.file_close(tfile) @@ -1298,7 +1300,7 @@ class BaseAdapter(ConnectionPool): return 'Random()' def NOT_NULL(self, default, field_type): - return 'NOT NULL DEFAULT %s' % self.represent(default,field_type) + return 'NOT NULL DEFAULT %s' % self.represent(default, field_type) def COALESCE(self, first, second): expressions = [self.expand(first)]+[self.expand(e) for e in second] @@ -1350,13 +1352,13 @@ class BaseAdapter(ConnectionPool): return 'INSERT INTO %s DEFAULT VALUES;' % (table.sqlsafe) def insert(self, table, fields): - query = self._insert(table,fields) + query = self._insert(table, fields) try: self.execute(query) except Exception: e = sys.exc_info()[1] - if hasattr(table,'_on_insert_error'): - return table._on_insert_error(table,fields,e) + if hasattr(table, '_on_insert_error'): + return table._on_insert_error(table, fields, e) raise e if hasattr(table, '_primarykey'): mydict = dict([(k[0].name, k[1]) for k in fields if k[0].name in table._primarykey]) @@ -1372,7 +1374,7 @@ class BaseAdapter(ConnectionPool): return rid def bulk_insert(self, table, items): - return [self.insert(table,item) for item in items] + return [self.insert(table, item) for item in items] def NOT(self, first): return '(NOT %s)' % self.expand(first) @@ -1392,15 +1394,15 @@ class BaseAdapter(ConnectionPool): return '(%s IN (%s))' % 
(self.expand(first), items) def REGEXP(self, first, second): - "Regular expression operator" + """Regular expression operator""" raise NotImplementedError def LIKE(self, first, second): - "Case sensitive like operator" + """Case sensitive like operator""" raise NotImplementedError def ILIKE(self, first, second): - "Case insensitive like operator" + """Case insensitive like operator""" return '(%s LIKE %s)' % (self.expand(first), self.expand(second, 'string')) @@ -1412,24 +1414,24 @@ class BaseAdapter(ConnectionPool): return '(%s LIKE %s)' % (self.expand(first), self.expand('%'+second, 'string')) - def CONTAINS(self,first,second,case_sensitive=False): - if first.type in ('string','text', 'json'): - if isinstance(second,Expression): - second = Expression(None,self.CONCAT('%',Expression( - None,self.REPLACE(second,('%','%%'))),'%')) + def CONTAINS(self, first, second, case_sensitive=False): + if first.type in ('string', 'text', 'json'): + if isinstance(second, Expression): + second = Expression(None, self.CONCAT('%', Expression( + None, self.REPLACE(second, ('%', '%%'))), '%')) else: - second = '%'+str(second).replace('%','%%')+'%' + second = '%'+str(second).replace('%', '%%')+'%' elif first.type.startswith('list:'): - if isinstance(second,Expression): - second = Expression(None,self.CONCAT( - '%|',Expression(None,self.REPLACE( - Expression(None,self.REPLACE( - second,('%','%%'))),('|','||'))),'|%')) + if isinstance(second, Expression): + second = Expression(None, self.CONCAT( + '%|', Expression(None, self.REPLACE( + Expression(None, self.REPLACE( + second, ('%', '%%'))), ('|', '||'))), '|%')) else: - second = '%|'+str(second).replace('%','%%')\ - .replace('|','||')+'|%' + second = '%|'+str(second).replace('%', '%%')\ + .replace('|', '||')+'|%' op = case_sensitive and self.LIKE or self.ILIKE - return op(first,second) + return op(first, second) def EQ(self, first, second=None): if second is None: @@ -1443,41 +1445,41 @@ class BaseAdapter(ConnectionPool): return '(%s <> 
%s)' % (self.expand(first), self.expand(second, first.type)) - def LT(self,first,second=None): + def LT(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s < None" % first) return '(%s < %s)' % (self.expand(first), - self.expand(second,first.type)) + self.expand(second, first.type)) - def LE(self,first,second=None): + def LE(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s <= None" % first) return '(%s <= %s)' % (self.expand(first), - self.expand(second,first.type)) + self.expand(second, first.type)) - def GT(self,first,second=None): + def GT(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s > None" % first) return '(%s > %s)' % (self.expand(first), - self.expand(second,first.type)) + self.expand(second, first.type)) - def GE(self,first,second=None): + def GE(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s >= None" % first) return '(%s >= %s)' % (self.expand(first), - self.expand(second,first.type)) + self.expand(second, first.type)) def is_numerical_type(self, ftype): - return ftype in ('integer','boolean','double','bigint') or \ + return ftype in ('integer', 'boolean', 'double', 'bigint') or \ ftype.startswith('decimal') def REPLACE(self, first, (second, third)): - return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'), - self.expand(second,'string'), - self.expand(third,'string')) + return 'REPLACE(%s,%s,%s)' % (self.expand(first, 'string'), + self.expand(second, 'string'), + self.expand(third, 'string')) def CONCAT(self, *items): - return '(%s)' % ' || '.join(self.expand(x,'string') for x in items) + return '(%s)' % ' || '.join(self.expand(x, 'string') for x in items) def ADD(self, first, second): if self.is_numerical_type(first.type) or isinstance(first.type, Field): @@ -1503,12 +1505,12 @@ class BaseAdapter(ConnectionPool): self.expand(second, first.type)) def AS(self, first, second): - return '%s AS %s' % 
(self.expand(first), second) + return '%s AS %s' % (self.expand(first), second) def ON(self, first, second): table_rname = self.table_alias(first) if use_common_filters(second): - second = self.common_filter(second,[first._tablename]) + second = self.common_filter(second, [first._tablename]) return ('%s ON %s') % (self.expand(table_rname), self.expand(second)) def INVERT(self, first): @@ -1528,8 +1530,9 @@ class BaseAdapter(ConnectionPool): out = '%s.%s' % (table_rname, expression._rname or (self.QUOTE_TEMPLATE % (expression.name))) else: out = '%s.%s' % (self.QUOTE_TEMPLATE % et._tablename, self.QUOTE_TEMPLATE % expression.name) - if field_type == 'string' and not expression.type in ( - 'string','text','json','password'): + if field_type == 'string' \ + and not expression.type in ('string', 'text', 'json', + 'password'): out = self.CAST(out, self.types['text']) return out elif isinstance(expression, (Expression, Query)): @@ -1540,18 +1543,18 @@ class BaseAdapter(ConnectionPool): if not second is None: out = op(first, second, **optional_args) elif not first is None: - out = op(first,**optional_args) + out = op(first, **optional_args) elif isinstance(op, str): if op.endswith(';'): - op=op[:-1] + op = op[:-1] out = '(%s)' % op else: out = op() return out elif field_type: - return str(self.represent(expression,field_type)) - elif isinstance(expression,(list,tuple)): - return ','.join(self.represent(item,field_type) \ + return str(self.represent(expression, field_type)) + elif isinstance(expression, (list, tuple)): + return ','.join(self.represent(item, field_type) \ for item in expression) elif isinstance(expression, bool): return '1' if expression else '0' @@ -1563,7 +1566,6 @@ class BaseAdapter(ConnectionPool): tbl = self.db[tbl] return tbl.sqlsafe_alias - def alias(self, table, alias): """ Given a table object, makes a new table object @@ -1615,8 +1617,8 @@ class BaseAdapter(ConnectionPool): except Exception: e = sys.exc_info()[1] table = self.db[tablename] - if 
hasattr(table,'_on_update_error'): - return table._on_update_error(table,query,fields,e) + if hasattr(table, '_on_update_error'): + return table._on_update_error(table, query, fields, e) raise e try: return self.cursor.rowcount @@ -1645,21 +1647,21 @@ class BaseAdapter(ConnectionPool): try: counter = self.cursor.rowcount except: - counter = None + counter = None ### special code to handle CASCADE in SQLite & SpatiaLite if self.dbengine in ('sqlite', 'spatialite') and counter: for field in table._referenced_by: - if field.type=='reference '+table._tablename \ - and field.ondelete=='CASCADE': + if field.type == 'reference '+table._tablename \ + and field.ondelete == 'CASCADE': db(field.belongs(deleted)).delete() ### end special code to handle CASCADE in SQLite & SpatiaLite return counter def get_table(self, query): tablenames = self.tables(query) - if len(tablenames)==1: + if len(tablenames) == 1: return tablenames[0] - elif len(tablenames)<1: + elif len(tablenames) < 1: raise RuntimeError("No table selected") else: raise RuntimeError("Too many tables selected") @@ -1669,15 +1671,15 @@ class BaseAdapter(ConnectionPool): new_fields = [] append = new_fields.append for item in fields: - if isinstance(item,SQLALL): + if isinstance(item, SQLALL): new_fields += item._table - elif isinstance(item,str): + elif isinstance(item, str): m = self.REGEX_TABLE_DOT_FIELD.match(item) if m: - tablename,fieldname = m.groups() + tablename, fieldname = m.groups() append(db[tablename][fieldname]) else: - append(Expression(db,lambda item=item:item)) + append(Expression(db, lambda item=item: item)) else: append(item) # ## if no fields specified take them all from the requested tables @@ -1698,7 +1700,7 @@ class BaseAdapter(ConnectionPool): if isinstance(field, basestring): m = self.REGEX_TABLE_DOT_FIELD.match(field) if m: - tn,fn = m.groups() + tn, fn = m.groups() field = self.db[tn][fn] for tablename in tables(field): if not tablename in tablenames: @@ -1706,13 +1708,17 @@ class 
BaseAdapter(ConnectionPool): if len(tablenames) < 1: raise SyntaxError('Set: no tables selected') + def colexpand(field): return self.expand(field, colnames=True) + self._colnames = map(colexpand, fields) + def geoexpand(field): - if isinstance(field.type,str) and field.type.startswith('geo') and isinstance(field, Field): + if isinstance(field.type, str) and field.type.startswith('geo') and isinstance(field, Field): field = field.st_astext() return self.expand(field) + sql_f = ', '.join(map(geoexpand, fields)) sql_o = '' sql_s = '' @@ -1736,7 +1742,7 @@ class BaseAdapter(ConnectionPool): if not isinstance(inner_join, (tuple, list)): inner_join = [inner_join] ijoint = [t._tablename for t in inner_join - if not isinstance(t,Expression)] + if not isinstance(t, Expression)] ijoinon = [t for t in inner_join if isinstance(t, Expression)] itables_to_merge={} #issue 490 [itables_to_merge.update( @@ -1770,26 +1776,25 @@ class BaseAdapter(ConnectionPool): excluded = tablenames if use_common_filters(query): - query = self.common_filter(query,tablenames_for_common_filters) + query = self.common_filter(query, tablenames_for_common_filters) sql_w = ' WHERE ' + self.expand(query) if query else '' if inner_join and not left: - sql_t = ', '.join([self.table_alias(t) for t in iexcluded + \ - itables_to_merge.keys()]) + sql_t = ', '.join([self.table_alias(t) + for t in iexcluded + itables_to_merge.keys()]) for t in ijoinon: sql_t += ' %s %s' % (icommand, t) elif not inner_join and left: - sql_t = ', '.join([self.table_alias(t) for t in excluded + \ - tables_to_merge.keys()]) + sql_t = ', '.join([self.table_alias(t) + for t in excluded + tables_to_merge.keys()]) if joint: sql_t += ' %s %s' % (command, ','.join([t for t in joint])) for t in joinon: sql_t += ' %s %s' % (command, t) elif inner_join and left: - all_tables_in_query = set(important_tablenames + \ - iimportant_tablenames + \ - tablenames) + all_tables_in_query = set(important_tablenames + + iimportant_tablenames + 
tablenames) tables_in_joinon = set(joinont + ijoinont) tables_not_in_joinon = \ all_tables_in_query.difference(tables_in_joinon) @@ -1840,7 +1845,7 @@ class BaseAdapter(ConnectionPool): def _fetchall(self): return self.cursor.fetchall() - def _select_aux(self,sql,fields,attributes): + def _select_aux(self, sql, fields, attributes): args_get = attributes.get cache = args_get('cache',None) if not cache: @@ -1853,14 +1858,14 @@ class BaseAdapter(ConnectionPool): def _select_aux2(): self.execute(sql) return self._fetchall() - rows = cache_model(key,_select_aux2,time_expire) - if isinstance(rows,tuple): + rows = cache_model(key, _select_aux2, time_expire) + if isinstance(rows, tuple): rows = list(rows) - limitby = args_get('limitby', None) or (0,) - rows = self.rowslice(rows,limitby[0],None) - processor = args_get('processor',self.parse) - cacheable = args_get('cacheable',False) - return processor(rows,fields,self._colnames,cacheable=cacheable) + limitby = args_get('limitby', None) or (0, ) + rows = self.rowslice(rows, limitby[0], None) + processor = args_get('processor', self.parse) + cacheable = args_get('cacheable', False) + return processor(rows, fields, self._colnames, cacheable=cacheable) def select(self, query, fields, attributes): """ @@ -1868,18 +1873,18 @@ class BaseAdapter(ConnectionPool): """ sql = self._select(query, fields, attributes) cache = attributes.get('cache', None) - if cache and attributes.get('cacheable',False): + if cache and attributes.get('cacheable', False): del attributes['cache'] (cache_model, time_expire) = cache key = self.uri + '/' + sql - if len(key)>200: key = hashlib_md5(key).hexdigest() - args = (sql,fields,attributes) + if len(key) > 200: key = hashlib_md5(key).hexdigest() + args = (sql, fields, attributes) return cache_model( key, - lambda self=self,args=args:self._select_aux(*args), + lambda self=self, args=args: self._select_aux(*args), time_expire) else: - return self._select_aux(sql,fields,attributes) + return 
self._select_aux(sql, fields, attributes) def _count(self, query, distinct=None): tablenames = self.tables(query) @@ -1891,7 +1896,7 @@ class BaseAdapter(ConnectionPool): sql_w = '' sql_t = ','.join(self.table_alias(t) for t in tablenames) if distinct: - if isinstance(distinct,(list, tuple)): + if isinstance(distinct, (list, tuple)): distinct = xorify(distinct) sql_d = self.expand(distinct) return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ @@ -1944,24 +1949,23 @@ class BaseAdapter(ConnectionPool): return ', ADD ' def constraint_name(self, table, fieldname): - return '%s_%s__constraint' % (table,fieldname) + return '%s_%s__constraint' % (table, fieldname) def create_sequence_and_triggers(self, query, table, **args): self.execute(query) - def log_execute(self, *a, **b): if not self.connection: raise ValueError(a[0]) if not self.connection: return None command = a[0] - if hasattr(self,'filter_sql_command'): + if hasattr(self, 'filter_sql_command'): command = self.filter_sql_command(command) if self.db._debug: LOGGER.debug('SQL: %s' % command) self.db._lastsql = command t0 = time.time() ret = self.cursor.execute(command, *a[1:], **b) - self.db._timings.append((command,time.time()-t0)) + self.db._timings.append((command, time.time()-t0)) del self.db._timings[:-TIMINGSSIZE] return ret @@ -1974,7 +1978,7 @@ class BaseAdapter(ConnectionPool): obj = obj() if isinstance(fieldtype, SQLCustomType): value = fieldtype.encoder(obj) - if fieldtype.type in ('string','text', 'json'): + if fieldtype.type in ('string', 'text', 'json'): return self.adapt(value) return value if isinstance(obj, (Expression, Field)): @@ -1985,9 +1989,9 @@ class BaseAdapter(ConnectionPool): elif not isinstance(obj, (list, tuple)): obj = [obj] if field_is_type('list:string'): - obj = map(str,obj) + obj = map(str, obj) else: - obj = map(int,[o for o in obj if o != '']) + obj = map(int, [o for o in obj if o != '']) # we don't want to bar_encode json objects if isinstance(obj, (list, tuple)) and (not fieldtype 
== "json"): obj = bar_encode(obj) @@ -2053,7 +2057,7 @@ class BaseAdapter(ConnectionPool): obj = simplejson.dumps(obj) else: raise RuntimeError("missing simplejson") - if not isinstance(obj,bytes): + if not isinstance(obj, bytes): obj = bytes(obj) try: obj.decode(self.db_codec) @@ -2094,7 +2098,7 @@ class BaseAdapter(ConnectionPool): return value else: key = REGEX_TYPE.match(field_type).group(0) - return self.parsemap[key](value,field_type) + return self.parsemap[key](value, field_type) def parse_reference(self, value, field_type): referee = field_type[10:].strip() @@ -2109,14 +2113,14 @@ class BaseAdapter(ConnectionPool): def parse_date(self, value, field_type): if isinstance(value, datetime.datetime): return value.date() - if not isinstance(value, (datetime.date,datetime.datetime)): + if not isinstance(value, (datetime.date, datetime.datetime)): (y, m, d) = map(int, str(value)[:10].strip().split('-')) value = datetime.date(y, m, d) return value def parse_time(self, value, field_type): if not isinstance(value, datetime.time): - time_items = map(int,str(value)[:8].strip().split(':')[:3]) + time_items = map(int, str(value)[:8].strip().split(':')[:3]) if len(time_items) == 3: (h, mi, s) = time_items else: @@ -2127,21 +2131,21 @@ class BaseAdapter(ConnectionPool): def parse_datetime(self, value, field_type): if not isinstance(value, datetime.datetime): value = str(value) - date_part,time_part,timezone = value[:10],value[11:19],value[19:] + date_part, time_part, timezone = value[:10], value[11:19], value[19:] if '+' in timezone: - ms,tz = timezone.split('+') - h,m = tz.split(':') + ms, tz = timezone.split('+') + h, m = tz.split(':') dt = datetime.timedelta(seconds=3600*int(h)+60*int(m)) elif '-' in timezone: - ms,tz = timezone.split('-') - h,m = tz.split(':') + ms, tz = timezone.split('-') + h, m = tz.split(':') dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m)) else: dt = None - (y, m, d) = map(int,date_part.split('-')) - time_parts = time_part and 
time_part.split(':')[:3] or (0,0,0) + (y, m, d) = map(int, date_part.split('-')) + time_parts = time_part and time_part.split(':')[:3] or (0, 0, 0) while len(time_parts)<3: time_parts.append(0) - time_items = map(int,time_parts) + time_items = map(int, time_parts) (h, mi, s) = time_items value = datetime.datetime(y, m, d, h, mi, s) if dt: @@ -2198,27 +2202,26 @@ class BaseAdapter(ConnectionPool): return value def build_parsemap(self): - self.parsemap = { - 'id':self.parse_id, - 'integer':self.parse_integer, - 'bigint':self.parse_integer, - 'float':self.parse_double, - 'double':self.parse_double, - 'reference':self.parse_reference, - 'boolean':self.parse_boolean, - 'date':self.parse_date, - 'time':self.parse_time, - 'datetime':self.parse_datetime, - 'blob':self.parse_blob, - 'decimal':self.parse_decimal, - 'json':self.parse_json, - 'list:integer':self.parse_list_integers, - 'list:reference':self.parse_list_references, - 'list:string':self.parse_list_strings, - } + self.parsemap = {'id': self.parse_id, + 'integer': self.parse_integer, + 'bigint': self.parse_integer, + 'float': self.parse_double, + 'double': self.parse_double, + 'reference': self.parse_reference, + 'boolean': self.parse_boolean, + 'date': self.parse_date, + 'time': self.parse_time, + 'datetime': self.parse_datetime, + 'blob': self.parse_blob, + 'decimal': self.parse_decimal, + 'json': self.parse_json, + 'list:integer': self.parse_list_integers, + 'list:reference': self.parse_list_references, + 'list:string': self.parse_list_strings, + } def parse(self, rows, fields, colnames, blob_decode=True, - cacheable = False): + cacheable=False): db = self.db virtualtables = [] new_rows = [] @@ -2233,26 +2236,26 @@ class BaseAdapter(ConnectionPool): field = table[fieldname] ft = field.type tmps.append((tablename, fieldname, table, field, ft)) - for (i,row) in enumerate(rows): + for (i, row) in enumerate(rows): new_row = Row() - for (j,colname) in enumerate(colnames): + for (j, colname) in enumerate(colnames): 
value = row[j] tmp = tmps[j] if tmp: - (tablename,fieldname,table,field,ft) = tmp + (tablename, fieldname, table, field, ft) = tmp colset = new_row.get(tablename, None) if colset is None: colset = new_row[tablename] = Row() if tablename not in virtualtables: virtualtables.append(tablename) - value = self.parse_value(value,ft,blob_decode) + value = self.parse_value(value, ft, blob_decode) if field.filter_out: value = field.filter_out(value) colset[fieldname] = value # for backward compatibility - if ft=='id' and fieldname!='id' and \ - not 'id' in table.fields: + if ft == 'id' and fieldname != 'id' \ + and not 'id' in table.fields: colset['id'] = value if ft == 'id' and not cacheable: @@ -2265,50 +2268,49 @@ class BaseAdapter(ConnectionPool): colset.gae_item = value else: id = value - colset.update_record = RecordUpdater(colset,table,id) - colset.delete_record = RecordDeleter(table,id) + colset.update_record = RecordUpdater(colset, table, id) + colset.delete_record = RecordDeleter(table, id) if table._db._lazy_tables: colset['__get_lazy_reference__'] = LazyReferenceGetter(table, id) for rfield in table._referenced_by: referee_link = db._referee_name and \ db._referee_name % dict( - table=rfield.tablename,field=rfield.name) + table=rfield.tablename, field=rfield.name) if referee_link and not referee_link in colset: - colset[referee_link] = LazySet(rfield,id) + colset[referee_link] = LazySet(rfield, id) else: if not '_extra' in new_row: new_row['_extra'] = Row() new_row['_extra'][colname] = \ self.parse_value(value, - fields[j].type,blob_decode) + fields[j].type, blob_decode) new_column_name = \ REGEX_SELECT_AS_PARSER.search(colname) if not new_column_name is None: column_name = new_column_name.groups(0) - setattr(new_row,column_name[0],value) + setattr(new_row, column_name[0], value) new_rows.append(new_row) rowsobj = Rows(db, new_rows, colnames, rawrows=rows) - for tablename in virtualtables: table = db[tablename] - fields_virtual = [(f,v) for (f,v) in 
table.iteritems() - if isinstance(v,FieldVirtual)] - fields_lazy = [(f,v) for (f,v) in table.iteritems() - if isinstance(v,FieldMethod)] + fields_virtual = [(f, v) for (f, v) in table.iteritems() + if isinstance(v, FieldVirtual)] + fields_lazy = [(f, v) for (f, v) in table.iteritems() + if isinstance(v, FieldMethod)] if fields_virtual or fields_lazy: for row in rowsobj.records: box = row[tablename] - for f,v in fields_virtual: + for f, v in fields_virtual: try: box[f] = v.f(row) except AttributeError: - pass # not enough fields to define virtual field - for f,v in fields_lazy: + pass # not enough fields to define virtual field + for f, v in fields_lazy: try: - box[f] = (v.handler or VirtualCommand)(v.f,row) + box[f] = (v.handler or VirtualCommand)(v.f, row) except AttributeError: - pass # not enough fields to define virtual field + pass # not enough fields to define virtual field ### old style virtual fields for item in table.virtualfields: @@ -2340,14 +2342,14 @@ class BaseAdapter(ConnectionPool): query = query & newquery return query - def CASE(self,query,t,f): + def CASE(self, query, t, f): def represent(x): - types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} + types = {type(True):'boolean', type(0):'integer', type(1.0):'double'} if x is None: return 'NULL' - elif isinstance(x,Expression): return str(x) - else: return self.represent(x,types.get(type(x),'string')) - return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ - (self.expand(query),represent(t),represent(f))) + elif isinstance(x, Expression): return str(x) + else: return self.represent(x, types.get(type(x), 'string')) + return Expression(self.db, 'CASE WHEN %s THEN %s ELSE %s END' % \ + (self.expand(query), represent(t), represent(f))) def sqlsafe_table(self, tablename, ot=None): if ot is not None: @@ -2357,28 +2359,27 @@ class BaseAdapter(ConnectionPool): def sqlsafe_field(self, fieldname): return self.QUOTE_TEMPLATE % fieldname + 
################################################################################### # List of all the available adapters; they all extend BaseAdapter. ################################################################################### - class SQLiteAdapter(BaseAdapter): - drivers = ('sqlite2','sqlite3') + drivers = ('sqlite2', 'sqlite3') can_select_for_update = None # support ourselves with BEGIN TRANSACTION - def EXTRACT(self,field,what): + def EXTRACT(self, field, what): return "web2py_extract('%s',%s)" % (what, self.expand(field)) @staticmethod def web2py_extract(lookup, s): - table = { - 'year': (0, 4), - 'month': (5, 7), - 'day': (8, 10), - 'hour': (11, 13), - 'minute': (14, 16), - 'second': (17, 19), - } + table = {'year': (0, 4), + 'month': (5, 7), + 'day': (8, 10), + 'hour': (11, 13), + 'minute': (14, 16), + 'second': (17, 19), + } try: if lookup != 'epoch': (i, j) = table[lookup] @@ -2392,7 +2393,7 @@ class SQLiteAdapter(BaseAdapter): def web2py_regexp(expression, item): return re.compile(expression).search(item) is not None - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db @@ -2410,7 +2411,7 @@ class SQLiteAdapter(BaseAdapter): if uri.startswith('sqlite:memory'): self.dbpath = ':memory:' else: - self.dbpath = uri.split('://',1)[1] + self.dbpath = uri.split('://', 1)[1] if self.dbpath[0] != '/': if PYTHON_VERSION[0] == 2: self.dbpath = pjoin( @@ -2421,8 +2422,10 @@ class SQLiteAdapter(BaseAdapter): driver_args['check_same_thread'] = False if not 'detect_types' in driver_args and do_connect: driver_args['detect_types'] = self.driver.PARSE_DECLTYPES + def connector(dbpath=self.dbpath, driver_args=driver_args): return self.driver.Connection(dbpath, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -2432,7 +2435,7 @@ 
class SQLiteAdapter(BaseAdapter): self.connection.create_function("REGEXP", 2, SQLiteAdapter.web2py_regexp) - if self.adapter_args.get('foreign_keys',True): + if self.adapter_args.get('foreign_keys', True): self.execute('PRAGMA foreign_keys=ON;') def _truncate(self, table, mode=''): @@ -2443,9 +2446,9 @@ class SQLiteAdapter(BaseAdapter): def lastrowid(self, table): return self.cursor.lastrowid - def REGEXP(self,first,second): + def REGEXP(self, first, second): return '(%s REGEXP %s)' % (self.expand(first), - self.expand(second,'string')) + self.expand(second, 'string')) def select(self, query, fields, attributes): """ @@ -2457,13 +2460,14 @@ class SQLiteAdapter(BaseAdapter): self.execute('BEGIN IMMEDIATE TRANSACTION;') return super(SQLiteAdapter, self).select(query, fields, attributes) + class SpatiaLiteAdapter(SQLiteAdapter): - drivers = ('sqlite3','sqlite2') + drivers = ('sqlite3', 'sqlite2') types = copy.copy(BaseAdapter.types) types.update(geometry='GEOMETRY') - def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db @@ -2481,7 +2485,7 @@ class SpatiaLiteAdapter(SQLiteAdapter): if uri.startswith('spatialite:memory'): self.dbpath = ':memory:' else: - self.dbpath = uri.split('://',1)[1] + self.dbpath = uri.split('://', 1)[1] if self.dbpath[0] != '/': self.dbpath = pjoin( self.folder.decode(path_encoding).encode('utf8'), self.dbpath) @@ -2489,8 +2493,10 @@ class SpatiaLiteAdapter(SQLiteAdapter): driver_args['check_same_thread'] = False if not 'detect_types' in driver_args and do_connect: driver_args['detect_types'] = self.driver.PARSE_DECLTYPES + def connector(dbpath=self.dbpath, driver_args=driver_args): return self.driver.Connection(dbpath, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -2514,60 +2520,60 @@ class 
SpatiaLiteAdapter(SQLiteAdapter): second['precision'], second['options']) def ST_ASTEXT(self, first): - return 'AsText(%s)' %(self.expand(first)) + return 'AsText(%s)' % (self.expand(first)) def ST_CONTAINS(self, first, second): - return 'Contains(%s,%s)' %(self.expand(first), + return 'Contains(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_DISTANCE(self, first, second): - return 'Distance(%s,%s)' %(self.expand(first), + return 'Distance(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_EQUALS(self, first, second): - return 'Equals(%s,%s)' %(self.expand(first), + return 'Equals(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): - return 'Intersects(%s,%s)' %(self.expand(first), + return 'Intersects(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): - return 'Overlaps(%s,%s)' %(self.expand(first), + return 'Overlaps(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_SIMPLIFY(self, first, second): - return 'Simplify(%s,%s)' %(self.expand(first), + return 'Simplify(%s,%s)' % (self.expand(first), self.expand(second, 'double')) def ST_TOUCHES(self, first, second): - return 'Touches(%s,%s)' %(self.expand(first), + return 'Touches(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def ST_WITHIN(self, first, second): - return 'Within(%s,%s)' %(self.expand(first), + return 'Within(%s,%s)' % (self.expand(first), self.expand(second, first.type)) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith if field_is_type('geo'): - srid = 4326 # Spatialite default srid for geometry + srid = 4326 # Spatialite default srid for geometry geotype, parms = fieldtype[:-1].split('(') parms = parms.split(',') if len(parms) >= 2: schema, srid = parms[:2] -# if field_is_type('geometry'): - value = "ST_GeomFromText('%s',%s)" %(obj, srid) -# elif field_is_type('geography'): -# value = 
"ST_GeogFromText('SRID=%s;%s')" %(srid, obj) -# else: -# raise SyntaxError, 'Invalid field type %s' %fieldtype + # if field_is_type('geometry'): + value = "ST_GeomFromText('%s',%s)" % (obj, srid) + # elif field_is_type('geography'): + # value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) + # else: + # raise SyntaxError, 'Invalid field type %s' %fieldtype return value return BaseAdapter.represent(self, obj, fieldtype) class JDBCSQLiteAdapter(SQLiteAdapter): - drivers = ('zxJDBC_sqlite',) + drivers = ('zxJDBC_sqlite', ) def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, @@ -2586,14 +2592,16 @@ class JDBCSQLiteAdapter(SQLiteAdapter): if uri.startswith('sqlite:memory'): self.dbpath = ':memory:' else: - self.dbpath = uri.split('://',1)[1] + self.dbpath = uri.split('://', 1)[1] if self.dbpath[0] != '/': self.dbpath = pjoin( self.folder.decode(path_encoding).encode('utf8'), self.dbpath) - def connector(dbpath=self.dbpath,driver_args=driver_args): + + def connector(dbpath=self.dbpath, driver_args=driver_args): return self.driver.connect( self.driver.getConnection('jdbc:sqlite:'+dbpath), **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -2607,45 +2615,44 @@ class JDBCSQLiteAdapter(SQLiteAdapter): class MySQLAdapter(BaseAdapter): - drivers = ('MySQLdb','pymysql', 'mysqlconnector') + drivers = ('MySQLdb', 'pymysql', 'mysqlconnector') commit_on_alter_table = True support_distributed_transaction = True - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'LONGTEXT', - 'json': 'LONGTEXT', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'LONGBLOB', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'DOUBLE', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'DATETIME', - 'id': 'INT AUTO_INCREMENT NOT NULL', - 'reference': 'INT, INDEX %(index_name)s 
(%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'LONGTEXT', - 'list:string': 'LONGTEXT', - 'list:reference': 'LONGTEXT', - 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL', - 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'LONGTEXT', + 'json': 'LONGTEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'LONGBLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'DATETIME', + 'id': 'INT AUTO_INCREMENT NOT NULL', + 'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'LONGTEXT', + 'list:string': 'LONGTEXT', + 'list:reference': 'LONGTEXT', + 'big-id': 'BIGINT AUTO_INCREMENT NOT NULL', + 'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } QUOTE_TEMPLATE = "`%s`" - def varquote(self,name): - return varquote_aux(name,'`%s`') + def varquote(self, name): + return varquote_aux(name, '`%s`') def RANDOM(self): return 'RAND()' - def SUBSTRING(self,field,parameters): + def SUBSTRING(self, field, parameters): return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) @@ -2653,13 +2660,13 @@ class MySQLAdapter(BaseAdapter): return 
"UNIX_TIMESTAMP(%s)" % self.expand(first) def CONCAT(self, *items): - return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items) + return 'CONCAT(%s)' % ','.join(self.expand(x, 'string') for x in items) - def REGEXP(self,first,second): + def REGEXP(self, first, second): return '(%s REGEXP %s)' % (self.expand(first), - self.expand(second,'string')) + self.expand(second, 'string')) - def _drop(self,table,mode): + def _drop(self, table, mode): # breaks db integrity but without this mysql does not drop table table_rname = table.sqlsafe return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname, @@ -2668,14 +2675,14 @@ class MySQLAdapter(BaseAdapter): def _insert_empty(self, table): return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe) - def distributed_transaction_begin(self,key): + def distributed_transaction_begin(self, key): self.execute('XA START;') - def prepare(self,key): + def prepare(self, key): self.execute("XA END;") self.execute("XA PREPARE;") - def commit_prepared(self,ley): + def commit_prepared(self, ley): self.execute("XA COMMIT;") def rollback_prepared(self,key): @@ -2683,19 +2690,19 @@ class MySQLAdapter(BaseAdapter): REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P[^?]+)(\?set_encoding=(?P\w+))?$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "mysql" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( @@ -2721,7 +2728,6 @@ class 
MySQLAdapter(BaseAdapter): port=port, charset=charset) - def connector(driver_args=driver_args): return self.driver.connect(**driver_args) self.connector = connector @@ -2731,60 +2737,57 @@ class MySQLAdapter(BaseAdapter): self.execute('SET FOREIGN_KEY_CHECKS=1;') self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';") - def lastrowid(self,table): + def lastrowid(self, table): self.execute('select last_insert_id();') return int(self.cursor.fetchone()[0]) class PostgreSQLAdapter(BaseAdapter): - drivers = ('psycopg2','pg8000') + drivers = ('psycopg2', 'pg8000') QUOTE_TEMPLATE = '"%s"' support_distributed_transaction = True - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'TEXT', - 'json': 'TEXT', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BYTEA', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INTEGER', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT8', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'SERIAL PRIMARY KEY', - 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'TEXT', - 'list:string': 'TEXT', - 'list:reference': 'TEXT', - 'geometry': 'GEOMETRY', - 'geography': 'GEOGRAPHY', - 'big-id': 'BIGSERIAL PRIMARY KEY', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'TEXT', + 'json': 'TEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BYTEA', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT8', + 
'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'SERIAL PRIMARY KEY', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + 'geometry': 'GEOMETRY', + 'geography': 'GEOGRAPHY', + 'big-id': 'BIGSERIAL PRIMARY KEY', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } - } + def varquote(self, name): + return varquote_aux(name, '"%s"') - - def varquote(self,name): - return varquote_aux(name,'"%s"') - - def adapt(self,obj): + def adapt(self, obj): if self.driver_name == 'psycopg2': return psycopg2_adapt(obj).getquoted() elif self.driver_name == 'pg8000': - return "'%s'" % str(obj).replace("%","%%").replace("'","''") + return "'%s'" % str(obj).replace("%", "%%").replace("'", "''") else: - return "'%s'" % str(obj).replace("'","''") + return "'%s'" % str(obj).replace("'", "''") - def sequence_name(self,table): + def sequence_name(self, table): return self.QUOTE_TEMPLATE % (table + '_id_seq') def RANDOM(self): @@ -2792,21 +2795,21 @@ class PostgreSQLAdapter(BaseAdapter): def ADD(self, first, second): t = first.type - if t in ('text','string','password', 'json', 'upload','blob'): + if t in ('text', 'string', 'password', 'json', 'upload', 'blob'): return '(%s || %s)' % (self.expand(first), self.expand(second, t)) else: return '(%s + %s)' % (self.expand(first), self.expand(second, t)) - def distributed_transaction_begin(self,key): + def distributed_transaction_begin(self, key): return - def prepare(self,key): + def prepare(self, key): 
self.execute("PREPARE TRANSACTION '%s';" % key) - def commit_prepared(self,key): + def commit_prepared(self, key): self.execute("COMMIT PREPARED '%s';" % key) - def rollback_prepared(self,key): + def rollback_prepared(self, key): self.execute("ROLLBACK PREPARED '%s';" % key) def create_sequence_and_triggers(self, query, table, **args): @@ -2818,21 +2821,21 @@ class PostgreSQLAdapter(BaseAdapter): REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:@]+)(\:(?P[0-9]+))?/(?P[^\?]+)(\?sslmode=(?P.+))?$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "postgres" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.srid = srid self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") @@ -2864,8 +2867,10 @@ class PostgreSQLAdapter(BaseAdapter): self.driver.__version__) else: self.__version__ = None - def connector(msg=msg,driver_args=driver_args): - return self.driver.connect(msg,**driver_args) + + def connector(msg=msg, driver_args=driver_args): + return self.driver.connect(msg, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -2874,7 +2879,7 @@ class PostgreSQLAdapter(BaseAdapter): self.execute("SET standard_conforming_strings=on;") self.try_json() - def lastrowid(self,table = None): + def lastrowid(self, table=None): self.execute("select lastval()") return int(self.cursor.fetchone()[0]) @@ -2894,33 +2899,33 @@ class PostgreSQLAdapter(BaseAdapter): self.native_json = True else: 
LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)") - def LIKE(self,first,second): - args = (self.expand(first), self.expand(second,'string')) + def LIKE(self, first, second): + args = (self.expand(first), self.expand(second, 'string')) if not first.type in ('string', 'text', 'json'): return '(%s LIKE %s)' % ( self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) else: return '(%s LIKE %s)' % args - def ILIKE(self,first,second): - args = (self.expand(first), self.expand(second,'string')) + def ILIKE(self, first, second): + args = (self.expand(first), self.expand(second, 'string')) if not first.type in ('string', 'text', 'json'): return '(%s LIKE %s)' % ( self.CAST(args[0], 'CHAR(%s)' % first.length), args[1]) else: return '(%s ILIKE %s)' % args - def REGEXP(self,first,second): + def REGEXP(self, first, second): return '(%s ~ %s)' % (self.expand(first), - self.expand(second,'string')) + self.expand(second, 'string')) - def STARTSWITH(self,first,second): + def STARTSWITH(self, first, second): return '(%s ILIKE %s)' % (self.expand(first), - self.expand(second+'%','string')) + self.expand(second+'%', 'string')) - def ENDSWITH(self,first,second): + def ENDSWITH(self, first, second): return '(%s ILIKE %s)' % (self.expand(first), - self.expand('%'+second,'string')) + self.expand('%'+second, 'string')) # GIS functions @@ -2928,82 +2933,92 @@ class PostgreSQLAdapter(BaseAdapter): """ http://postgis.org/docs/ST_AsGeoJSON.html """ - return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'], - self.expand(first), second['precision'], second['options']) + return 'ST_AsGeoJSON(%s,%s,%s,%s)' % (second['version'], + self.expand(first), + second['precision'], + second['options']) def ST_ASTEXT(self, first): """ http://postgis.org/docs/ST_AsText.html """ - return 'ST_AsText(%s)' %(self.expand(first)) + return 'ST_AsText(%s)' % (self.expand(first)) def ST_X(self, first): """ http://postgis.org/docs/ST_X.html """ - return 'ST_X(%s)' 
%(self.expand(first)) + return 'ST_X(%s)' % (self.expand(first)) def ST_Y(self, first): """ http://postgis.org/docs/ST_Y.html """ - return 'ST_Y(%s)' %(self.expand(first)) + return 'ST_Y(%s)' % (self.expand(first)) def ST_CONTAINS(self, first, second): """ http://postgis.org/docs/ST_Contains.html """ - return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Contains(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_DISTANCE(self, first, second): """ http://postgis.org/docs/ST_Distance.html """ - return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Distance(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_EQUALS(self, first, second): """ http://postgis.org/docs/ST_Equals.html """ - return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Equals(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): """ http://postgis.org/docs/ST_Intersects.html """ - return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Intersects(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): """ http://postgis.org/docs/ST_Overlaps.html """ - return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Overlaps(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_SIMPLIFY(self, first, second): """ http://postgis.org/docs/ST_Simplify.html """ - return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double')) + return 'ST_Simplify(%s,%s)' % (self.expand(first), + self.expand(second, 'double')) def ST_TOUCHES(self, first, second): """ http://postgis.org/docs/ST_Touches.html """ - return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Touches(%s,%s)' % (self.expand(first), + 
self.expand(second, first.type)) def ST_WITHIN(self, first, second): """ http://postgis.org/docs/ST_Within.html """ - return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type)) + return 'ST_Within(%s,%s)' % (self.expand(first), + self.expand(second, first.type)) def ST_DWITHIN(self, first, (second, third)): """ http://postgis.org/docs/ST_DWithin.html """ - return 'ST_DWithin(%s,%s,%s)' %(self.expand(first), - self.expand(second, first.type), - self.expand(third, 'double')) + return 'ST_DWithin(%s,%s,%s)' % (self.expand(first), + self.expand(second, first.type), + self.expand(third, 'double')) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith @@ -3014,9 +3029,9 @@ class PostgreSQLAdapter(BaseAdapter): if len(parms) >= 2: schema, srid = parms[:2] if field_is_type('geometry'): - value = "ST_GeomFromText('%s',%s)" %(obj, srid) + value = "ST_GeomFromText('%s',%s)" % (obj, srid) elif field_is_type('geography'): - value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj) + value = "ST_GeogFromText('SRID=%s;%s')" % (srid, obj) # else: # raise SyntaxError('Invalid field type %s' %fieldtype) return value @@ -3027,35 +3042,35 @@ class PostgreSQLAdapter(BaseAdapter): raise ValueError('Invalid mode: %s' % mode) return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';'] -class NewPostgreSQLAdapter(PostgreSQLAdapter): - drivers = ('psycopg2','pg8000') - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'TEXT', - 'json': 'TEXT', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BYTEA', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INTEGER', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT8', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'SERIAL PRIMARY KEY', - 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'BIGINT[]', - 'list:string': 'TEXT[]', - 'list:reference': 
'BIGINT[]', - 'geometry': 'GEOMETRY', - 'geography': 'GEOGRAPHY', - 'big-id': 'BIGSERIAL PRIMARY KEY', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } +class NewPostgreSQLAdapter(PostgreSQLAdapter): + drivers = ('psycopg2', 'pg8000') + + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'TEXT', + 'json': 'TEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BYTEA', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'SERIAL PRIMARY KEY', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'BIGINT[]', + 'list:string': 'TEXT[]', + 'list:reference': 'BIGINT[]', + 'geometry': 'GEOMETRY', + 'geography': 'GEOGRAPHY', + 'big-id': 'BIGSERIAL PRIMARY KEY', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } def parse_list_integers(self, value, field_type): return value @@ -3074,31 +3089,31 @@ class NewPostgreSQLAdapter(PostgreSQLAdapter): elif not isinstance(obj, (list, tuple)): obj = [obj] if field_is_type('list:string'): - obj = map(str,obj) + obj = map(str, obj) else: - obj = map(int,obj) + obj = map(int, obj) return 'ARRAY[%s]' % ','.join(repr(item) for item in obj) return BaseAdapter.represent(self, obj, fieldtype) class JDBCPostgreSQLAdapter(PostgreSQLAdapter): - drivers = ('zxJDBC',) + drivers = ('zxJDBC', ) REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P.+)$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, - adapter_args={}, do_connect=True, after_connection=None ): + adapter_args={}, do_connect=True, after_connection=None): self.db = db 
self.dbengine = "postgres" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") @@ -3116,8 +3131,10 @@ class JDBCPostgreSQLAdapter(PostgreSQLAdapter): raise SyntaxError('Database name required') port = m.group('port') or '5432' msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password) - def connector(msg=msg,driver_args=driver_args): - return self.driver.connect(*msg,**driver_args) + + def connector(msg=msg, driver_args=driver_args): + return self.driver.connect(*msg, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -3129,38 +3146,36 @@ class JDBCPostgreSQLAdapter(PostgreSQLAdapter): class OracleAdapter(BaseAdapter): - drivers = ('cx_Oracle',) + drivers = ('cx_Oracle', ) commit_on_alter_table = False - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR2(%(length)s)', - 'text': 'CLOB', - 'json': 'CLOB', - 'password': 'VARCHAR2(%(length)s)', - 'blob': 'CLOB', - 'upload': 'VARCHAR2(%(length)s)', - 'integer': 'INT', - 'bigint': 'NUMBER', - 'float': 'FLOAT', - 'double': 'BINARY_DOUBLE', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'CHAR(8)', - 'datetime': 'DATE', - 'id': 'NUMBER PRIMARY KEY', - 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'CLOB', - 'list:string': 'CLOB', - 'list:reference': 'CLOB', - 'big-id': 'NUMBER PRIMARY KEY', - 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT 
FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR2(%(length)s)', + 'text': 'CLOB', + 'json': 'CLOB', + 'password': 'VARCHAR2(%(length)s)', + 'blob': 'CLOB', + 'upload': 'VARCHAR2(%(length)s)', + 'integer': 'INT', + 'bigint': 'NUMBER', + 'float': 'FLOAT', + 'double': 'BINARY_DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'CHAR(8)', + 'datetime': 'DATE', + 'id': 'NUMBER PRIMARY KEY', + 'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + 'big-id': 'NUMBER PRIMARY KEY', + 'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } - - def trigger_name(self,tablename): + def trigger_name(self, tablename): return '%s_trigger' % tablename def LEFT_JOIN(self): @@ -3169,10 +3184,10 @@ class OracleAdapter(BaseAdapter): def RANDOM(self): return 'dbms_random.value' - def NOT_NULL(self,default,field_type): - return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) + def NOT_NULL(self, default, field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default, field_type) - def _drop(self,table,mode): + def _drop(self, table, mode): sequence_name = table._sequence_name return ['DROP TABLE 
%s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name] @@ -3204,7 +3219,7 @@ class OracleAdapter(BaseAdapter): return "to_date('%s','yyyy-mm-dd')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): - obj = obj.isoformat()[:19].replace('T',' ') + obj = obj.isoformat()[:19].replace('T', ' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: @@ -3212,7 +3227,7 @@ class OracleAdapter(BaseAdapter): return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj return None - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db @@ -3224,11 +3239,13 @@ class OracleAdapter(BaseAdapter): self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] if not 'threaded' in driver_args: - driver_args['threaded']=True - def connector(uri=ruri,driver_args=driver_args): - return self.driver.connect(uri,**driver_args) + driver_args['threaded'] = True + + def connector(uri=ruri, driver_args=driver_args): + return self.driver.connect(uri, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -3248,7 +3265,7 @@ class OracleAdapter(BaseAdapter): command = command[:m.start('clob')] + str(i) + command[m.end('clob'):] args.append(m.group('clob')[6:-2].replace("''", "'")) i += 1 - if command[-1:]==';': + if command[-1:] == ';': command = command[:-1] return self.log_execute(command, args) @@ -3278,14 +3295,14 @@ class OracleAdapter(BaseAdapter): SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL; END; """ % dict(trigger_name=trigger_name, tablename=tablename, - sequence_name=sequence_name,id=id_name)) + sequence_name=sequence_name, id=id_name)) - def lastrowid(self,table): + def lastrowid(self, 
table): sequence_name = table._sequence_name self.execute('SELECT %s.currval FROM dual;' % sequence_name) return long(self.cursor.fetchone()[0]) - #def parse_value(self, value, field_type, blob_decode=True): + # def parse_value(self, value, field_type, blob_decode=True): # if blob_decode and isinstance(value, cx_Oracle.LOB): # try: # value = value.read() @@ -3295,61 +3312,60 @@ class OracleAdapter(BaseAdapter): # return BaseAdapter.parse_value(self, value, field_type, blob_decode) def _fetchall(self): - if any(x[1]==cx_Oracle.CLOB for x in self.cursor.description): - return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \ - for c in r]) for r in self.cursor] + if any(x[1] == cx_Oracle.CLOB for x in self.cursor.description): + return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) + for c in r]) for r in self.cursor] else: return self.cursor.fetchall() def sqlsafe_table(self, tablename, ot=None): if ot is not None: - return (self.QUOTE_TEMPLATE + ' ' \ - + self.QUOTE_TEMPLATE) % (ot, tablename) + return (self.QUOTE_TEMPLATE + ' ' + self.QUOTE_TEMPLATE) \ + % (ot, tablename) return self.QUOTE_TEMPLATE % tablename class MSSQLAdapter(BaseAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) T_SEP = 'T' QUOTE_TEMPLATE = '"%s"' - types = { - 'boolean': 'BIT', - 'string': 'VARCHAR(%(length)s)', - 'text': 'TEXT', - 'json': 'TEXT', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'IMAGE', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATETIME', - 'time': 'CHAR(8)', - 'datetime': 'DATETIME', - 'id': 'INT IDENTITY PRIMARY KEY', - 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'TEXT', - 'list:string': 'TEXT', - 'list:reference': 'TEXT', - 'geometry': 'geometry', - 'geography': 'geography', - 'big-id': 'BIGINT IDENTITY 
PRIMARY KEY', - 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'BIT', + 'string': 'VARCHAR(%(length)s)', + 'text': 'TEXT', + 'json': 'TEXT', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'IMAGE', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATETIME', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'INT IDENTITY PRIMARY KEY', + 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + 'geometry': 'geometry', + 'geography': 'geography', + 'big-id': 'BIGINT IDENTITY PRIMARY KEY', + 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } - def concat_add(self,tablename): + def concat_add(self, tablename): return '; ALTER TABLE %s ADD ' % tablename - def varquote(self,name): - return varquote_aux(name,'[%s]') + def varquote(self, name): + return varquote_aux(name, '[%s]') - def EXTRACT(self,field,what): + def EXTRACT(self, field, what): 
return "DATEPART(%s,%s)" % (what, self.expand(field)) def LEFT_JOIN(self): @@ -3364,7 +3380,7 @@ class MSSQLAdapter(BaseAdapter): def CAST(self, first, second): return first # apparently no cast necessary in MSSQL - def SUBSTRING(self,field,parameters): + def SUBSTRING(self, field, parameters): return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1]) def PRIMARY_KEY(self,key): @@ -3389,14 +3405,14 @@ class MSSQLAdapter(BaseAdapter): REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P[^\?]+)(\?(?P.*))?$') REGEX_ARGPATTERN = re.compile('(?P[^=]+)=(?P[^&]*)') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "mssql" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec @@ -3404,7 +3420,7 @@ class MSSQLAdapter(BaseAdapter): self.srid = srid self.find_or_make_work_folder() # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] if '@' not in ruri: try: m = self.REGEX_DSN.match(ruri) @@ -3441,24 +3457,26 @@ class MSSQLAdapter(BaseAdapter): # Parse the optional url name-value arg pairs after the '?' # (in the form of arg1=value1&arg2=value2&...) 
# Default values (drivers like FreeTDS insist on uppercase parameter keys) - argsdict = { 'DRIVER':'{SQL Server}' } + argsdict = {'DRIVER':'{SQL Server}'} urlargs = m.group('urlargs') or '' for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs): argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue') urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()]) cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \ % (host, port, db, user, password, urlargs) - def connector(cnxn=cnxn,driver_args=driver_args): - return self.driver.connect(cnxn,**driver_args) + + def connector(cnxn=cnxn, driver_args=driver_args): + return self.driver.connect(cnxn, **driver_args) + self.connector = connector if do_connect: self.reconnect() - def lastrowid(self,table): + def lastrowid(self, table): #self.execute('SELECT @@IDENTITY;') self.execute('SELECT SCOPE_IDENTITY();') return long(self.cursor.fetchone()[0]) - def rowslice(self,rows,minimum=0,maximum=None): + def rowslice(self, rows, minimum=0, maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] @@ -3467,37 +3485,44 @@ class MSSQLAdapter(BaseAdapter): return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first) def CONCAT(self, *items): - return '(%s)' % ' + '.join(self.expand(x,'string') for x in items) + return '(%s)' % ' + '.join(self.expand(x, 'string') for x in items) # GIS Spatial Extensions # No STAsGeoJSON in MSSQL def ST_ASTEXT(self, first): - return '%s.STAsText()' %(self.expand(first)) + return '%s.STAsText()' % (self.expand(first)) def ST_CONTAINS(self, first, second): - return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STContains(%s)=1' % (self.expand(first), + self.expand(second, first.type)) def ST_DISTANCE(self, first, second): - return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type)) + return '%s.STDistance(%s)' % (self.expand(first), + self.expand(second, 
first.type)) def ST_EQUALS(self, first, second): - return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STEquals(%s)=1' % (self.expand(first), + self.expand(second, first.type)) def ST_INTERSECTS(self, first, second): - return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STIntersects(%s)=1' % (self.expand(first), + self.expand(second, first.type)) def ST_OVERLAPS(self, first, second): - return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STOverlaps(%s)=1' % (self.expand(first), + self.expand(second, first.type)) # no STSimplify in MSSQL def ST_TOUCHES(self, first, second): - return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STTouches(%s)=1' % (self.expand(first), + self.expand(second, first.type)) def ST_WITHIN(self, first, second): - return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type)) + return '%s.STWithin(%s)=1' % (self.expand(first), + self.expand(second, first.type)) def represent(self, obj, fieldtype): field_is_type = fieldtype.startswith @@ -3506,13 +3531,13 @@ class MSSQLAdapter(BaseAdapter): geotype, parms = fieldtype[:-1].split('(') if parms: srid = parms - return "geometry::STGeomFromText('%s',%s)" %(obj, srid) + return "geometry::STGeomFromText('%s',%s)" % (obj, srid) elif fieldtype == 'geography': srid = 4326 # MS SQL default srid for geography geotype, parms = fieldtype[:-1].split('(') if parms: srid = parms - return "geography::STGeomFromText('%s',%s)" %(obj, srid) + return "geography::STGeomFromText('%s',%s)" % (obj, srid) # else: # raise SyntaxError('Invalid field type %s' %fieldtype) return "geometry::STGeomFromText('%s',%s)" %(obj, srid) @@ -3538,11 +3563,13 @@ class MSSQL3Adapter(MSSQLAdapter): sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)] sql_f_iproxy = ', '.join(sql_f_iproxy) sql_f_oproxy = ', 
'.join(sql_f_outer) - return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax) - return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o) - def rowslice(self,rows,minimum=0,maximum=None): + return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s, sql_f_oproxy, sql_s, sql_f, sql_f_iproxy, sql_t, sql_w, sql_g_inner, lmin, lmax) + return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) + + def rowslice(self, rows, minimum=0, maximum=None): return rows + class MSSQL4Adapter(MSSQLAdapter): """Support for "native" pagination @@ -3566,76 +3593,74 @@ class MSSQL4Adapter(MSSQLAdapter): return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) - def rowslice(self,rows,minimum=0,maximum=None): + def rowslice(self, rows, minimum=0, maximum=None): return rows -class MSSQL2Adapter(MSSQLAdapter): - drivers = ('pyodbc',) - types = { - 'boolean': 'CHAR(1)', - 'string': 'NVARCHAR(%(length)s)', - 'text': 'NTEXT', - 'json': 'NTEXT', - 'password': 'NVARCHAR(%(length)s)', - 'blob': 'IMAGE', - 'upload': 'NVARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATETIME', - 'time': 'CHAR(8)', - 'datetime': 'DATETIME', - 'id': 'INT IDENTITY PRIMARY KEY', - 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'NTEXT', - 'list:string': 'NTEXT', - 'list:reference': 'NTEXT', - 'big-id': 'BIGINT IDENTITY PRIMARY KEY', - 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT 
FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', - } +class MSSQL2Adapter(MSSQLAdapter): + drivers = ('pyodbc', ) + + types = {'boolean': 'CHAR(1)', + 'string': 'NVARCHAR(%(length)s)', + 'text': 'NTEXT', + 'json': 'NTEXT', + 'password': 'NVARCHAR(%(length)s)', + 'blob': 'IMAGE', + 'upload': 'NVARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATETIME', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'INT IDENTITY PRIMARY KEY', + 'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'NTEXT', + 'list:string': 'NTEXT', + 'list:reference': 'NTEXT', + 'big-id': 'BIGINT IDENTITY PRIMARY KEY', + 'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } def represent(self, obj, fieldtype): value = BaseAdapter.represent(self, obj, fieldtype) - if fieldtype in ('string','text', 'json') and value[:1]=="'": + if fieldtype in ('string', 'text', 'json') and value[:1] == "'": value = 'N'+value return value - def execute(self,a): + def execute(self, a): return self.log_execute(a.decode('utf8')) class VerticaAdapter(MSSQLAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) T_SEP = ' ' - types = { - 'boolean': 'BOOLEAN', - 'string': 
'VARCHAR(%(length)s)', - 'text': 'BYTEA', - 'json': 'VARCHAR(%(length)s)', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BYTEA', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'DOUBLE PRECISION', - 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'DATETIME', - 'id': 'IDENTITY', - 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'BYTEA', - 'list:string': 'BYTEA', - 'list:reference': 'BYTEA', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } - + types = {'boolean': 'BOOLEAN', + 'string': 'VARCHAR(%(length)s)', + 'text': 'BYTEA', + 'json': 'VARCHAR(%(length)s)', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BYTEA', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'DOUBLE PRECISION', + 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'DATETIME', + 'id': 'IDENTITY', + 'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'BYTEA', + 'list:string': 'BYTEA', + 'list:reference': 'BYTEA', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } def EXTRACT(self, first, what): return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first)) @@ -3651,54 +3676,53 @@ class VerticaAdapter(MSSQLAdapter): return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) - def lastrowid(self,table): + def lastrowid(self, table): self.execute('SELECT LAST_INSERT_ID();') return long(self.cursor.fetchone()[0]) def execute(self, a): return self.log_execute(a) + class SybaseAdapter(MSSQLAdapter): - drivers = ('Sybase',) + drivers = ('Sybase', ) - types = { - 'boolean': 'BIT', - 'string': 'CHAR VARYING(%(length)s)', - 'text': 'TEXT', - 'json': 'TEXT', - 'password': 'CHAR VARYING(%(length)s)', - 
'blob': 'IMAGE', - 'upload': 'CHAR VARYING(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATETIME', - 'time': 'CHAR(8)', - 'datetime': 'DATETIME', - 'id': 'INT IDENTITY PRIMARY KEY', - 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'TEXT', - 'list:string': 'TEXT', - 'list:reference': 'TEXT', - 'geometry': 'geometry', - 'geography': 'geography', - 'big-id': 'BIGINT IDENTITY PRIMARY KEY', - 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'BIT', + 'string': 'CHAR VARYING(%(length)s)', + 'text': 'TEXT', + 'json': 'TEXT', + 'password': 'CHAR VARYING(%(length)s)', + 'blob': 'IMAGE', + 'upload': 'CHAR VARYING(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATETIME', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'INT IDENTITY PRIMARY KEY', + 'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'TEXT', + 'list:string': 'TEXT', + 'list:reference': 'TEXT', + 'geometry': 'geometry', + 'geography': 'geography', + 'big-id': 'BIGINT IDENTITY PRIMARY KEY', + 'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', 
+ 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } - - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, srid=4326, after_connection=None): self.db = db self.dbengine = "sybase" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec @@ -3706,7 +3730,7 @@ class SybaseAdapter(MSSQLAdapter): self.srid = srid self.find_or_make_work_folder() # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8 - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] if '@' not in ruri: try: m = self.REGEX_DSN.match(ruri) @@ -3739,51 +3763,51 @@ class SybaseAdapter(MSSQLAdapter): raise SyntaxError('Database name required') port = m.group('port') or '1433' - dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db) + dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db) driver_args.update(user = credential_decoder(user), password = credential_decoder(password)) - def connector(dsn=dsn,driver_args=driver_args): - return self.driver.connect(dsn,**driver_args) + def connector(dsn=dsn, driver_args=driver_args): + return self.driver.connect(dsn, **driver_args) + self.connector = connector if do_connect: self.reconnect() class FireBirdAdapter(BaseAdapter): - drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') + drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc') commit_on_alter_table = False support_distributed_transaction = True - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 
'BLOB SUB_TYPE 1', - 'json': 'BLOB SUB_TYPE 1', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BLOB SUB_TYPE 0', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INTEGER', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'DOUBLE PRECISION', - 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'INTEGER PRIMARY KEY', - 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'BLOB SUB_TYPE 1', - 'list:string': 'BLOB SUB_TYPE 1', - 'list:reference': 'BLOB SUB_TYPE 1', - 'big-id': 'BIGINT PRIMARY KEY', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'BLOB SUB_TYPE 1', + 'json': 'BLOB SUB_TYPE 1', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB SUB_TYPE 0', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'DOUBLE PRECISION', + 'decimal': 'DECIMAL(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INTEGER PRIMARY KEY', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'BLOB SUB_TYPE 1', + 'list:string': 'BLOB SUB_TYPE 1', + 'list:reference': 'BLOB SUB_TYPE 1', + 'big-id': 'BIGINT PRIMARY KEY', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } - def sequence_name(self,tablename): + def sequence_name(self, tablename): return ('genid_' + self.QUOTE_TEMPLATE) % tablename - def trigger_name(self,tablename): + def trigger_name(self, tablename): return 'trg_id_%s' % tablename def RANDOM(self): @@ -3792,23 +3816,23 @@ class FireBirdAdapter(BaseAdapter): def EPOCH(self, first): return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first) - def NOT_NULL(self,default,field_type): - return 'DEFAULT %s NOT NULL' % 
self.represent(default,field_type) + def NOT_NULL(self, default, field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default, field_type) - def SUBSTRING(self,field,parameters): + def SUBSTRING(self, field, parameters): return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1]) def LENGTH(self, first): return "CHAR_LENGTH(%s)" % self.expand(first) - def CONTAINS(self,first,second,case_sensitive=False): + def CONTAINS(self, first, second, case_sensitive=False): if first.type.startswith('list:'): - second = Expression(None,self.CONCAT('|',Expression( - None,self.REPLACE(second,('|','||'))),'|')) + second = Expression(None, self.CONCAT('|', Expression( + None, self.REPLACE(second, ('|', '||'))), '|')) return '(%s CONTAINING %s)' % (self.expand(first), self.expand(second, 'string')) - def _drop(self,table,mode): + def _drop(self, table, mode): sequence_name = table._sequence_name return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP GENERATOR %s;' % sequence_name] @@ -3824,19 +3848,19 @@ class FireBirdAdapter(BaseAdapter): REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P.+?)(\?set_encoding=(?P\w+))?$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "firebird" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL: %s" % self.uri) @@ -3854,13 +3878,14 @@ class FireBirdAdapter(BaseAdapter): if not db: raise 
SyntaxError('Database name required') charset = m.group('charset') or 'UTF8' - driver_args.update(dsn='%s/%s:%s' % (host,port,db), - user = credential_decoder(user), - password = credential_decoder(password), - charset = charset) + driver_args.update(dsn='%s/%s:%s' % (host, port, db), + user=credential_decoder(user), + password=credential_decoder(password), + charset=charset) def connector(driver_args=driver_args): return self.driver.connect(**driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -3873,30 +3898,30 @@ class FireBirdAdapter(BaseAdapter): self.execute('set generator %s to 0;' % sequence_name) self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name)) - def lastrowid(self,table): + def lastrowid(self, table): sequence_name = table._sequence_name self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name) return long(self.cursor.fetchone()[0]) class FireBirdEmbeddedAdapter(FireBirdAdapter): - drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc') + drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc') REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\?]+)(\?set_encoding=(?P\w+))?$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "firebird" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( @@ -3922,44 +3947,45 @@ class 
FireBirdEmbeddedAdapter(FireBirdAdapter): def connector(driver_args=driver_args): return self.driver.connect(**driver_args) + self.connector = connector if do_connect: self.reconnect() -class InformixAdapter(BaseAdapter): - drivers = ('informixdb',) - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'BLOB SUB_TYPE 1', - 'json': 'BLOB SUB_TYPE 1', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BLOB SUB_TYPE 0', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INTEGER', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'DOUBLE PRECISION', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'CHAR(8)', - 'datetime': 'DATETIME', - 'id': 'SERIAL', - 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'BLOB SUB_TYPE 1', - 'list:string': 'BLOB SUB_TYPE 1', - 'list:reference': 'BLOB SUB_TYPE 1', - 'big-id': 'BIGSERIAL', - 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', - 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', - } +class InformixAdapter(BaseAdapter): + drivers = ('informixdb', ) + + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'BLOB SUB_TYPE 1', + 'json': 'BLOB SUB_TYPE 1', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB SUB_TYPE 0', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INTEGER', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'DOUBLE PRECISION', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'CHAR(8)', + 'datetime': 'DATETIME', + 'id': 'SERIAL', + 'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'BLOB SUB_TYPE 1', + 'list:string': 'BLOB SUB_TYPE 
1', + 'list:reference': 'BLOB SUB_TYPE 1', + 'big-id': 'BIGSERIAL', + 'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s', + 'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s', + } def RANDOM(self): return 'Random()' - def NOT_NULL(self,default,field_type): - return 'DEFAULT %s NOT NULL' % self.represent(default,field_type) + def NOT_NULL(self, default, field_type): + return 'DEFAULT %s NOT NULL' % self.represent(default, field_type) def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): if limitby: @@ -3983,7 +4009,7 @@ class InformixAdapter(BaseAdapter): return "to_date('%s','%%Y-%%m-%%d')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): - obj = obj.isoformat()[:19].replace('T',' ') + obj = obj.isoformat()[:19].replace('T', ' ') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+' 00:00:00' else: @@ -3993,7 +4019,7 @@ class InformixAdapter(BaseAdapter): REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P.+)$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db @@ -4005,7 +4031,7 @@ class InformixAdapter(BaseAdapter): self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( @@ -4024,21 +4050,24 @@ class InformixAdapter(BaseAdapter): raise SyntaxError('Database name required') user = credential_decoder(user) password = 
credential_decoder(password) - dsn = '%s@%s' % (db,host) - driver_args.update(user=user,password=password,autocommit=True) - def connector(dsn=dsn,driver_args=driver_args): - return self.driver.connect(dsn,**driver_args) + dsn = '%s@%s' % (db, host) + driver_args.update(user=user, password=password, autocommit=True) + + def connector(dsn=dsn, driver_args=driver_args): + return self.driver.connect(dsn, **driver_args) + self.connector = connector if do_connect: self.reconnect() - def execute(self,command): - if command[-1:]==';': + def execute(self, command): + if command[-1:] == ';': command = command[:-1] return self.log_execute(command) - def lastrowid(self,table): + def lastrowid(self, table): return self.cursor.sqlerrd[1] + class InformixSEAdapter(InformixAdapter): """ work in progress """ @@ -4046,40 +4075,39 @@ class InformixSEAdapter(InformixAdapter): return 'SELECT %s %s FROM %s%s%s;' % \ (sql_s, sql_f, sql_t, sql_w, sql_o) - def rowslice(self,rows,minimum=0,maximum=None): + def rowslice(self, rows, minimum=0, maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class DB2Adapter(BaseAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'CLOB', - 'json': 'CLOB', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BLOB', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'REAL', - 'double': 'DOUBLE', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', - 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'CLOB', - 'list:string': 'CLOB', - 'list:reference': 'CLOB', - 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', - 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES 
%(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'CLOB', + 'json': 'CLOB', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'REAL', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL', + 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', + } def LEFT_JOIN(self): return 'LEFT OUTER JOIN' @@ -4099,96 +4127,99 @@ class DB2Adapter(BaseAdapter): return "BLOB('%s')" % obj elif fieldtype == 'datetime': if isinstance(obj, datetime.datetime): - obj = obj.isoformat()[:19].replace('T','-').replace(':','.') + obj = obj.isoformat()[:19].replace('T', '-').replace(':', '.') elif isinstance(obj, datetime.date): obj = obj.isoformat()[:10]+'-00.00.00' return "'%s'" % obj return None - def __init__(self,db,uri,pool_size=0,folder=None,db_codec 
='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "db2" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://', 1)[1] - def connector(cnxn=ruri,driver_args=driver_args): - return self.driver.connect(cnxn,**driver_args) + + def connector(cnxn=ruri, driver_args=driver_args): + return self.driver.connect(cnxn, **driver_args) + self.connector = connector if do_connect: self.reconnect() - def execute(self,command): + def execute(self, command): if command[-1:]==';': command = command[:-1] return self.log_execute(command) - def lastrowid(self,table): + def lastrowid(self, table): self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table) return long(self.cursor.fetchone()[0]) - def rowslice(self,rows,minimum=0,maximum=None): + def rowslice(self, rows, minimum=0, maximum=None): if maximum is None: return rows[minimum:] return rows[minimum:maximum] class TeradataAdapter(BaseAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'VARCHAR(2000)', - 'json': 'VARCHAR(4000)', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'BLOB', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'REAL', - 'double': 'DOUBLE', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - # Modified Constraint syntax for Teradata. - # Teradata does not support ON DELETE. 
- 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific - 'reference': 'INT', - 'list:integer': 'VARCHAR(4000)', - 'list:string': 'VARCHAR(4000)', - 'list:reference': 'VARCHAR(4000)', - 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific - 'big-reference': 'BIGINT', - 'reference FK': ' REFERENCES %(foreign_key)s', - 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'VARCHAR(2000)', + 'json': 'VARCHAR(4000)', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'REAL', + 'double': 'DOUBLE', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + # Modified Constraint syntax for Teradata. + # Teradata does not support ON DELETE. + 'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific + 'reference': 'INT', + 'list:integer': 'VARCHAR(4000)', + 'list:string': 'VARCHAR(4000)', + 'list:reference': 'VARCHAR(4000)', + 'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific + 'big-reference': 'BIGINT', + 'reference FK': ' REFERENCES %(foreign_key)s', + 'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)', + } - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "teradata" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() ruri = uri.split('://', 1)[1] - def 
connector(cnxn=ruri,driver_args=driver_args): - return self.driver.connect(cnxn,**driver_args) + + def connector(cnxn=ruri, driver_args=driver_args): + return self.driver.connect(cnxn, **driver_args) + self.connector = connector if do_connect: self.reconnect() - def close(self,action='commit',really=True): + def close(self, action='commit', really=True): # Teradata does not implicitly close off the cursor # leading to SQL_ACTIVE_STATEMENTS limit errors self.cursor.close() @@ -4208,39 +4239,39 @@ class TeradataAdapter(BaseAdapter): tablename = table._tablename return ['DELETE FROM %s ALL;' % (tablename)] -INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name - # (ANSI-SQL wants this form of name - # to be a delimited identifier) +INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name + # (ANSI-SQL wants this form of name + # to be a delimited identifier) + class IngresAdapter(BaseAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'CLOB', - 'json': 'CLOB', - 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? - 'blob': 'BLOB', - 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? - 'integer': 'INTEGER4', # or int8... 
- 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT8', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'ANSIDATE', - 'time': 'TIME WITHOUT TIME ZONE', - 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', - 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME, - 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'CLOB', - 'list:string': 'CLOB', - 'list:reference': 'CLOB', - 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME, - 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 'CLOB', + 'json': 'CLOB', + 'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? + 'integer': 'INTEGER4', # or int8... 
+ 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'ANSIDATE', + 'time': 'TIME WITHOUT TIME ZONE', + 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', + 'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME, + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'CLOB', + 'list:string': 'CLOB', + 'list:reference': 'CLOB', + 'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME, + 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO + } def LEFT_JOIN(self): return 'LEFT OUTER JOIN' @@ -4259,14 +4290,14 @@ class IngresAdapter(BaseAdapter): sql_o += ' OFFSET %d' % (lmin, ) return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o) - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "ingres" self._driver = pyodbc self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec @@ -4287,8 +4318,9 @@ class IngresAdapter(BaseAdapter): vnode = '(local)' servertype = 'ingres' ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name) - def connector(cnxn=ruri,driver_args=driver_args): - return 
self.driver.connect(cnxn,**driver_args) + + def connector(cnxn=ruri, driver_args=driver_args): + return self.driver.connect(cnxn, **driver_args) self.connector = connector @@ -4299,7 +4331,7 @@ class IngresAdapter(BaseAdapter): # post create table auto inc code (if needed) # modify table to btree for performance.... # Older Ingres releases could use rule/trigger like Oracle above. - if hasattr(table,'_primarykey'): + if hasattr(table, '_primarykey'): modify_tbl_sql = 'modify %s to btree unique on %s' % \ (table._tablename, ', '.join(["'%s'" % x for x in table.primarykey])) @@ -4311,8 +4343,7 @@ class IngresAdapter(BaseAdapter): self.execute(query) self.execute('modify %s to btree unique on %s' % (table._tablename, 'id')) - - def lastrowid(self,table): + def lastrowid(self, table): tmp_seqname='%s_iisq' % table self.execute('select current value for %s' % tmp_seqname) return long(self.cursor.fetchone()[0]) # don't really need int type cast here... @@ -4320,65 +4351,64 @@ class IngresAdapter(BaseAdapter): class IngresUnicodeAdapter(IngresAdapter): - drivers = ('pyodbc',) + drivers = ('pyodbc', ) + + types = {'boolean': 'CHAR(1)', + 'string': 'NVARCHAR(%(length)s)', + 'text': 'NCLOB', + 'json': 'NCLOB', + 'password': 'NVARCHAR(%(length)s)', # Not sure what this contains utf8 or nvarchar. Or even bytes? + 'blob': 'BLOB', + 'upload': 'VARCHAR(%(length)s)', # FIXME utf8 or nvarchar... or blob? what is this type? + 'integer': 'INTEGER4', # or int8... 
+ 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'FLOAT8', + 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', + 'date': 'ANSIDATE', + 'time': 'TIME WITHOUT TIME ZONE', + 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', + 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME, + 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'NCLOB', + 'list:string': 'NCLOB', + 'list:reference': 'NCLOB', + 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME, + 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO + } - types = { - 'boolean': 'CHAR(1)', - 'string': 'NVARCHAR(%(length)s)', - 'text': 'NCLOB', - 'json': 'NCLOB', - 'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes? - 'blob': 'BLOB', - 'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type? - 'integer': 'INTEGER4', # or int8... 
- 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'FLOAT8', - 'decimal': 'NUMERIC(%(precision)s,%(scale)s)', - 'date': 'ANSIDATE', - 'time': 'TIME WITHOUT TIME ZONE', - 'datetime': 'TIMESTAMP WITHOUT TIME ZONE', - 'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME, - 'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'NCLOB', - 'list:string': 'NCLOB', - 'list:reference': 'NCLOB', - 'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME, - 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO - } class SAPDBAdapter(BaseAdapter): - drivers = ('sapdb',) + drivers = ('sapdb', ) support_distributed_transaction = False - types = { - 'boolean': 'CHAR(1)', - 'string': 'VARCHAR(%(length)s)', - 'text': 'LONG', - 'json': 'LONG', - 'password': 'VARCHAR(%(length)s)', - 'blob': 'LONG', - 'upload': 'VARCHAR(%(length)s)', - 'integer': 'INT', - 'bigint': 'BIGINT', - 'float': 'FLOAT', - 'double': 'DOUBLE PRECISION', - 'decimal': 'FIXED(%(precision)s,%(scale)s)', - 'date': 'DATE', - 'time': 'TIME', - 'datetime': 'TIMESTAMP', - 'id': 'INT PRIMARY KEY', - 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - 'list:integer': 'LONG', - 'list:string': 'LONG', - 'list:reference': 'LONG', - 'big-id': 'BIGINT PRIMARY KEY', - 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', - } + types = {'boolean': 'CHAR(1)', + 'string': 'VARCHAR(%(length)s)', + 'text': 
'LONG', + 'json': 'LONG', + 'password': 'VARCHAR(%(length)s)', + 'blob': 'LONG', + 'upload': 'VARCHAR(%(length)s)', + 'integer': 'INT', + 'bigint': 'BIGINT', + 'float': 'FLOAT', + 'double': 'DOUBLE PRECISION', + 'decimal': 'FIXED(%(precision)s,%(scale)s)', + 'date': 'DATE', + 'time': 'TIME', + 'datetime': 'TIMESTAMP', + 'id': 'INT PRIMARY KEY', + 'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + 'list:integer': 'LONG', + 'list:string': 'LONG', + 'list:reference': 'LONG', + 'big-id': 'BIGINT PRIMARY KEY', + 'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s', + } - def sequence_name(self,table): + def sequence_name(self, table): return (self.QUOTE_TEMPLATE + '_id_Seq') % table def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby): @@ -4394,26 +4424,26 @@ class SAPDBAdapter(BaseAdapter): def create_sequence_and_triggers(self, query, table, **args): # following lines should only be executed if table._sequence_name does not exist self.execute('CREATE SEQUENCE %s;' % table._sequence_name) - self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \ - % (table._tablename, table._id.name, table._sequence_name)) + self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" + % (table._tablename, table._id.name, table._sequence_name)) self.execute(query) REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:@]+)(\:(?P[0-9]+))?/(?P[^\?]+)(\?sslmode=(?P.+))?$') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db self.dbengine = "sapdb" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder 
self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError("Invalid URI string in DAL") @@ -4429,19 +4459,21 @@ class SAPDBAdapter(BaseAdapter): db = m.group('db') if not db: raise SyntaxError('Database name required') + def connector(user=user, password=password, database=db, - host=host, driver_args=driver_args): + host=host, driver_args=driver_args): return self.driver.Connection(user, password, database, host, **driver_args) self.connector = connector if do_connect: self.reconnect() - def lastrowid(self,table): + def lastrowid(self, table): self.execute("select %s.NEXTVAL from dual" % table._sequence_name) return long(self.cursor.fetchone()[0]) + class CubridAdapter(MySQLAdapter): - drivers = ('cubriddb',) + drivers = ('cubriddb', ) REGEX_URI = re.compile('^(?P[^:@]+)(\:(?P[^@]*))?@(?P[^\:/]+)(\:(?P[0-9]+))?/(?P[^?]+)(\?set_encoding=(?P\w+))?$') @@ -4451,13 +4483,13 @@ class CubridAdapter(MySQLAdapter): self.db = db self.dbengine = "cubrid" self.uri = uri - if do_connect: self.find_driver(adapter_args,uri) + if do_connect: self.find_driver(adapter_args, uri) self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection self.find_or_make_work_folder() - ruri = uri.split('://',1)[1] + ruri = uri.split('://', 1)[1] m = self.REGEX_URI.match(ruri) if not m: raise SyntaxError( @@ -4477,9 +4509,11 @@ class CubridAdapter(MySQLAdapter): port = int(m.group('port') or '30000') user = credential_decoder(user) passwd = credential_decoder(password) - def connector(host=host,port=port,db=db, - user=user,passwd=passwd,driver_args=driver_args): - return self.driver.connect(host,port,db,user,passwd,**driver_args) + + def connector(host=host, port=port, db=db, + user=user, passwd=passwd, driver_args=driver_args): + return self.driver.connect(host, port, db, user, 
passwd, **driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -4489,15 +4523,14 @@ class CubridAdapter(MySQLAdapter): ######## GAE MySQL ########## - class DatabaseStoredFile: web2py_filesystem = False - def escape(self,obj): + def escape(self, obj): return self.db._adapter.escape(obj) - def __init__(self,db,filename,mode): + def __init__(self, db, filename, mode): if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'): raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now") self.db = db @@ -4510,11 +4543,11 @@ class DatabaseStoredFile: sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));" self.db.executesql(sql) DatabaseStoredFile.web2py_filesystem = True - self.p=0 + self.p = 0 self.data = '' - if mode in ('r','rw','a'): + if mode in ('r', 'rw', 'a'): query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \ - % filename + % filename rows = self.db.executesql(query) if rows: self.data = rows[0][0] @@ -4524,7 +4557,7 @@ class DatabaseStoredFile: self.data = datafile.read() finally: datafile.close() - elif mode in ('r','rw'): + elif mode in ('r', 'rw'): raise RuntimeError("File %s does not exist" % filename) def read(self, bytes): @@ -4533,22 +4566,22 @@ class DatabaseStoredFile: return data def readline(self): - i = self.data.find('\n',self.p)+1 + i = self.data.find('\n', self.p)+1 if i>0: data, self.p = self.data[self.p:i], i else: data, self.p = self.data[self.p:], len(self.data) return data - def write(self,data): + def write(self, data): self.data += data def close_connection(self): if self.db is not None: self.db.executesql( "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename) - query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\ - % (self.filename, self.data.replace("'","''")) + query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')" \ + % (self.filename, 
self.data.replace("'", "''")) self.db.executesql(query) self.db.commit() self.db = None @@ -4577,20 +4610,21 @@ class DatabaseStoredFile: class UseDatabaseStoredFile: def file_exists(self, filename): - return DatabaseStoredFile.exists(self.db,filename) + return DatabaseStoredFile.exists(self.db, filename) def file_open(self, filename, mode='rb', lock=True): - return DatabaseStoredFile(self.db,filename,mode) + return DatabaseStoredFile(self.db, filename, mode) def file_close(self, fileobj): fileobj.close_connection() - def file_delete(self,filename): + def file_delete(self, filename): query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename self.db.executesql(query) self.db.commit() -class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): + +class GoogleSQLAdapter(UseDatabaseStoredFile, MySQLAdapter): uploads_in_blob = True REGEX_URI = re.compile('^(?P.*)/(?P.*)$') @@ -4607,8 +4641,7 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): self.db_codec = db_codec self._after_connection = after_connection if do_connect: self.find_driver(adapter_args, uri) - self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split( - os.sep+'applications'+os.sep,1)[1]) + self.folder = folder or pjoin('$HOME', THREAD_LOCAL.folder.split(os.sep+'applications'+os.sep, 1)[1]) ruri = uri.split("://")[1] m = self.REGEX_URI.match(ruri) if not m: @@ -4618,11 +4651,13 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): driver_args['instance'] = instance if not 'charset' in driver_args: driver_args['charset'] = 'utf8' - self.createdb = createdb = adapter_args.get('createdb',True) + self.createdb = createdb = adapter_args.get('createdb', True) if not createdb: driver_args['database'] = db + def connector(driver_args=driver_args): return rdbms.connect(**driver_args) + self.connector = connector if do_connect: self.reconnect() @@ -4637,10 +4672,11 @@ class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter): def execute(self, command, *a, **b): return 
self.log_execute(command.decode('utf8'), *a, **b) - def find_driver(self,adapter_args,uri=None): + def find_driver(self, adapter_args, uri=None): self.adapter_args = adapter_args self.driver = "google" + class NoSQLAdapter(BaseAdapter): can_select_for_update = False QUOTE_TEMPLATE = '%s' @@ -4667,7 +4703,7 @@ class NoSQLAdapter(BaseAdapter): if self.dbengine == 'google:datastore': if isinstance(fieldtype, gae.Property): return obj - is_string = isinstance(fieldtype,str) + is_string = isinstance(fieldtype, str) is_list = is_string and field_is_type('list:') if is_list: if not obj: @@ -4675,12 +4711,12 @@ class NoSQLAdapter(BaseAdapter): if not isinstance(obj, (list, tuple)): obj = [obj] if obj == '' and not \ - (is_string and fieldtype[:2] in ['st','te', 'pa','up']): + (is_string and fieldtype[:2] in ['st', 'te', 'pa', 'up']): return None if not obj is None: if isinstance(obj, list) and not is_list: obj = [self.represent(o, fieldtype) for o in obj] - elif fieldtype in ('integer','bigint','id'): + elif fieldtype in ('integer', 'bigint', 'id'): obj = long(obj) elif fieldtype == 'double': obj = float(obj) @@ -4695,14 +4731,14 @@ class NoSQLAdapter(BaseAdapter): obj = False elif fieldtype == 'date': if not isinstance(obj, datetime.date): - (y, m, d) = map(int,str(obj).strip().split('-')) + (y, m, d) = map(int, str(obj).strip().split('-')) obj = datetime.date(y, m, d) - elif isinstance(obj,datetime.datetime): + elif isinstance(obj, datetime.datetime): (y, m, d) = (obj.year, obj.month, obj.day) obj = datetime.date(y, m, d) elif fieldtype == 'time': if not isinstance(obj, datetime.time): - time_items = map(int,str(obj).strip().split(':')[:3]) + time_items = map(int, str(obj).strip().split(':')[:3]) if len(time_items) == 3: (h, mi, s) = time_items else: @@ -4710,8 +4746,8 @@ class NoSQLAdapter(BaseAdapter): obj = datetime.time(h, mi, s) elif fieldtype == 'datetime': if not isinstance(obj, datetime.datetime): - (y, m, d) = map(int,str(obj)[:10].strip().split('-')) - 
time_items = map(int,str(obj)[11:].strip().split(':')[:3]) + (y, m, d) = map(int, str(obj)[:10].strip().split('-')) + time_items = map(int, str(obj)[11:].strip().split(':')[:3]) while len(time_items)<3: time_items.append(0) (h, mi, s) = time_items @@ -4728,28 +4764,28 @@ class NoSQLAdapter(BaseAdapter): else: raise RuntimeError("missing simplejson") elif is_string and field_is_type('list:string'): - return map(self.to_unicode,obj) + return map(self.to_unicode, obj) elif is_list: - return map(int,obj) + return map(int, obj) else: obj = self.to_unicode(obj) return obj - def _insert(self,table,fields): + def _insert(self, table, fields): return 'insert %s in %s' % (fields, table) - def _count(self,query,distinct=None): + def _count(self, query, distinct=None): return 'count %s' % repr(query) - def _select(self,query,fields,attributes): + def _select(self, query, fields, attributes): return 'select %s where %s' % (repr(fields), repr(query)) - def _delete(self,tablename, query): - return 'delete %s where %s' % (repr(tablename),repr(query)) + def _delete(self, tablename, query): + return 'delete %s where %s' % (repr(tablename), repr(query)) - def _update(self,tablename,query,fields): + def _update(self, tablename, query, fields): return 'update %s (%s) where %s' % (repr(tablename), - repr(fields),repr(query)) + repr(fields), repr(query)) def commit(self): """ @@ -4769,47 +4805,80 @@ class NoSQLAdapter(BaseAdapter): """ pass - # these functions should never be called! 
- def OR(self,first,second): raise SyntaxError("Not supported") - def AND(self,first,second): raise SyntaxError("Not supported") - def AS(self,first,second): raise SyntaxError("Not supported") - def ON(self,first,second): raise SyntaxError("Not supported") - def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported") - def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported") - def ADD(self,first,second): raise SyntaxError("Not supported") - def SUB(self,first,second): raise SyntaxError("Not supported") - def MUL(self,first,second): raise SyntaxError("Not supported") - def DIV(self,first,second): raise SyntaxError("Not supported") + def OR(self, first, second): raise SyntaxError("Not supported") + + def AND(self, first, second): raise SyntaxError("Not supported") + + def AS(self, first, second): raise SyntaxError("Not supported") + + def ON(self, first, second): raise SyntaxError("Not supported") + + def STARTSWITH(self, first, second=None): raise SyntaxError("Not supported") + + def ENDSWITH(self, first, second=None): raise SyntaxError("Not supported") + + def ADD(self, first, second): raise SyntaxError("Not supported") + + def SUB(self, first, second): raise SyntaxError("Not supported") + + def MUL(self, first, second): raise SyntaxError("Not supported") + + def DIV(self, first, second): raise SyntaxError("Not supported") + def LOWER(self,first): raise SyntaxError("Not supported") + def UPPER(self,first): raise SyntaxError("Not supported") + def EXTRACT(self,first,what): raise SyntaxError("Not supported") + def LENGTH(self, first): raise SyntaxError("Not supported") + def AGGREGATE(self,first,what): raise SyntaxError("Not supported") + def LEFT_JOIN(self): raise SyntaxError("Not supported") + def RANDOM(self): raise SyntaxError("Not supported") - def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported") - def PRIMARY_KEY(self,key): raise SyntaxError("Not supported") - def ILIKE(self,first,second): raise 
SyntaxError("Not supported") - def drop(self,table,mode): raise SyntaxError("Not supported") - def alias(self,table,alias): raise SyntaxError("Not supported") - def migrate_table(self,*a,**b): raise SyntaxError("Not supported") - def distributed_transaction_begin(self,key): raise SyntaxError("Not supported") - def prepare(self,key): raise SyntaxError("Not supported") - def commit_prepared(self,key): raise SyntaxError("Not supported") - def rollback_prepared(self,key): raise SyntaxError("Not supported") - def concat_add(self,table): raise SyntaxError("Not supported") + + def SUBSTRING(self, field, parameters): raise SyntaxError("Not supported") + + def PRIMARY_KEY(self, key): raise SyntaxError("Not supported") + + def ILIKE(self, first, second): raise SyntaxError("Not supported") + + def drop(self, table, mode): raise SyntaxError("Not supported") + + def alias(self, table, alias): raise SyntaxError("Not supported") + + def migrate_table(self, *a, **b): raise SyntaxError("Not supported") + + def distributed_transaction_begin(self, key): raise SyntaxError("Not supported") + + def prepare(self, key): raise SyntaxError("Not supported") + + def commit_prepared(self, key): raise SyntaxError("Not supported") + + def rollback_prepared(self, key): raise SyntaxError("Not supported") + + def concat_add(self, table): raise SyntaxError("Not supported") + def constraint_name(self, table, fieldname): raise SyntaxError("Not supported") + def create_sequence_and_triggers(self, query, table, **args): pass - def log_execute(self,*a,**b): raise SyntaxError("Not supported") - def execute(self,*a,**b): raise SyntaxError("Not supported") + + def log_execute(self, *a, **b): raise SyntaxError("Not supported") + + def execute(self, *a, **b): raise SyntaxError("Not supported") + def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported") - def lastrowid(self,table): raise SyntaxError("Not supported") - def rowslice(self,rows,minimum=0,maximum=None): raise 
SyntaxError("Not supported") + + def lastrowid(self, table): raise SyntaxError("Not supported") + + def rowslice(self, rows, minimum=0, maximum=None): raise SyntaxError("Not supported") class GAEF(object): - def __init__(self,name,op,value,apply): + def __init__(self, name, op, value, apply): self.name=name=='id' and '__key__' or name self.op=op self.value=value @@ -4840,61 +4909,61 @@ class GoogleDatastoreAdapter(NoSQLAdapter): reconnect = lambda *args, **kwargs: None def file_exists(self, filename): pass + def file_open(self, filename, mode='rb', lock=True): pass + def file_close(self, fileobj): pass REGEX_NAMESPACE = re.compile('.*://(?P.+)') - def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): - self.use_ndb = adapter_args.get('use_ndb',uri.startswith('google:datastore+ndb')) + self.use_ndb = adapter_args.get('use_ndb', uri.startswith('google:datastore+ndb')) if self.use_ndb is True: - self.types.update({ - 'boolean': ndb.BooleanProperty, - 'string': (lambda **kwargs: ndb.StringProperty(**kwargs)), - 'text': ndb.TextProperty, - 'json': ndb.TextProperty, - 'password': ndb.StringProperty, - 'blob': ndb.BlobProperty, - 'upload': ndb.StringProperty, - 'integer': ndb.IntegerProperty, - 'bigint': ndb.IntegerProperty, - 'float': ndb.FloatProperty, - 'double': ndb.FloatProperty, - 'decimal': NDBDecimalProperty, - 'date': ndb.DateProperty, - 'time': ndb.TimeProperty, - 'datetime': ndb.DateTimeProperty, - 'id': None, - 'reference': ndb.IntegerProperty, - 'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)), - 'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)), - 'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)), - }) + self.types.update({'boolean': 
ndb.BooleanProperty, + 'string': (lambda **kwargs: ndb.StringProperty(**kwargs)), + 'text': ndb.TextProperty, + 'json': ndb.TextProperty, + 'password': ndb.StringProperty, + 'blob': ndb.BlobProperty, + 'upload': ndb.StringProperty, + 'integer': ndb.IntegerProperty, + 'bigint': ndb.IntegerProperty, + 'float': ndb.FloatProperty, + 'double': ndb.FloatProperty, + 'decimal': NDBDecimalProperty, + 'date': ndb.DateProperty, + 'time': ndb.TimeProperty, + 'datetime': ndb.DateTimeProperty, + 'id': None, + 'reference': ndb.IntegerProperty, + 'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)), + 'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)), + 'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)), + }) else: - self.types.update({ - 'boolean': gae.BooleanProperty, - 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)), - 'text': gae.TextProperty, - 'json': gae.TextProperty, - 'password': gae.StringProperty, - 'blob': gae.BlobProperty, - 'upload': gae.StringProperty, - 'integer': gae.IntegerProperty, - 'bigint': gae.IntegerProperty, - 'float': gae.FloatProperty, - 'double': gae.FloatProperty, - 'decimal': GAEDecimalProperty, - 'date': gae.DateProperty, - 'time': gae.TimeProperty, - 'datetime': gae.DateTimeProperty, - 'id': None, - 'reference': gae.IntegerProperty, - 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)), - 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), - 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), - }) + self.types.update({'boolean': gae.BooleanProperty, + 'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)), + 'text': gae.TextProperty, + 'json': gae.TextProperty, + 'password': gae.StringProperty, + 'blob': gae.BlobProperty, + 'upload': gae.StringProperty, + 'integer': gae.IntegerProperty, + 
'bigint': gae.IntegerProperty, + 'float': gae.FloatProperty, + 'double': gae.FloatProperty, + 'decimal': GAEDecimalProperty, + 'date': gae.DateProperty, + 'time': gae.TimeProperty, + 'datetime': gae.DateTimeProperty, + 'id': None, + 'reference': gae.IntegerProperty, + 'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)), + 'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), + 'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)), + }) self.db = db self.uri = uri self.dbengine = 'google:datastore' @@ -4915,10 +4984,10 @@ class GoogleDatastoreAdapter(NoSQLAdapter): def parse_id(self, value, field_type): return value - def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None): + def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): myfields = {} for field in table: - if isinstance(polymodel,Table) and field.name in polymodel.fields(): + if isinstance(polymodel, Table) and field.name in polymodel.fields(): continue attr = {} if isinstance(field.custom_qualifier, dict): @@ -4947,8 +5016,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter): ftype = self.types[field_type[:14]](**attr) elif field_type.startswith('list:'): ftype = self.types[field_type](**attr) - elif not field_type in self.types\ - or not self.types[field_type]: + elif not field_type in self.types or not self.types[field_type]: raise SyntaxError('Field: unknown field type: %s' % field_type) else: ftype = self.types[field_type](**attr) @@ -4964,14 +5032,14 @@ class GoogleDatastoreAdapter(NoSQLAdapter): elif polymodel==True: pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel table._tableobj = classobj(table._tablename, (pm_cls, ), myfields) - elif isinstance(polymodel,Table): + elif isinstance(polymodel, Table): table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields) else: raise SyntaxError("polymodel must be None, True, a table or a tablename") return None 
- def expand(self,expression,field_type=None): - if isinstance(expression,Field): + def expand(self, expression, field_type=None): + if isinstance(expression, Field): if expression.type in ('text', 'blob', 'json'): raise SyntaxError('AppEngine does not index by: %s' % expression.type) return expression.name @@ -4983,115 +5051,114 @@ class GoogleDatastoreAdapter(NoSQLAdapter): else: return expression.op() elif field_type: - return self.represent(expression,field_type) - elif isinstance(expression,(list,tuple)): - return ','.join([self.represent(item,field_type) for item in expression]) + return self.represent(expression, field_type) + elif isinstance(expression, (list, tuple)): + return ','.join([self.represent(item, field_type) for item in expression]) else: return str(expression) ### TODO from gql.py Expression - def AND(self,first,second): + def AND(self, first, second): a = self.expand(first) b = self.expand(second) if b[0].name=='__key__' and a[0].name!='__key__': return b+a return a+b - def EQ(self,first,second=None): + def EQ(self, first, second=None): if isinstance(second, Key): - return [GAEF(first.name,'=',second,lambda a,b:a==b)] - return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)] + return [GAEF(first.name, '=', second, lambda a, b:a==b)] + return [GAEF(first.name, '=', self.represent(second, first.type), lambda a, b:a==b)] - def NE(self,first,second=None): + def NE(self, first, second=None): if first.type != 'id': - return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] + return [GAEF(first.name, '!=', self.represent(second, first.type), lambda a, b:a!=b)] else: if not second is None: second = Key.from_path(first._tablename, long(second)) - return [GAEF(first.name,'!=',second,lambda a,b:a!=b)] + return [GAEF(first.name, '!=', second, lambda a, b:a!=b)] - def LT(self,first,second=None): + def LT(self, first, second=None): if first.type != 'id': - return 
[GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a',self.represent(second,first.type),lambda a,b:a>b)] + return [GAEF(first.name, '>', self.represent(second, first.type), lambda a, b:a>b)] else: second = Key.from_path(first._tablename, long(second)) - return [GAEF(first.name,'>',second,lambda a,b:a>b)] + return [GAEF(first.name, '>', second, lambda a, b:a>b)] - def GE(self,first,second=None): + def GE(self, first, second=None): if first.type != 'id': - return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] + return [GAEF(first.name, '>=', self.represent(second, first.type), lambda a, b:a>=b)] else: second = Key.from_path(first._tablename, long(second)) - return [GAEF(first.name,'>=',second,lambda a,b:a>=b)] + return [GAEF(first.name, '>=', second, lambda a, b:a>=b)] - def INVERT(self,first): + def INVERT(self, first): return '-%s' % first.name - def COMMA(self,first,second): - return '%s, %s' % (self.expand(first),self.expand(second)) + def COMMA(self, first, second): + return '%s, %s' % (self.expand(first), self.expand(second)) - def BELONGS(self,first,second=None): - if not isinstance(second,(list, tuple, set)): + def BELONGS(self, first, second=None): + if not isinstance(second, (list, tuple, set)): raise SyntaxError("Not supported") if not self.use_ndb: - if isinstance(second,set): + if isinstance(second, set): second = list(second) if first.type == 'id': second = [Key.from_path(first._tablename, int(i)) for i in second] - return [GAEF(first.name,'in',second,lambda a,b:a in b)] + return [GAEF(first.name, 'in', second, lambda a, b:a in b)] - def CONTAINS(self,first,second,case_sensitive=False): + def CONTAINS(self, first, second, case_sensitive=False): # silently ignoring: GAE can only do case sensitive matches! 
if not first.type.startswith('list:'): raise SyntaxError("Not supported") - return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)] + return [GAEF(first.name, '=', self.expand(second, first.type[5:]), lambda a, b:b in a)] - def NOT(self,first): - nops = { self.EQ: self.NE, - self.NE: self.EQ, - self.LT: self.GE, - self.GT: self.LE, - self.LE: self.GT, - self.GE: self.LT} - if not isinstance(first,Query): + def NOT(self, first): + nops = {self.EQ: self.NE, + self.NE: self.EQ, + self.LT: self.GE, + self.GT: self.LE, + self.LE: self.GT, + self.GE: self.LT} + if not isinstance(first, Query): raise SyntaxError("Not suported") - nop = nops.get(first.op,None) + nop = nops.get(first.op, None) if not nop: raise SyntaxError("Not suported %s" % first.op.__name__) first.op = nop return self.expand(first) - def truncate(self,table,mode): + def truncate(self, table, mode): self.db(self.db._adapter.id_query(table)).delete() - GAE_FILTER_OPTIONS = { - '=': lambda q, t, p, v: q.filter(getattr(t,p) == v), - '>': lambda q, t, p, v: q.filter(getattr(t,p) > v), - '<': lambda q, t, p, v: q.filter(getattr(t,p) < v), - '<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v), - '>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v), - '!=': lambda q, t, p, v: q.filter(getattr(t,p) != v), - 'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)), - } + GAE_FILTER_OPTIONS = {'=': lambda q, t, p, v: q.filter(getattr(t, p) == v), + '>': lambda q, t, p, v: q.filter(getattr(t, p) > v), + '<': lambda q, t, p, v: q.filter(getattr(t, p) < v), + '<=': lambda q, t, p, v: q.filter(getattr(t, p) <= v), + '>=': lambda q, t, p, v: q.filter(getattr(t, p) >= v), + '!=': lambda q, t, p, v: q.filter(getattr(t, p) != v), + 'in': lambda q, t, p, v: q.filter(getattr(t, p).IN(v)), + } def filter(self, query, tableobj, prop, op, value): return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value) - def select_raw(self,query,fields=None,attributes=None,count_only=False): + def 
select_raw(self, query, fields=None, attributes=None, count_only=False): db = self.db fields = fields or [] attributes = attributes or {} @@ -5099,7 +5166,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter): new_fields = [] for item in fields: - if isinstance(item,SQLALL): + if isinstance(item, SQLALL): new_fields += item._table else: new_fields.append(item) @@ -5115,15 +5182,15 @@ class GoogleDatastoreAdapter(NoSQLAdapter): if query: if use_common_filters(query): - query = self.common_filter(query,[tablename]) + query = self.common_filter(query, [tablename]) - #tableobj is a GAE/NDB Model class (or subclass) + # tableobj is a GAE/NDB Model class (or subclass) tableobj = db[tablename]._tableobj filters = self.expand(query) projection = None if len(db[tablename].fields) == len(fields): - #getting all fields, not a projection query + # getting all fields, not a projection query projection = None elif args_get('projection') == True: projection = [] @@ -5134,7 +5201,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter): else: projection.append(f.name) - elif args_get('filterfields') == True: + elif args_get('filterfields') is True: projection = [] for f in fields: projection.append(f.name) @@ -5160,10 +5227,10 @@ class GoogleDatastoreAdapter(NoSQLAdapter): filter.name in query_projection and filter.op in ('=', '<=', '>=')): raise SyntaxError("projection fields cannot have equality filters") - if filter.name=='__key__' and filter.op=='>' and filter.value==0: + if filter.name == '__key__' and filter.op == '>' and filter.value == 0: continue - elif filter.name=='__key__' and filter.op=='=': - if filter.value==0: + elif filter.name == '__key__' and filter.op == '=': + if filter.value == 0: items = [] elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key): # key qeuries return a class instance, @@ -5177,20 +5244,21 @@ class GoogleDatastoreAdapter(NoSQLAdapter): # extra values will be ignored in post-processing later item = tableobj.get_by_id(filter.value) items = [item] if 
item else [] - elif isinstance(items,list): # i.e. there is a single record! - items = [i for i in items if filter.apply( - getattr(item,filter.name),filter.value)] - else: - if filter.name=='__key__' and filter.op != 'in': + elif isinstance(items, list): # i.e. there is a single record! + items = [i for i in items if filter.apply(getattr(item, + filter.name), + filter.value)] + else: + if filter.name == '__key__' and filter.op != 'in': items.order(tableobj._key) if self.use_ndb else items.order('__key__') if self.use_ndb: items = self.filter(items, tableobj, filter.name, filter.op, filter.value) else: - items = items.filter('%s %s' % (filter.name,filter.op), filter.value) - + items = items.filter('%s %s' % (filter.name, filter.op), filter.value) + if count_only: - items = [len(items) if isinstance(items,list) else items.count()] - elif not isinstance(items,list): + items = [len(items) if isinstance(items, list) else items.count()] + elif not isinstance(items, list): query = items if args_get('left', None): raise SyntaxError('Set: no left join in appengine') @@ -5201,7 +5269,7 @@ class GoogleDatastoreAdapter(NoSQLAdapter): ### THIS REALLY NEEDS IMPROVEMENT !!! 
if isinstance(orderby, (list, tuple)): orderby = xorify(orderby) - if isinstance(orderby,Expression): + if isinstance(orderby, Expression): orderby = self.expand(orderby) orders = orderby.split(', ') for order in orders: @@ -5211,37 +5279,37 @@ class GoogleDatastoreAdapter(NoSQLAdapter): s = str(o) desc = s[0] == '-' s = (desc and s[1:]) or s - return (desc and -getattr(tableobj, s)) or getattr(tableobj, s) - _order = {'-id':-tableobj._key,'id':tableobj._key}.get(order) + return (desc and -getattr(tableobj, s)) or getattr(tableobj, s) + _order = {'-id': -tableobj._key, 'id': tableobj._key}.get(order) if _order is None: _order = make_order(order) query = query.order(_order) else: - order={'-id':'-__key__','id':'__key__'}.get(order,order) + order = {'-id': '-__key__', 'id': '__key__'}.get(order, order) query = query.order(order) if args_get('limitby', None): (lmin, lmax) = attributes['limitby'] - limit, fetch_args = lmax-lmin, {'offset':lmin,'keys_only':True} + limit, fetch_args = lmax-lmin, {'offset': lmin, 'keys_only': True} if self.use_ndb: - keys, cursor, more = query.fetch_page(limit,**fetch_args) - items = ndb.get_multi(keys) + keys, cursor, more = query.fetch_page(limit, **fetch_args) + items = ndb.get_multi(keys) else: - keys = query.fetch(limit, **fetch_args) + keys = query.fetch(limit, **fetch_args) items = gae.get(keys) - cursor = query.cursor() - #cursor is only useful if there was a limit and we didn't return + cursor = query.cursor() + # cursor is only useful if there was a limit and we didn't return # all results if args_get('reusecursor'): db['_lastcursor'] = cursor else: # if a limit is not specified, always return an iterator - rows = query + rows = query return (items, tablename, projection or db[tablename].fields) - def select(self,query,fields,attributes): + def select(self, query, fields, attributes): """ This is the GAE version of select. 
Some notes to consider: - db['_lastsql'] is not set because there is not SQL statement string @@ -5265,14 +5333,14 @@ class GoogleDatastoreAdapter(NoSQLAdapter): to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors """ - (items, tablename, fields) = self.select_raw(query,fields,attributes) - # self.db['_lastsql'] = self._select(query,fields,attributes) + (items, tablename, fields) = self.select_raw(query, fields, attributes) + # self.db['_lastsql'] = self._select(query, fields, attributes) rows = [[(t==self.db[tablename]._id.name and item) or \ (t=='nativeRef' and item) or getattr(item, t) \ for t in fields] for item in items] colnames = ['%s.%s' % (tablename, t) for t in fields] - processor = attributes.get('processor',self.parse) - return processor(rows,fields,colnames,False) + processor = attributes.get('processor', self.parse) + return processor(rows, fields, colnames, False) def parse_list_integers(self, value, field_type): return value[:] if self.use_ndb else value @@ -5280,22 +5348,22 @@ class GoogleDatastoreAdapter(NoSQLAdapter): def parse_list_strings(self, value, field_type): return value[:] if self.use_ndb else value - def count(self,query,distinct=None,limit=None): + def count(self, query, distinct=None, limit=None): if distinct: raise RuntimeError("COUNT DISTINCT not supported") - (items, tablename, fields) = self.select_raw(query,count_only=True) + (items, tablename, fields) = self.select_raw(query, count_only=True) return items[0] - def delete(self,tablename, query): + def delete(self, tablename, query): """ This function was changed on 2010-05-04 because according to http://code.google.com/p/googleappengine/issues/detail?id=3119 GAE no longer supports deleting more than 1000 records. 
""" - # self.db['_lastsql'] = self._delete(tablename,query) + # self.db['_lastsql'] = self._delete(tablename, query) (items, tablename, fields) = self.select_raw(query) # items can be one item or a query - if not isinstance(items,list): + if not isinstance(items, list): #use a keys_only query to ensure that this runs as a datastore # small operations leftitems = items.fetch(1000, keys_only=True) @@ -5315,21 +5383,21 @@ class GoogleDatastoreAdapter(NoSQLAdapter): gae.delete(items) return counter - def update(self,tablename,query,update_fields): - # self.db['_lastsql'] = self._update(tablename,query,update_fields) + def update(self, tablename, query, update_fields): + # self.db['_lastsql'] = self._update(tablename, query, update_fields) (items, tablename, fields) = self.select_raw(query) counter = 0 for item in items: for field, value in update_fields: - setattr(item, field.name, self.represent(value,field.type)) + setattr(item, field.name, self.represent(value, field.type)) item.put() counter += 1 LOGGER.info(str(counter)) return counter - def insert(self,table,fields): - dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) - # table._db['_lastsql'] = self._insert(table,fields) + def insert(self, table, fields): + dfields = dict((f.name, self.represent(v, f.type)) for f, v in fields) + # table._db['_lastsql'] = self._insert(table, fields) tmp = table._tableobj(**dfields) tmp.put() key = tmp.key if self.use_ndb else tmp.key() @@ -5337,10 +5405,10 @@ class GoogleDatastoreAdapter(NoSQLAdapter): (rid._table, rid._record, rid._gaekey) = (table, None, key) return rid - def bulk_insert(self,table,items): + def bulk_insert(self, table, items): parsed_items = [] for item in items: - dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) + dfields = dict((f.name, self.represent(v, f.type)) for f, v in item) parsed_items.append(table._tableobj(**dfields)) if self.use_ndb: ndb.put_multi(parsed_items) @@ -5348,78 +5416,81 @@ class 
GoogleDatastoreAdapter(NoSQLAdapter): gae.put(parsed_items) return True + def uuid2int(uuidv): return uuid.UUID(uuidv).int + def int2uuid(n): return str(uuid.UUID(int=n)) + class CouchDBAdapter(NoSQLAdapter): - drivers = ('couchdb',) + drivers = ('couchdb', ) uploads_in_blob = True - types = { - 'boolean': bool, - 'string': str, - 'text': str, - 'json': str, - 'password': str, - 'blob': str, - 'upload': str, - 'integer': long, - 'bigint': long, - 'float': float, - 'double': float, - 'date': datetime.date, - 'time': datetime.time, - 'datetime': datetime.datetime, - 'id': long, - 'reference': long, - 'list:string': list, - 'list:integer': list, - 'list:reference': list, - } + types = {'boolean': bool, + 'string': str, + 'text': str, + 'json': str, + 'password': str, + 'blob': str, + 'upload': str, + 'integer': long, + 'bigint': long, + 'float': float, + 'double': float, + 'date': datetime.date, + 'time': datetime.time, + 'datetime': datetime.datetime, + 'id': long, + 'reference': long, + 'list:string': list, + 'list:integer': list, + 'list:reference': list, + } def file_exists(self, filename): pass + def file_open(self, filename, mode='rb', lock=True): pass + def file_close(self, fileobj): pass - def expand(self,expression,field_type=None): - if isinstance(expression,Field): - if expression.type=='id': + def expand(self, expression, field_type=None): + if isinstance(expression, Field): + if expression.type == 'id': return "%s._id" % expression.tablename - return BaseAdapter.expand(self,expression,field_type) + return BaseAdapter.expand(self, expression, field_type) - def AND(self,first,second): - return '(%s && %s)' % (self.expand(first),self.expand(second)) + def AND(self, first, second): + return '(%s && %s)' % (self.expand(first), self.expand(second)) - def OR(self,first,second): - return '(%s || %s)' % (self.expand(first),self.expand(second)) + def OR(self, first, second): + return '(%s || %s)' % (self.expand(first), self.expand(second)) - def 
EQ(self,first,second): + def EQ(self, first, second): if second is None: return '(%s == null)' % self.expand(first) - return '(%s == %s)' % (self.expand(first),self.expand(second,first.type)) + return '(%s == %s)' % (self.expand(first), self.expand(second, first.type)) - def NE(self,first,second): + def NE(self, first, second): if second is None: return '(%s != null)' % self.expand(first) - return '(%s != %s)' % (self.expand(first),self.expand(second,first.type)) + return '(%s != %s)' % (self.expand(first), self.expand(second, first.type)) - def COMMA(self,first,second): - return '%s + %s' % (self.expand(first),self.expand(second)) + def COMMA(self, first, second): + return '%s + %s' % (self.expand(first), self.expand(second)) def represent(self, obj, fieldtype): value = NoSQLAdapter.represent(self, obj, fieldtype) - if fieldtype=='id': + if fieldtype == 'id': return repr(str(long(value))) - elif fieldtype in ('date','time','datetime','boolean'): + elif fieldtype in ('date', 'time', 'datetime', 'boolean'): return serializers.json(value) - return repr(not isinstance(value,unicode) and value \ - or value and value.encode('utf8')) + return repr(not isinstance(value, unicode) and value or value and value.encode('utf8')) - def __init__(self,db,uri='couchdb://127.0.0.1:5984', - pool_size=0,folder=None,db_codec ='UTF-8', + def __init__(self, db, uri='couchdb://127.0.0.1:5984', + pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): self.db = db @@ -5432,10 +5503,12 @@ class CouchDBAdapter(NoSQLAdapter): self._after_connection = after_connection self.pool_size = pool_size - url='http://'+uri[10:] - def connector(url=url,driver_args=driver_args): - return self.driver.Server(url,**driver_args) - self.reconnect(connector,cursor=False) + url = 'http://'+uri[10:] + + def connector(url=url, driver_args=driver_args): + return self.driver.Server(url, **driver_args) + + 
self.reconnect(connector, cursor=False) def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None): if migrate: @@ -5444,55 +5517,58 @@ class CouchDBAdapter(NoSQLAdapter): except: pass - def insert(self,table,fields): + def insert(self, table, fields): id = uuid2int(web2py_uuid()) ctable = self.connection[table._tablename] - values = dict((k.name,self.represent(v,k.type)) for k,v in fields) + values = dict((k.name, self.represent(v, k.type)) for k, v in fields) values['_id'] = str(id) ctable.save(values) return id - def _select(self,query,fields,attributes): - if not isinstance(query,Query): + def _select(self, query, fields, attributes): + if not isinstance(query, Query): raise SyntaxError("Not Supported") for key in set(attributes.keys())-SELECT_ARGS: raise SyntaxError('invalid select attribute: %s' % key) new_fields=[] for item in fields: - if isinstance(item,SQLALL): + if isinstance(item, SQLALL): new_fields += item._table else: new_fields.append(item) + def uid(fd): - return fd=='id' and '_id' or fd - def get(row,fd): - return fd=='id' and long(row['_id']) or row.get(fd,None) + return fd == 'id' and '_id' or fd + + def get(row, fd): + return fd=='id' and long(row['_id']) or row.get(fd, None) + fields = new_fields tablename = self.get_table(query) fieldnames = [f.name for f in (fields or self.db[tablename])] - colnames = ['%s.%s' % (tablename,k) for k in fieldnames] - fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames]) - fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\ - dict(t=tablename, - query=self.expand(query), - order='%s._id' % tablename, - fields=fields) + colnames = ['%s.%s' % (tablename, k) for k in fieldnames] + fields = ','.join(['%s.%s' % (tablename, uid(f)) for f in fieldnames]) + fn = "(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" \ + % dict(t=tablename, + query=self.expand(query), + order='%s._id' % tablename, + fields=fields) return fn, colnames - def 
select(self,query,fields,attributes): - if not isinstance(query,Query): + def select(self, query, fields, attributes): + if not isinstance(query, Query): raise SyntaxError("Not Supported") - fn, colnames = self._select(query,fields,attributes) + fn, colnames = self._select(query, fields, attributes) tablename = colnames[0].split('.')[0] ctable = self.connection[tablename] rows = [cols['value'] for cols in ctable.query(fn)] - processor = attributes.get('processor',self.parse) - return processor(rows,fields,colnames,False) + processor = attributes.get('processor', self.parse) + return processor(rows, fields, colnames, False) - def delete(self,tablename,query): - if not isinstance(query,Query): + def delete(self, tablename, query): + if not isinstance(query, Query): raise SyntaxError("Not Supported") - if query.first.type=='id' and query.op==self.EQ: + if query.first.type == 'id' and query.op == self.EQ: id = query.second tablename = query.first.tablename assert(tablename == query.first.tablename) @@ -5504,89 +5580,90 @@ class CouchDBAdapter(NoSQLAdapter): return 0 else: tablename = self.get_table(query) - rows = self.select(query,[self.db[tablename]._id],{}) + rows = self.select(query, [self.db[tablename]._id], {}) ctable = self.connection[tablename] for row in rows: del ctable[str(row.id)] return len(rows) - def update(self,tablename,query,fields): - if not isinstance(query,Query): + def update(self, tablename, query, fields): + if not isinstance(query, Query): raise SyntaxError("Not Supported") - if query.first.type=='id' and query.op==self.EQ: + if query.first.type == 'id' and query.op == self.EQ: id = query.second tablename = query.first.tablename ctable = self.connection[tablename] try: doc = ctable[str(id)] - for key,value in fields: - doc[key.name] = self.represent(value,self.db[tablename][key.name].type) + for key, value in fields: + doc[key.name] = self.represent(value, self.db[tablename][key.name].type) ctable.save(doc) return 1 except 
couchdb.http.ResourceNotFound: return 0 else: tablename = self.get_table(query) - rows = self.select(query,[self.db[tablename]._id],{}) + rows = self.select(query, [self.db[tablename]._id], {}) ctable = self.connection[tablename] table = self.db[tablename] for row in rows: doc = ctable[str(row.id)] - for key,value in fields: - doc[key.name] = self.represent(value,table[key.name].type) + for key, value in fields: + doc[key.name] = self.represent(value, table[key.name].type) ctable.save(doc) return len(rows) - def count(self,query,distinct=None): + def count(self, query, distinct=None): if distinct: raise RuntimeError("COUNT DISTINCT not supported") - if not isinstance(query,Query): + if not isinstance(query, Query): raise SyntaxError("Not Supported") tablename = self.get_table(query) - rows = self.select(query,[self.db[tablename]._id],{}) + rows = self.select(query, [self.db[tablename]._id], {}) return len(rows) + def cleanup(text): """ Validates that the given text is clean: only contains [0-9a-zA-Z_] """ - #if not REGEX_ALPHANUMERIC.match(text): - # raise SyntaxError('invalid table or field name: %s' % text) + # if not REGEX_ALPHANUMERIC.match(text): + # raise SyntaxError('invalid table or field name: %s' % text) return text + class MongoDBAdapter(NoSQLAdapter): native_json = True - drivers = ('pymongo',) + drivers = ('pymongo', ) uploads_in_blob = False - types = { - 'boolean': bool, - 'string': str, - 'text': str, - 'json': str, - 'password': str, - 'blob': str, - 'upload': str, - 'integer': long, - 'bigint': long, - 'float': float, - 'double': float, - 'date': datetime.date, - 'time': datetime.time, - 'datetime': datetime.datetime, - 'id': long, - 'reference': long, - 'list:string': list, - 'list:integer': list, - 'list:reference': list, - } + types = {'boolean': bool, + 'string': str, + 'text': str, + 'json': str, + 'password': str, + 'blob': str, + 'upload': str, + 'integer': long, + 'bigint': long, + 'float': float, + 'double': float, + 'date': 
datetime.date, + 'time': datetime.time, + 'datetime': datetime.datetime, + 'id': long, + 'reference': long, + 'list:string': list, + 'list:integer': list, + 'list:reference': list, + } error_messages = {"javascript_needed": "This must yet be replaced" + " with javascript in order to work."} - def __init__(self,db,uri='mongodb://127.0.0.1:5984/db', - pool_size=0, folder=None, db_codec ='UTF-8', + def __init__(self, db, uri='mongodb://127.0.0.1:5984/db', + pool_size=0, folder=None, db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, do_connect=True, after_connection=None): @@ -5612,21 +5689,21 @@ class MongoDBAdapter(NoSQLAdapter): self.pool_size = pool_size #this is the minimum amount of replicates that it should wait # for on insert/update - self.minimumreplication = adapter_args.get('minimumreplication',0) + self.minimumreplication = adapter_args.get('minimumreplication', 0) # by default all inserts and selects are performand asynchronous, # but now the default is # synchronous, except when overruled by either this default or # function parameter - self.safe = adapter_args.get('safe',True) + self.safe = adapter_args.get('safe', True) # load user setting for uploads in blob storage self.uploads_in_blob = adapter_args.get('uploads_in_blob', False) - if isinstance(m,tuple): + if isinstance(m, tuple): m = {"database" : m[1]} - if m.get('database')==None: + if m.get('database') is None: raise SyntaxError("Database is required!") - def connector(uri=self.uri,m=m): + def connector(uri=self.uri, m=m): # Connection() is deprecated if hasattr(self.driver, "MongoClient"): Connection = self.driver.MongoClient @@ -5634,7 +5711,7 @@ class MongoDBAdapter(NoSQLAdapter): Connection = self.driver.Connection return Connection(uri)[m.get('database')] - self.reconnect(connector,cursor=False) + self.reconnect(connector, cursor=False) def object_id(self, arg=None): """ Convert input to a valid Mongodb ObjectId instance @@ -5691,8 +5768,8 @@ class 
MongoDBAdapter(NoSQLAdapter): else: value = NoSQLAdapter.represent(self, obj, fieldtype) # reference types must be convert to ObjectID - if fieldtype =='date': - if value == None: + if fieldtype == 'date': + if value is None: return value # this piece of data can be stripped off based on the fieldtype t = datetime.time(0, 0, 0) @@ -5700,7 +5777,7 @@ class MongoDBAdapter(NoSQLAdapter): # string or integer return datetime.datetime.combine(value, t) elif fieldtype == 'time': - if value == None: + if value is None: return value # this piece of data can be stripped of based on the fieldtype d = datetime.date(2000, 1, 1) @@ -5708,7 +5785,7 @@ class MongoDBAdapter(NoSQLAdapter): # string or integer return datetime.datetime.combine(d, value) elif fieldtype == "blob": - if value== None: + if value is None: return value from bson import Binary if not isinstance(value, Binary): @@ -5726,7 +5803,7 @@ class MongoDBAdapter(NoSQLAdapter): return value elif ((isinstance(fieldtype, basestring) and fieldtype.startswith("reference")) or - (isinstance(fieldtype, Table)) or fieldtype=="id"): + (isinstance(fieldtype, Table)) or fieldtype == "id"): value = self.object_id(value) return value @@ -5738,11 +5815,11 @@ class MongoDBAdapter(NoSQLAdapter): def count(self, query, distinct=None, snapshot=True): if distinct: raise RuntimeError("COUNT DISTINCT not supported") - if not isinstance(query,Query): + if not isinstance(query, Query): raise SyntaxError("Not Supported") tablename = self.get_table(query) - return long(self.select(query,[self.db[tablename]._id], {}, - count=True,snapshot=snapshot)['count']) + return long(self.select(query, [self.db[tablename]._id], {}, + count=True, snapshot=snapshot)['count']) # Maybe it would be faster if we just implemented the pymongo # .count() function which is probably quicker? 
# therefor call __select() connection[table].find(query).count() @@ -5755,7 +5832,7 @@ class MongoDBAdapter(NoSQLAdapter): # convert second arg to an objectid field # (if its not already) # if second arg is 0 convert to objectid - if isinstance(expression.first,Field) and \ + if isinstance(expression.first, Field) and \ ((expression.first.type == 'id') or \ ("reference" in expression.first.type)): if expression.first.type == 'id': @@ -5769,10 +5846,10 @@ class MongoDBAdapter(NoSQLAdapter): result = expression.op(expression.first, expression.second) if isinstance(expression, Field): - if expression.type=='id': + if expression.type =='id': result = "_id" else: - result = expression.name + result = expression.name elif isinstance(expression, (Expression, Query)): if not expression.second is None: result = expression.op(expression.first, expression.second) @@ -5783,9 +5860,9 @@ class MongoDBAdapter(NoSQLAdapter): else: result = expression.op elif field_type: - result = self.represent(expression,field_type) - elif isinstance(expression,(list,tuple)): - result = ','.join(self.represent(item,field_type) for + result = self.represent(expression, field_type) + elif isinstance(expression, (list, tuple)): + result = ','.join(self.represent(item, field_type) for item in expression) else: result = expression @@ -5805,8 +5882,7 @@ class MongoDBAdapter(NoSQLAdapter): snapshot=False): mongofields_dict = self.SON() mongoqry_dict = {} - new_fields=[] - mongosort_list = [] + new_fields, mongosort_list = [], [] # try an orderby attribute orderby = attributes.get('orderby', False) limitby = attributes.get('limitby', False) @@ -5814,8 +5890,8 @@ class MongoDBAdapter(NoSQLAdapter): if 'for_update' in attributes: logging.warn('mongodb does not support for_update') for key in set(attributes.keys())-set(('limitby', - 'orderby','for_update')): - if attributes[key]!=None: + 'orderby', 'for_update')): + if attributes[key] is not None: logging.warn('select attribute not implemented: %s' % key) 
if limitby: limitby_skip, limitby_limit = limitby[0], int(limitby[1]) @@ -5836,7 +5912,7 @@ class MongoDBAdapter(NoSQLAdapter): else: new_fields.append(item) fields = new_fields - if isinstance(query,Query): + if isinstance(query, Query): tablename = self.get_table(query) elif len(fields) != 0: tablename = fields[0].tablename @@ -5849,16 +5925,16 @@ class MongoDBAdapter(NoSQLAdapter): mongofields_dict[field.name] = 1 ctable = self.connection[tablename] if count: - return {'count' : ctable.find( + return {'count': ctable.find( mongoqry_dict, mongofields_dict, skip=limitby_skip, limit=limitby_limit, sort=mongosort_list, snapshot=snapshot).count()} else: # pymongo cursor object mongo_list_dicts = ctable.find(mongoqry_dict, - mongofields_dict, skip=limitby_skip, - limit=limitby_limit, sort=mongosort_list, - snapshot=snapshot) + mongofields_dict, skip=limitby_skip, + limit=limitby_limit, sort=mongosort_list, + snapshot=snapshot) rows = [] # populate row in proper order # Here we replace ._id with .id to follow the standard naming @@ -5874,7 +5950,7 @@ class MongoDBAdapter(NoSQLAdapter): newnames.append(".".join((tablename, field.name))) for record in mongo_list_dicts: - row=[] + row = [] for colname in colnames: tablename, fieldname = colname.split(".") # switch to Mongo _id uuids for retrieving @@ -5896,7 +5972,7 @@ class MongoDBAdapter(NoSQLAdapter): For safety, we use by default synchronous requests""" values = dict() - if safe==None: + if safe is None: safe = self.safe ctable = self.connection[table._tablename] for k, v in fields: @@ -5909,7 +5985,7 @@ class MongoDBAdapter(NoSQLAdapter): return long(str(values['_id']), 16) def update(self, tablename, query, fields, safe=None): - if safe == None: + if safe is None: safe = self.safe # return amount of adjusted rows or zero, but no exceptions # @ related not finding the result @@ -5945,14 +6021,13 @@ class MongoDBAdapter(NoSQLAdapter): amount = 0 amount = self.count(query, False) if not isinstance(query, Query): - 
raise RuntimeError("query type %s is not supported" % \ - type(query)) + raise RuntimeError("query type %s is not supported" % type(query)) filter = self.expand(query) self.connection[tablename].remove(filter, safe=safe) return amount def bulk_insert(self, table, items): - return [self.insert(table,item) for item in items] + return [self.insert(table, item) for item in items] ## OPERATORS def INVERT(self, first): @@ -5963,23 +6038,23 @@ class MongoDBAdapter(NoSQLAdapter): def NOT(self, first): return {'$not': self.expand(first)} - def AND(self,first,second): + def AND(self, first, second): # pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]}) - return {'$and': [self.expand(first),self.expand(second)]} + return {'$and': [self.expand(first), self.expand(second)]} - def OR(self,first,second): + def OR(self, first, second): # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) - return {'$or': [self.expand(first),self.expand(second)]} + return {'$or': [self.expand(first), self.expand(second)]} def BELONGS(self, first, second): if isinstance(second, str): - return {self.expand(first) : {"$in" : [ second[:-1]]} } - elif second==[] or second==() or second==set(): - return {1:0} + return {self.expand(first): {"$in": [second[:-1]]}} + elif second == [] or second == () or second == set(): + return {1: 0} items = [self.expand(item, first.type) for item in second] - return {self.expand(first) : {"$in" : items} } + return {self.expand(first): {"$in": items}} - def EQ(self,first,second=None): + def EQ(self, first, second=None): result = {} result[self.expand(first)] = self.expand(second) return result @@ -5989,26 +6064,26 @@ class MongoDBAdapter(NoSQLAdapter): result[self.expand(first)] = {'$ne': self.expand(second)} return result - def LT(self,first,second=None): + def LT(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s < None" % first) result = {} result[self.expand(first)] = {'$lt': self.expand(second)} return result - def 
LE(self,first,second=None): + def LE(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s <= None" % first) result = {} result[self.expand(first)] = {'$lte': self.expand(second)} return result - def GT(self,first,second): + def GT(self, first, second): result = {} result[self.expand(first)] = {'$gt': self.expand(second)} return result - def GE(self,first,second=None): + def GE(self, first, second=None): if second is None: raise RuntimeError("Cannot compare %s >= None" % first) result = {} @@ -6059,40 +6134,37 @@ class MongoDBAdapter(NoSQLAdapter): return '%s, %s' % (self.expand(first), self.expand(second)) def LIKE(self, first, second): - #escaping regex operators? - return {self.expand(first): ('%s' % \ - self.expand(second, 'string').replace('%','/'))} + # escaping regex operators? + return {self.expand(first): ('%s' % self.expand(second, 'string').replace('%', '/'))} def ILIKE(self, first, second): - val = second if isinstance(second,self.ObjectId) else { + val = second if isinstance(second, self.ObjectId) else { '$regex': second.replace('%', ''), '$options': 'i'} return {self.expand(first): val} def STARTSWITH(self, first, second): #escaping regex operators? - return {self.expand(first): ('/^%s/' % \ - self.expand(second, 'string'))} + return {self.expand(first): ('/^%s/' % self.expand(second, 'string'))} def ENDSWITH(self, first, second): #escaping regex operators? 
- return {self.expand(first): ('/%s^/' % \ - self.expand(second, 'string'))} + return {self.expand(first): ('/%s^/' % self.expand(second, 'string'))} def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive # There is a technical difference, but mongodb doesn't support # that, but the result will be the same - val = second if isinstance(second,self.ObjectId) else \ - {'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"} - return {self.expand(first) : val} + val = second if isinstance(second, self.ObjectId) else \ + {'$regex': ".*" + re.escape(self.expand(second, 'string')) + ".*"} + return {self.expand(first): val} def LIKE(self, first, second): import re return {self.expand(first): {'$regex': \ re.escape(self.expand(second, - 'string')).replace('%','.*')}} + 'string')).replace('%', '.*')}} - #TODO verify full compatibilty with official SQL Like operator + # TODO verify full compatibilty with official SQL Like operator def STARTSWITH(self, first, second): #TODO Solve almost the same problem as with endswith import re @@ -6100,9 +6172,9 @@ class MongoDBAdapter(NoSQLAdapter): re.escape(self.expand(second, 'string'))}} - #TODO verify full compatibilty with official SQL Like operator + # TODO verify full compatibilty with official SQL Like operator def ENDSWITH(self, first, second): - #escaping regex operators? + # escaping regex operators? #TODO if searched for a name like zsa_corbitt and the function # is endswith('a') then this is also returned. 
# Aldo it end with a t @@ -6110,18 +6182,18 @@ class MongoDBAdapter(NoSQLAdapter): return {self.expand(first): {'$regex': \ re.escape(self.expand(second, 'string')) + '$'}} - #TODO verify full compatibilty with official oracle contains operator + # TODO verify full compatibilty with official oracle contains operator def CONTAINS(self, first, second, case_sensitive=False): # silently ignore, only case sensitive - #There is a technical difference, but mongodb doesn't support + # There is a technical difference, but mongodb doesn't support # that, but the result will be the same - #TODO contains operators need to be transformed to Regex + # TODO contains operators need to be transformed to Regex return {self.expand(first) : {'$regex': \ ".*" + re.escape(self.expand(second, 'string')) + ".*"}} class IMAPAdapter(NoSQLAdapter): - drivers = ('imaplib',) + drivers = ('imaplib', ) """ IMAP server adapter @@ -6251,18 +6323,17 @@ class IMAPAdapter(NoSQLAdapter): """ - types = { - 'string': str, - 'text': str, - 'date': datetime.date, - 'datetime': datetime.datetime, - 'id': long, - 'boolean': bool, - 'integer': int, - 'bigint': long, - 'blob': str, - 'list:string': str - } + types = {'string': str, + 'text': str, + 'date': datetime.date, + 'datetime': datetime.datetime, + 'id': long, + 'boolean': bool, + 'integer': int, + 'bigint': long, + 'blob': str, + 'list:string': str + } dbengine = 'imap' @@ -6273,7 +6344,7 @@ class IMAPAdapter(NoSQLAdapter): uri, pool_size=0, folder=None, - db_codec ='UTF-8', + db_codec='UTF-8', credential_decoder=IDENTITY, driver_args={}, adapter_args={}, @@ -6286,7 +6357,7 @@ class IMAPAdapter(NoSQLAdapter): self.db = db self.uri = uri if do_connect: self.find_driver(adapter_args) - self.pool_size=pool_size + self.pool_size = pool_size self.folder = folder self.db_codec = db_codec self._after_connection = after_connection @@ -6305,18 +6376,17 @@ class IMAPAdapter(NoSQLAdapter): self.flags = {'deleted': '\\Deleted', 'draft': '\\Draft', 'flagged': 
'\\Flagged', 'recent': '\\Recent', 'seen': '\\Seen', 'answered': '\\Answered'} - self.search_fields = { - 'id': 'MESSAGE', 'created': 'DATE', - 'uid': 'UID', 'sender': 'FROM', - 'to': 'TO', 'cc': 'CC', - 'bcc': 'BCC', 'content': 'TEXT', - 'size': 'SIZE', 'deleted': '\\Deleted', - 'draft': '\\Draft', 'flagged': '\\Flagged', - 'recent': '\\Recent', 'seen': '\\Seen', - 'subject': 'SUBJECT', 'answered': '\\Answered', - 'mime': None, 'email': None, - 'attachments': None - } + self.search_fields = {'id': 'MESSAGE', 'created': 'DATE', + 'uid': 'UID', 'sender': 'FROM', + 'to': 'TO', 'cc': 'CC', + 'bcc': 'BCC', 'content': 'TEXT', + 'size': 'SIZE', 'deleted': '\\Deleted', + 'draft': '\\Draft', 'flagged': '\\Flagged', + 'recent': '\\Recent', 'seen': '\\Seen', + 'subject': 'SUBJECT', 'answered': '\\Answered', + 'mime': None, 'email': None, + 'attachments': None + } db['_lastsql'] = '' @@ -6326,10 +6396,11 @@ class IMAPAdapter(NoSQLAdapter): host = m.group('host') port = int(m.group('port')) over_ssl = False - if port==993: + if port == 993: over_ssl = True - driver_args.update(host=host,port=port, password=password, user=user) + driver_args.update(host=host, port=port, password=password, user=user) + def connector(driver_args=driver_args): # it is assumed sucessful authentication alLways # TODO: support direct connection and login tests @@ -6344,7 +6415,7 @@ class IMAPAdapter(NoSQLAdapter): connection.mailbox_names = None # dummy cursor function - connection.cursor = lambda : True + connection.cursor = lambda: True return connection @@ -6362,7 +6433,7 @@ class IMAPAdapter(NoSQLAdapter): closing """ - if getattr(self,'connection',None) != None: + if getattr(self, 'connection', None) is not None: return if f is None: f = self.connector @@ -6437,8 +6508,8 @@ class IMAPAdapter(NoSQLAdapter): add adds to the date object """ - months = [None, "JAN","FEB","MAR","APR","MAY","JUN", - "JUL", "AUG","SEP","OCT","NOV","DEC"] + months = [None, "JAN", "FEB", "MAR", "APR", "MAY", "JUN", + 
"JUL", "AUG", "SEP", "OCT", "NOV", "DEC"] if isinstance(date, basestring): # Prevent unexpected date response format try: @@ -6507,12 +6578,12 @@ class IMAPAdapter(NoSQLAdapter): if not "NOSELECT" in item.upper(): sub_items = item.split("\"") sub_items = [sub_item for sub_item in sub_items \ - if len(sub_item.strip()) > 0] + if len(sub_item.strip()) > 0] # mailbox = sub_items[len(sub_items) -1] mailbox = sub_items[-1].strip() # remove unwanted characters and store original names # Don't allow leading non alphabetic characters - mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) + mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]', '', re.sub('[/ ]', '_', mailbox))) mailboxes.append(mailbox_name) self.connection.mailbox_names[mailbox_name] = mailbox @@ -6565,26 +6636,28 @@ class IMAPAdapter(NoSQLAdapter): for name in names: self.db.define_table("%s" % name, - Field("uid", writable=False), - Field("created", "datetime", writable=False), - Field("content", "text", writable=False), - Field("to", writable=False), - Field("cc", writable=False), - Field("bcc", writable=False), - Field("sender", writable=False), - Field("size", "integer", writable=False), - Field("subject", writable=False), - Field("mime", writable=False), - Field("email", "text", writable=False, readable=False), - Field("attachments", "text", writable=False, readable=False), - Field("encoding", writable=False), - Field("answered", "boolean"), - Field("deleted", "boolean"), - Field("draft", "boolean"), - Field("flagged", "boolean"), - Field("recent", "boolean", writable=False), - Field("seen", "boolean") - ) + Field("uid", writable=False), + Field("created", "datetime", writable=False), + Field("content", "text", writable=False), + Field("to", writable=False), + Field("cc", writable=False), + Field("bcc", writable=False), + Field("sender", writable=False), + Field("size", "integer", writable=False), + Field("subject", writable=False), + Field("mime", writable=False), + 
Field("email", "text", writable=False, + readable=False), + Field("attachments", "text", writable=False, + readable=False), + Field("encoding", writable=False), + Field("answered", "boolean"), + Field("deleted", "boolean"), + Field("draft", "boolean"), + Field("flagged", "boolean"), + Field("recent", "boolean", writable=False), + Field("seen", "boolean") + ) # Set a special _mailbox attribute for storing # native mailbox names @@ -6794,7 +6867,7 @@ class IMAPAdapter(NoSQLAdapter): elif (("text" in maintype) and ("%s.content" % tablename in colnames)): values.update({"text": self.encode_text(payload, - self.get_charset(part))}) + self.get_charset(part))}) content.append(values) if "%s.size" % tablename in colnames: @@ -6815,7 +6888,7 @@ class IMAPAdapter(NoSQLAdapter): # parse result and return a rows object colnames = colnames - processor = attributes.get('processor',self.parse) + processor = attributes.get('processor', self.parse) return processor(imapqry_array, fields, colnames) def insert(self, table, fields): @@ -6848,8 +6921,8 @@ class IMAPAdapter(NoSQLAdapter): attachments = d.get("attachments", []) content = d.get("content", []) flags = " ".join(["\\%s" % flag.capitalize() for flag in - ("answered", "deleted", "draft", "flagged", - "recent", "seen") if d.get(flag, False)]) + ("answered", "deleted", "draft", "flagged", + "recent", "seen") if d.get(flag, False)]) if not message: from email.message import Message mime = d.get("mime", None) @@ -6868,11 +6941,9 @@ class IMAPAdapter(NoSQLAdapter): if isinstance(value, basestring): message[item] = value else: - message[item] = ";".join([i for i in - value]) + message[item] = ";".join([i for i in value]) if (not message.is_multipart() and - (not message.get_content_type().startswith( - "multipart"))): + (not message.get_content_type().startswith("multipart"))): if isinstance(content, basestring): message.set_payload(content) elif len(content) > 0: @@ -6885,7 +6956,7 @@ class IMAPAdapter(NoSQLAdapter): result, data = 
self.connection.append(mailbox, flags, struct_time, message) if result == "OK": uid = int(re.findall("\d+", str(data))[-1]) - return self.db(table.uid==uid).select(table.id).first().id + return self.db(table.uid == uid).select(table.id).first().id else: raise Exception("IMAP message append failed: %s" % data) else: @@ -6933,7 +7004,7 @@ class IMAPAdapter(NoSQLAdapter): raise Exception("IMAP storing error: %s" % data) return rowcount - def count(self,query,distinct=None): + def count(self, query, distinct=None): counter = 0 tablename = self.get_query_mailbox(query) if query and tablename is not None: @@ -7106,7 +7177,7 @@ class IMAPAdapter(NoSQLAdapter): result = result.replace("NOT NOT", "").strip() return result - def EQ(self,first,second): + def EQ(self, first, second): name = self.search_fields[first.name] result = None if name is not None: @@ -7145,45 +7216,45 @@ class IMAPAdapter(NoSQLAdapter): # end of adapters ######################################################################## -ADAPTERS = { - 'sqlite': SQLiteAdapter, - 'spatialite': SpatiaLiteAdapter, - 'sqlite:memory': SQLiteAdapter, - 'spatialite:memory': SpatiaLiteAdapter, - 'mysql': MySQLAdapter, - 'postgres': PostgreSQLAdapter, - 'postgres:psycopg2': PostgreSQLAdapter, - 'postgres:pg8000': PostgreSQLAdapter, - 'postgres2:psycopg2': NewPostgreSQLAdapter, - 'postgres2:pg8000': NewPostgreSQLAdapter, - 'oracle': OracleAdapter, - 'mssql': MSSQLAdapter, - 'mssql2': MSSQL2Adapter, - 'mssql3': MSSQL3Adapter, - 'mssql4' : MSSQL4Adapter, - 'vertica': VerticaAdapter, - 'sybase': SybaseAdapter, - 'db2': DB2Adapter, - 'teradata': TeradataAdapter, - 'informix': InformixAdapter, - 'informix-se': InformixSEAdapter, - 'firebird': FireBirdAdapter, - 'firebird_embedded': FireBirdAdapter, - 'ingres': IngresAdapter, - 'ingresu': IngresUnicodeAdapter, - 'sapdb': SAPDBAdapter, - 'cubrid': CubridAdapter, - 'jdbc:sqlite': JDBCSQLiteAdapter, - 'jdbc:sqlite:memory': JDBCSQLiteAdapter, - 'jdbc:postgres': 
JDBCPostgreSQLAdapter, - 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility - 'google:datastore': GoogleDatastoreAdapter, - 'google:datastore+ndb': GoogleDatastoreAdapter, - 'google:sql': GoogleSQLAdapter, - 'couchdb': CouchDBAdapter, - 'mongodb': MongoDBAdapter, - 'imap': IMAPAdapter -} +ADAPTERS = {'sqlite': SQLiteAdapter, + 'spatialite': SpatiaLiteAdapter, + 'sqlite:memory': SQLiteAdapter, + 'spatialite:memory': SpatiaLiteAdapter, + 'mysql': MySQLAdapter, + 'postgres': PostgreSQLAdapter, + 'postgres:psycopg2': PostgreSQLAdapter, + 'postgres:pg8000': PostgreSQLAdapter, + 'postgres2:psycopg2': NewPostgreSQLAdapter, + 'postgres2:pg8000': NewPostgreSQLAdapter, + 'oracle': OracleAdapter, + 'mssql': MSSQLAdapter, + 'mssql2': MSSQL2Adapter, + 'mssql3': MSSQL3Adapter, + 'mssql4' : MSSQL4Adapter, + 'vertica': VerticaAdapter, + 'sybase': SybaseAdapter, + 'db2': DB2Adapter, + 'teradata': TeradataAdapter, + 'informix': InformixAdapter, + 'informix-se': InformixSEAdapter, + 'firebird': FireBirdAdapter, + 'firebird_embedded': FireBirdAdapter, + 'ingres': IngresAdapter, + 'ingresu': IngresUnicodeAdapter, + 'sapdb': SAPDBAdapter, + 'cubrid': CubridAdapter, + 'jdbc:sqlite': JDBCSQLiteAdapter, + 'jdbc:sqlite:memory': JDBCSQLiteAdapter, + 'jdbc:postgres': JDBCPostgreSQLAdapter, + 'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility + 'google:datastore': GoogleDatastoreAdapter, + 'google:datastore+ndb': GoogleDatastoreAdapter, + 'google:sql': GoogleSQLAdapter, + 'couchdb': CouchDBAdapter, + 'mongodb': MongoDBAdapter, + 'imap': IMAPAdapter + } + def sqlhtml_validators(field): """ @@ -7203,14 +7274,15 @@ def sqlhtml_validators(field): return field_type.validator else: field_type = field_type.type - elif not isinstance(field_type,str): + elif not isinstance(field_type, str): return [] - requires=[] - def ff(r,id): - row=r(id) + requires = [] + + def ff(r, id): + row = r(id) if not row: return id - elif hasattr(r, '_format') and 
isinstance(r._format,str): + elif hasattr(r, '_format') and isinstance(r._format, str): return r._format % row elif hasattr(r, '_format') and callable(r._format): return r._format(row) @@ -7238,13 +7310,15 @@ def sqlhtml_validators(field): field_type.find('.') < 0 and \ field_type[10:] in db.tables: referenced = db[field_type[10:]] + def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id) + field.represent = field.represent or repr_ref if hasattr(referenced, '_format') and referenced._format: - requires = validators.IS_IN_DB(db,referenced._id, + requires = validators.IS_IN_DB(db, referenced._id, referenced._format) if field.unique: - requires._and = validators.IS_NOT_IN_DB(db,field) + requires._and = validators.IS_NOT_IN_DB(db, field) if field.tablename == field_type[10:]: return validators.IS_EMPTY_OR(requires) return requires @@ -7252,6 +7326,7 @@ def sqlhtml_validators(field): field_type.find('.') < 0 and \ field_type[15:] in db.tables: referenced = db[field_type[15:]] + def list_ref_repr(ids, row=None, r=referenced, f=ff): if not ids: return None @@ -7260,27 +7335,28 @@ def sqlhtml_validators(field): if isinstance(db._adapter, GoogleDatastoreAdapter): def count(values): return db(id.belongs(values)).select(id) rx = range(0, len(ids), 30) - refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx]) + refs = reduce(lambda a, b:a&b, [count(ids[i:i+30]) for i in rx]) else: refs = db(id.belongs(ids)).select(id) - return (refs and ', '.join(f(r,x.id) for x in refs) or '') + return (refs and ', '.join(f(r, x.id) for x in refs) or '') + field.represent = field.represent or list_ref_repr if hasattr(referenced, '_format') and referenced._format: - requires = validators.IS_IN_DB(db,referenced._id, - referenced._format,multiple=True) + requires = validators.IS_IN_DB(db, referenced._id, + referenced._format, multiple=True) else: - requires = validators.IS_IN_DB(db,referenced._id, + requires = validators.IS_IN_DB(db, referenced._id, multiple=True) if 
field.unique: - requires._and = validators.IS_NOT_IN_DB(db,field) + requires._and = validators.IS_NOT_IN_DB(db, field) if not field.notnull: requires = validators.IS_EMPTY_OR(requires) return requires elif field_type.startswith('list:'): - def repr_list(values,row=None): return', '.join(str(v) for v in (values or [])) + def repr_list(values, row=None): return', '.join(str(v) for v in (values or [])) field.represent = field.represent or repr_list if field.unique: - requires.insert(0,validators.IS_NOT_IN_DB(db,field)) + requires.insert(0, validators.IS_NOT_IN_DB(db, field)) sff = ['in', 'do', 'da', 'ti', 'de', 'bo'] if field.notnull and not field_type[:2] in sff: requires.insert(0, validators.IS_NOT_EMPTY()) @@ -7292,14 +7368,17 @@ def sqlhtml_validators(field): def bar_escape(item): return str(item).replace('|', '||') + def bar_encode(items): return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip()) + def bar_decode_integer(value): - if not hasattr(value,'split') and hasattr(value,'read'): + if not hasattr(value, 'split') and hasattr(value, 'read'): value = value.read() return [long(x) for x in value.split('|') if x.strip()] + def bar_decode_string(value): return [x.replace('||', '|') for x in REGEX_UNPACK.split(value[1:-1]) if x.strip()] @@ -7312,25 +7391,25 @@ class Row(object): this is only used to store a `Row` """ - __init__ = lambda self,*args,**kwargs: self.__dict__.update(*args,**kwargs) + __init__ = lambda self, *args, **kwargs: self.__dict__.update(*args, **kwargs) def __getitem__(self, k): if isinstance(k, Table): try: return ogetattr(self, k._tablename) - except (KeyError,AttributeError,TypeError): + except (KeyError, AttributeError, TypeError): pass elif isinstance(k, Field): try: return ogetattr(self, k.name) - except (KeyError,AttributeError,TypeError): + except (KeyError, AttributeError, TypeError): pass try: return ogetattr(ogetattr(self, k.tablename), k.name) - except (KeyError,AttributeError,TypeError): + except (KeyError, 
AttributeError, TypeError): pass - key=str(k) + key = str(k) _extra = ogetattr(self, '__dict__').get('_extra', None) if _extra is not None: v = _extra.get(key, DEFAULT) @@ -7338,20 +7417,20 @@ class Row(object): return v try: return ogetattr(self, key) - except (KeyError,AttributeError,TypeError): + except (KeyError, AttributeError, TypeError): pass m = REGEX_TABLE_DOT_FIELD.match(key) if m: try: return ogetattr(self, m.group(1))[m.group(2)] - except (KeyError,AttributeError,TypeError): + except (KeyError, AttributeError, TypeError): key = m.group(2) try: return ogetattr(self, key) - except (KeyError,AttributeError,TypeError), ae: + except (KeyError, AttributeError, TypeError), ae: try: - self[key] = ogetattr(self,'__get_lazy_reference__')(key) + self[key] = ogetattr(self, '__get_lazy_reference__')(key) return self[key] except: raise ae @@ -7364,12 +7443,11 @@ class Row(object): __call__ = __getitem__ - def get(self, key, default=None): try: return self.__getitem__(key) except(KeyError, AttributeError, TypeError): - return self.__dict__.get(key,default) + return self.__dict__.get(key, default) has_key = __contains__ = lambda self, key: key in self.__dict__ @@ -7389,9 +7467,9 @@ class Row(object): __str__ = __repr__ = lambda self: '' % self.as_dict() - __int__ = lambda self: object.__getattribute__(self,'id') + __int__ = lambda self: object.__getattribute__(self, 'id') - __long__ = lambda self: long(object.__getattribute__(self,'id')) + __long__ = lambda self: long(object.__getattribute__(self, 'id')) __getattr__ = __getitem__ @@ -7404,13 +7482,13 @@ class Row(object): # except: # raise ae - def __eq__(self,other): + def __eq__(self, other): try: return self.as_dict() == other.as_dict() except AttributeError: return False - def __ne__(self,other): + def __ne__(self, other): return not (self == other) def __copy__(self): @@ -7418,7 +7496,7 @@ class Row(object): def as_dict(self, datetime_to_str=False, custom_types=None): SERIALIZABLE_TYPES = [str, unicode, int, long, 
float, bool, list, dict] - if isinstance(custom_types,(list,tuple,set)): + if isinstance(custom_types, (list, tuple, set)): SERIALIZABLE_TYPES += custom_types elif custom_types: SERIALIZABLE_TYPES.append(custom_types) @@ -7427,24 +7505,25 @@ class Row(object): v=d[k] if d[k] is None: continue - elif isinstance(v,Row): + elif isinstance(v, Row): d[k]=v.as_dict() - elif isinstance(v,Reference): + elif isinstance(v, Reference): d[k]=long(v) - elif isinstance(v,decimal.Decimal): + elif isinstance(v, decimal.Decimal): d[k]=float(v) elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)): if datetime_to_str: - d[k] = v.isoformat().replace('T',' ')[:19] - elif not isinstance(v,tuple(SERIALIZABLE_TYPES)): + d[k] = v.isoformat().replace('T', ' ')[:19] + elif not isinstance(v, tuple(SERIALIZABLE_TYPES)): del d[k] return d def as_xml(self, row_name="row", colnames=None, indent=' '): - def f(row,field,indent=' '): - if isinstance(row,Row): + + def f(row, field, indent=' '): + if isinstance(row, Row): spc = indent+' \n' - items = [f(row[x],x,indent+' ') for x in row] + items = [f(row[x], x, indent+' ') for x in row] return '%s<%s>\n%s\n%s' % ( indent, field, @@ -7453,10 +7532,10 @@ class Row(object): field) elif not callable(row): if REGEX_ALPHANUMERIC.match(field): - return '%s<%s>%s' % (indent,field,row,field) + return '%s<%s>%s' % (indent, field, row, field) else: return '%s%s' % \ - (indent,field,row) + (indent, field, row) else: return None return f(self, row_name, indent=indent) @@ -7498,14 +7577,15 @@ class SQLCallableList(list): def __call__(self): return copy.copy(self) -def smart_query(fields,text): - if not isinstance(fields,(list,tuple)): + +def smart_query(fields, text): + if not isinstance(fields, (list, tuple)): fields = [fields] new_fields = [] for field in fields: - if isinstance(field,Field): + if isinstance(field, Field): new_fields.append(field) - elif isinstance(field,Table): + elif isinstance(field, Table): for ofield in field: 
new_fields.append(ofield) else: @@ -7527,50 +7607,50 @@ def smart_query(fields,text): text = text[:m.start()]+('#%i' % i)+text[m.end():] constants[str(i)] = m.group()[1:-1] i+=1 - text = re.sub('\s+',' ',text).lower() - for a,b in [('&','and'), - ('|','or'), - ('~','not'), - ('==','='), - ('<','<'), - ('>','>'), - ('<=','<='), - ('>=','>='), - ('<>','!='), - ('=<','<='), - ('=>','>='), - ('=','='), - (' less or equal than ','<='), - (' greater or equal than ','>='), - (' equal or less than ','<='), - (' equal or greater than ','>='), - (' less or equal ','<='), - (' greater or equal ','>='), - (' equal or less ','<='), - (' equal or greater ','>='), - (' not equal to ','!='), - (' not equal ','!='), - (' equal to ','='), - (' equal ','='), - (' equals ','='), - (' less than ','<'), - (' greater than ','>'), - (' starts with ','startswith'), - (' ends with ','endswith'), - (' not in ' , 'notbelongs'), - (' in ' , 'belongs'), - (' is ','=')]: + text = re.sub('\s+', ' ', text).lower() + for a, b in [('&', 'and'), + ('|', 'or'), + ('~', 'not'), + ('==', '='), + ('<', '<'), + ('>', '>'), + ('<=', '<='), + ('>=', '>='), + ('<>', '!='), + ('=<', '<='), + ('=>', '>='), + ('=', '='), + (' less or equal than ', '<='), + (' greater or equal than ', '>='), + (' equal or less than ', '<='), + (' equal or greater than ', '>='), + (' less or equal ', '<='), + (' greater or equal ', '>='), + (' equal or less ', '<='), + (' equal or greater ', '>='), + (' not equal to ', '!='), + (' not equal ', '!='), + (' equal to ', '='), + (' equal ', '='), + (' equals ', '='), + (' less than ', '<'), + (' greater than ', '>'), + (' starts with ', 'startswith'), + (' ends with ', 'endswith'), + (' not in ', 'notbelongs'), + (' in ', 'belongs'), + (' is ', '=')]: if a[0]==' ': - text = text.replace(' is'+a,' %s ' % b) - text = text.replace(a,' %s ' % b) - text = re.sub('\s+',' ',text).lower() - text = re.sub('(?P[\<\>\!\=])\s+(?P[\<\>\!\=])','\g\g',text) + text = text.replace(' is'+a, ' %s ' % 
b) + text = text.replace(a, ' %s ' % b) + text = re.sub('\s+', ' ', text).lower() + text = re.sub('(?P[\<\>\!\=])\s+(?P[\<\>\!\=])', '\g\g', text) query = field = neg = op = logic = None for item in text.split(): if field is None: if item == 'not': neg = True - elif not neg and not logic and item in ('and','or'): + elif not neg and not logic and item in ('and', 'or'): logic = item elif item in field_map: field = field_map[item] @@ -7587,12 +7667,12 @@ def smart_query(fields,text): value = item if field.type in ('text', 'string', 'json'): if op == '=': op = 'like' - if op == '=': new_query = field==value - elif op == '<': new_query = field': new_query = field>value - elif op == '<=': new_query = field<=value - elif op == '>=': new_query = field>=value - elif op == '!=': new_query = field!=value + if op == '=': new_query = field == value + elif op == '<': new_query = field < value + elif op == '>': new_query = field > value + elif op == '<=': new_query = field <= value + elif op == '>=': new_query = field >= value + elif op == '!=': new_query = field != value elif op == 'belongs': new_query = field.belongs(value.split(',')) elif op == 'notbelongs': new_query = ~field.belongs(value.split(',')) elif field.type in ('text', 'string', 'json'): @@ -7601,7 +7681,7 @@ def smart_query(fields,text): elif op == 'startswith': new_query = field.startswith(value) elif op == 'endswith': new_query = field.endswith(value) else: raise RuntimeError("Invalid operation") - elif field._db._adapter.dbengine=='google:datastore' and \ + elif field._db._adapter.dbengine == 'google:datastore' and \ field.type in ('list:integer', 'list:string', 'list:reference'): if op == 'contains': new_query = field.contains(value) else: raise RuntimeError("Invalid operation") @@ -7616,6 +7696,7 @@ def smart_query(fields,text): field = op = neg = logic = None return query + class DAL(object): """ @@ -7650,8 +7731,8 @@ class DAL(object): folder: where .table files will be created. 
Automatically set within web2py. Use an explicit path when using DAL outside web2py db_codec: string encoding of the database (default: 'UTF-8') - table_hash: database identifier with .tables. If your connection hash - change you can still using old .tables if they have db_hash + table_hash: database identifier with .tables. If your connection hash + change you can still using old .tables if they have db_hash as prefix check_reserved: list of adapters to check tablenames and column names against sql/nosql reserved keywords. Defaults to `None` @@ -7693,12 +7774,12 @@ class DAL(object): """ def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs): - if not hasattr(THREAD_LOCAL,'db_instances'): + if not hasattr(THREAD_LOCAL, 'db_instances'): THREAD_LOCAL.db_instances = {} - if not hasattr(THREAD_LOCAL,'db_instances_zombie'): + if not hasattr(THREAD_LOCAL, 'db_instances_zombie'): THREAD_LOCAL.db_instances_zombie = {} if uri == '': - db_uid = kwargs['db_uid'] # a zombie must have a db_uid! + db_uid = kwargs['db_uid'] # a zombie must have a db_uid! 
if db_uid in THREAD_LOCAL.db_instances: db_group = THREAD_LOCAL.db_instances[db_uid] db = db_group[-1] @@ -7708,13 +7789,13 @@ class DAL(object): db = super(DAL, cls).__new__(cls) THREAD_LOCAL.db_instances_zombie[db_uid] = db else: - db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest()) + db_uid = kwargs.get('db_uid', hashlib_md5(repr(uri)).hexdigest()) if db_uid in THREAD_LOCAL.db_instances_zombie: db = THREAD_LOCAL.db_instances_zombie[db_uid] del THREAD_LOCAL.db_instances_zombie[db_uid] else: db = super(DAL, cls).__new__(cls) - db_group = THREAD_LOCAL.db_instances.get(db_uid,[]) + db_group = THREAD_LOCAL.db_instances.get(db_uid, []) db_group.append(db) THREAD_LOCAL.db_instances[db_uid] = db_group db._db_uid = db_uid @@ -7742,18 +7823,16 @@ class DAL(object): } """ - dbs = getattr(THREAD_LOCAL,'db_instances',{}).items() + dbs = getattr(THREAD_LOCAL, 'db_instances', {}).items() infos = {} for db_uid, db_group in dbs: for db in db_group: if not db._uri: continue k = hide_password(db._adapter.uri) - infos[k] = dict( - dbstats = [(row[0], row[1]) for row in db._timings], - dbtables = {'defined': sorted( - list(set(db.tables)-set(db._LAZY_TABLES.keys()))), - 'lazy': sorted(db._LAZY_TABLES.keys())}) + infos[k] = dict(dbstats=[(row[0], row[1]) for row in db._timings], + dbtables={'defined': sorted(list(set(db.tables)-set(db._LAZY_TABLES.keys()))), + 'lazy': sorted(db._LAZY_TABLES.keys())}) return infos @staticmethod @@ -7761,7 +7840,7 @@ class DAL(object): if not instances: return thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) - keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] + keys = ['%s.%i' % (thread_key, i) for (i, db) in instances] instances = enumerate(instances) for (i, db) in instances: if not db._adapter.support_distributed_transaction(): @@ -7776,7 +7855,7 @@ class DAL(object): return instances = enumerate(instances) thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) - keys = ['%s.%i' % 
(thread_key, i) for (i,db) in instances] + keys = ['%s.%i' % (thread_key, i) for (i, db) in instances] for (i, db) in instances: if not db._adapter.support_distributed_transaction(): raise SyntaxError( @@ -7839,7 +7918,7 @@ class DAL(object): if not str(attempts).isdigit() or attempts < 0: attempts = 5 if uri: - uris = isinstance(uri,(list,tuple)) and uri or [uri] + uris = isinstance(uri, (list, tuple)) and uri or [uri] error = '' connected = False for k in range(attempts): @@ -7852,7 +7931,8 @@ class DAL(object): raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname) # notice that driver args or {} else driver_args # defaults to {} global, not correct - kwargs = dict(db=self,uri=uri, + kwargs = dict(db=self, + uri=uri, pool_size=pool_size, folder=folder, db_codec=db_codec, @@ -7886,8 +7966,8 @@ class DAL(object): if not connected: raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb)) else: - self._adapter = BaseAdapter(db=self,pool_size=0, - uri='None',folder=folder, + self._adapter = BaseAdapter(db=self, pool_size=0, + uri='None', folder=folder, db_codec=db_codec, after_connection=after_connection, entity_quoting=entity_quoting) migrate = fake_migrate = False @@ -7915,7 +7995,7 @@ class DAL(object): for table in tables: self.define_table(**table) else: - pattern = pjoin(path,self._uri_hash+'_*.table') + pattern = pjoin(path, self._uri_hash+'_*.table') for filename in glob.glob(pattern): tfile = self._adapter.file_open(filename, 'r') try: @@ -7924,12 +8004,12 @@ class DAL(object): mf = [(value['sortable'], Field(key, type=value['type'], - length=value.get('length',None), - notnull=value.get('notnull',False), - unique=value.get('unique',False))) \ + length=value.get('length', None), + notnull=value.get('notnull', False), + unique=value.get('unique', False))) \ for key, value in sql_fields.iteritems()] - mf.sort(lambda a,b: cmp(a[0],b[0])) - self.define_table(name,*[item[1] for item in mf], + mf.sort(lambda a, b: 
cmp(a[0], b[0])) + self.define_table(name, *[item[1] for item in mf], **dict(migrate=migrate, fake_migrate=fake_migrate)) finally: @@ -7945,20 +8025,20 @@ class DAL(object): raise SyntaxError( 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper())) - def parse_as_rest(self,patterns,args,vars,queries=None,nested_select=True): + def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True): """ Example: Use as:: - db.define_table('person',Field('name'),Field('info')) + db.define_table('person', Field('name'), Field('info')) db.define_table('pet', - Field('ownedby',db.person), - Field('name'),Field('info') + Field('ownedby', db.person), + Field('name'), Field('info') ) @request.restful() def index(): - def GET(*args,**vars): + def GET(*args, **vars): patterns = [ "/friends[person]", "/{person.name}/:field", @@ -7968,13 +8048,13 @@ class DAL(object): ("/dogs[pet]", db.pet.info=='dog'), ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'), ] - parser = db.parse_as_rest(patterns,args,vars) + parser = db.parse_as_rest(patterns, args, vars) if parser.status == 200: return dict(content=parser.response) else: - raise HTTP(parser.status,parser.error) + raise HTTP(parser.status, parser.error) - def POST(table_name,**vars): + def POST(table_name, **vars): if table_name == 'person': return db.person.validate_and_insert(**vars) elif table_name == 'pet': @@ -7988,69 +8068,69 @@ class DAL(object): re1 = REGEX_SEARCH_PATTERN re2 = REGEX_SQUARE_BRACKETS - def auto_table(table,base='',depth=0): + def auto_table(table, base='', depth=0): patterns = [] for field in db[table].fields: if base: - tag = '%s/%s' % (base,field.replace('_','-')) + tag = '%s/%s' % (base, field.replace('_', '-')) else: - tag = '/%s/%s' % (table.replace('_','-'),field.replace('_','-')) + tag = '/%s/%s' % (table.replace('_', '-'), field.replace('_', '-')) f = db[table][field] if not f.readable: continue - if f.type=='id' or 'slug' in field or 
f.type.startswith('reference'): - tag += '/{%s.%s}' % (table,field) + if f.type == 'id' or 'slug' in field or f.type.startswith('reference'): + tag += '/{%s.%s}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('boolean'): - tag += '/{%s.%s}' % (table,field) + tag += '/{%s.%s}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - elif f.type in ('float','double','integer','bigint'): - tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table,field,table,field) + elif f.type in ('float', 'double', 'integer', 'bigint'): + tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table, field, table, field) patterns.append(tag) patterns.append(tag+'/:field') elif f.type.startswith('list:'): - tag += '/{%s.%s.contains}' % (table,field) + tag += '/{%s.%s.contains}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - elif f.type in ('date','datetime'): - tag+= '/{%s.%s.year}' % (table,field) + elif f.type in ('date', 'datetime'): + tag += '/{%s.%s.year}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - tag+='/{%s.%s.month}' % (table,field) + tag += '/{%s.%s.month}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - tag+='/{%s.%s.day}' % (table,field) + tag += '/{%s.%s.day}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - if f.type in ('datetime','time'): - tag+= '/{%s.%s.hour}' % (table,field) + if f.type in ('datetime', 'time'): + tag += '/{%s.%s.hour}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - tag+='/{%s.%s.minute}' % (table,field) + tag += '/{%s.%s.minute}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') - tag+='/{%s.%s.second}' % (table,field) + tag += '/{%s.%s.second}' % (table, field) patterns.append(tag) patterns.append(tag+'/:field') if depth>0: for f in db[table]._referenced_by: - tag+='/%s[%s.%s]' % (table,f.tablename,f.name) + tag+='/%s[%s.%s]' % (table, f.tablename, f.name) patterns.append(tag) - patterns 
+= auto_table(table,base=tag,depth=depth-1) + patterns += auto_table(table, base=tag, depth=depth-1) return patterns if patterns == 'auto': patterns=[] for table in db.tables: if not table.startswith('auth_'): - patterns.append('/%s[%s]' % (table,table)) - patterns += auto_table(table,base='',depth=1) + patterns.append('/%s[%s]' % (table, table)) + patterns += auto_table(table, base='', depth=1) else: i = 0 - while i2: + elif len(pattern) > 2: pattern, basequery, exposedfields = pattern[0:3] - otable=table=None - if not isinstance(queries,dict): - dbset=db(queries) + otable = table = None + if not isinstance(queries, dict): + dbset = db(queries) if basequery is not None: dbset = dbset(basequery) - i=0 + i = 0 tags = pattern[1:].split('/') - if len(tags)!=len(args): + if len(tags) != len(args): continue for tag in tags: if re1.match(tag): @@ -8085,93 +8165,93 @@ class DAL(object): tokens = tag[1:-1].split('.') table, field = tokens[0], tokens[1] if not otable or table == otable: - if len(tokens)==2 or tokens[2]=='eq': - query = db[table][field]==args[i] - elif tokens[2]=='ne': - query = db[table][field]!=args[i] - elif tokens[2]=='lt': - query = db[table][field]args[i] - elif tokens[2]=='ge': - query = db[table][field]>=args[i] - elif tokens[2]=='le': - query = db[table][field]<=args[i] - elif tokens[2]=='year': - query = db[table][field].year()==args[i] - elif tokens[2]=='month': - query = db[table][field].month()==args[i] - elif tokens[2]=='day': - query = db[table][field].day()==args[i] - elif tokens[2]=='hour': - query = db[table][field].hour()==args[i] - elif tokens[2]=='minute': - query = db[table][field].minutes()==args[i] - elif tokens[2]=='second': - query = db[table][field].seconds()==args[i] - elif tokens[2]=='startswith': + if len(tokens) == 2 or tokens[2] == 'eq': + query = db[table][field] == args[i] + elif tokens[2] == 'ne': + query = db[table][field] != args[i] + elif tokens[2] == 'lt': + query = db[table][field] < args[i] + elif tokens[2] == 'gt': + 
query = db[table][field] > args[i] + elif tokens[2] == 'ge': + query = db[table][field] >= args[i] + elif tokens[2] == 'le': + query = db[table][field] <= args[i] + elif tokens[2] == 'year': + query = db[table][field].year() == args[i] + elif tokens[2] == 'month': + query = db[table][field].month() == args[i] + elif tokens[2] == 'day': + query = db[table][field].day() == args[i] + elif tokens[2] == 'hour': + query = db[table][field].hour() == args[i] + elif tokens[2] == 'minute': + query = db[table][field].minutes() == args[i] + elif tokens[2] == 'second': + query = db[table][field].seconds() == args[i] + elif tokens[2] == 'startswith': query = db[table][field].startswith(args[i]) - elif tokens[2]=='contains': + elif tokens[2] == 'contains': query = db[table][field].contains(args[i]) else: raise RuntimeError("invalid pattern: %s" % pattern) - if len(tokens)==4 and tokens[3]=='not': + if len(tokens) == 4 and tokens[3] == 'not': query = ~query - elif len(tokens)>=4: + elif len(tokens) >= 4: raise RuntimeError("invalid pattern: %s" % pattern) - if not otable and isinstance(queries,dict): + if not otable and isinstance(queries, dict): dbset = db(queries[table]) if basequery is not None: dbset = dbset(basequery) - dbset=dbset(query) + dbset = dbset(query) else: raise RuntimeError("missing relation in pattern: %s" % pattern) elif re2.match(tag) and args[i]==tag[:tag.find('[')]: ref = tag[tag.find('[')+1:-1] if '.' 
in ref and otable: - table,field = ref.split('.') + table, field = ref.split('.') selfld = '_id' if db[table][field].type.startswith('reference '): - refs = [ x.name for x in db[otable] if x.type == db[table][field].type ] + refs = [x.name for x in db[otable] if x.type == db[table][field].type] else: - refs = [ x.name for x in db[table]._referenced_by if x.tablename==otable ] + refs = [x.name for x in db[table]._referenced_by if x.tablename==otable] if refs: selfld = refs[0] if nested_select: try: dbset=db(db[table][field].belongs(dbset._select(db[otable][selfld]))) except ValueError: - return Row({'status':400,'pattern':pattern, - 'error':'invalid path','response':None}) + return Row({'status': 400, 'pattern': pattern, + 'error': 'invalid path', 'response': None}) else: items = [item.id for item in dbset.select(db[otable][selfld])] - dbset=db(db[table][field].belongs(items)) + dbset = db(db[table][field].belongs(items)) else: table = ref - if not otable and isinstance(queries,dict): + if not otable and isinstance(queries, dict): dbset = db(queries[table]) - dbset=dbset(db[table]) + dbset = dbset(db[table]) elif tag == ':field' and table: # print 're3:'+tag field = args[i] if not field in db[table]: break # hand-built patterns should respect .readable=False as well if not db[table][field].readable: - return Row({'status':418,'pattern':pattern, - 'error':'I\'m a teapot','response':None}) + return Row({'status': 418, 'pattern': pattern, + 'error': 'I\'m a teapot', 'response': None}) try: distinct = vars.get('distinct', False) == 'True' - offset = long(vars.get('offset',None) or 0) - limits = (offset,long(vars.get('limit',None) or 1000)+offset) + offset = long(vars.get('offset', None) or 0) + limits = (offset, long(vars.get('limit', None) or 1000)+offset) except ValueError: - return Row({'status':400,'error':'invalid limits','response':None}) + return Row({'status': 400, 'error': 'invalid limits', 'response': None}) items = 
dbset.select(db[table][field], distinct=distinct, limitby=limits) if items: - return Row({'status':200,'response':items, - 'pattern':pattern}) + return Row({'status': 200, 'response': items, + 'pattern': pattern}) else: - return Row({'status':404,'pattern':pattern, - 'error':'no record found','response':None}) + return Row({'status': 404, 'pattern': pattern, + 'error': 'no record found', 'response': None}) elif tag != args[i]: break otable = table @@ -8184,36 +8264,35 @@ class DAL(object): try: orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields] except (KeyError, AttributeError): - return Row({'status':400,'error':'invalid orderby','response':None}) + return Row({'status': 400, 'error': 'invalid orderby', 'response': None}) if exposedfields: fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable] else: fields = [field for field in db[table] if field.readable] count = dbset.count() try: - offset = long(vars.get('offset',None) or 0) - limits = (offset,long(vars.get('limit',None) or 1000)+offset) + offset = long(vars.get('offset', None) or 0) + limits = (offset, long(vars.get('limit', None) or 1000)+offset) except ValueError: - return Row({'status':400,'error':'invalid limits','response':None}) + return Row({'status': 400, 'error': 'invalid limits', 'response': None}) #if count > limits[1]-limits[0]: - # return Row({'status':400,'error':'too many records','response':None}) + # return Row({'status': 400, 'error': 'too many records', 'response': None}) try: - response = dbset.select(limitby=limits,orderby=orderby,*fields) + response = dbset.select(limitby=limits, orderby=orderby, *fields) except ValueError: - return Row({'status':400,'pattern':pattern, - 'error':'invalid path','response':None}) - return Row({'status':200,'response':response, - 'pattern':pattern,'count':count}) - return Row({'status':400,'error':'no matching pattern','response':None}) + return Row({'status': 400, 
'pattern': pattern, + 'error': 'invalid path', 'response': None}) + return Row({'status': 200, 'response': response, + 'pattern': pattern, 'count': count}) + return Row({'status': 400, 'error': 'no matching pattern', 'response': None}) - def define_table( - self, - tablename, - *fields, - **args - ): + def define_table(self, + tablename, + *fields, + **args + ): if not fields and 'fields' in args: - fields = args.get('fields',()) + fields = args.get('fields', ()) if not isinstance(tablename, str): if isinstance(tablename, unicode): try: @@ -8222,10 +8301,10 @@ class DAL(object): raise SyntaxError("invalid unicode table name") else: raise SyntaxError("missing table name") - elif hasattr(self,tablename) or tablename in self.tables: - if not args.get('redefine',False): + elif hasattr(self, tablename) or tablename in self.tables: + if not args.get('redefine', False): raise SyntaxError('table already defined: %s' % tablename) - elif tablename.startswith('_') or hasattr(self,tablename) or \ + elif tablename.startswith('_') or hasattr(self, tablename) or \ REGEX_PYTHON_KEYWORDS.match(tablename): raise SyntaxError('invalid table name: %s' % tablename) elif self.check_reserved: @@ -8234,28 +8313,27 @@ class DAL(object): invalid_args = set(args)-TABLE_ARGS if invalid_args: raise SyntaxError('invalid table "%s" attributes: %s' \ - % (tablename,invalid_args)) + % (tablename, invalid_args)) if self._lazy_tables and not tablename in self._LAZY_TABLES: - self._LAZY_TABLES[tablename] = (tablename,fields,args) + self._LAZY_TABLES[tablename] = (tablename, fields, args) table = None else: - table = self.lazy_define_table(tablename,*fields,**args) + table = self.lazy_define_table(tablename, *fields, **args) if not tablename in self.tables: self.tables.append(tablename) return table - def lazy_define_table( - self, - tablename, - *fields, - **args - ): + def lazy_define_table(self, + tablename, + *fields, + **args + ): args_get = args.get common_fields = self._common_fields if 
common_fields: fields = list(fields) + list(common_fields) - table_class = args_get('table_class',Table) + table_class = args_get('table_class', Table) table = table_class(self, tablename, *fields, **args) table._actual = True self[tablename] = table @@ -8265,23 +8343,23 @@ class DAL(object): if field.requires == DEFAULT: field.requires = sqlhtml_validators(field) - migrate = self._migrate_enabled and args_get('migrate',self._migrate) - if migrate and not self._uri in (None,'None') \ + migrate = self._migrate_enabled and args_get('migrate', self._migrate) + if migrate and not self._uri in (None, 'None') \ or self._adapter.dbengine=='google:datastore': fake_migrate = self._fake_migrate_all or \ - args_get('fake_migrate',self._fake_migrate) - polymodel = args_get('polymodel',None) + args_get('fake_migrate', self._fake_migrate) + polymodel = args_get('polymodel', None) try: GLOBAL_LOCKER.acquire() self._lastsql = self._adapter.create_table( - table,migrate=migrate, + table, migrate=migrate, fake_migrate=fake_migrate, polymodel=polymodel) finally: GLOBAL_LOCKER.release() else: table._dbt = None - on_define = args_get('on_define',None) + on_define = args_get('on_define', None) if on_define: on_define(table) return table @@ -8330,8 +8408,8 @@ class DAL(object): has_key = __contains__ - def get(self,key,default=None): - return self.__dict__.get(key,default) + def get(self, key, default=None): + return self.__dict__.get(key, default) def __iter__(self): for tablename in self.tables: @@ -8341,10 +8419,10 @@ class DAL(object): return self.__getattr__(str(key)) def __getattr__(self, key): - if ogetattr(self,'_lazy_tables') and \ - key in ogetattr(self,'_LAZY_TABLES'): + if ogetattr(self, '_lazy_tables') and \ + key in ogetattr(self, '_LAZY_TABLES'): tablename, fields, args = self._LAZY_TABLES.pop(key) - return self.lazy_define_table(tablename,*fields,**args) + return self.lazy_define_table(tablename, *fields, **args) return ogetattr(self, key) def __setitem__(self, key, 
value): @@ -8354,23 +8432,23 @@ class DAL(object): if key[:1]!='_' and key in self: raise SyntaxError( 'Object %s exists and cannot be redefined' % key) - osetattr(self,key,value) + osetattr(self, key, value) __delitem__ = object.__delattr__ def __repr__(self): - if hasattr(self,'_uri'): + if hasattr(self, '_uri'): return '' % hide_password(self._adapter.uri) else: return '' % self._db_uid - def smart_query(self,fields,text): - return Set(self, smart_query(fields,text)) + def smart_query(self, fields, text): + return Set(self, smart_query(fields, text)) def __call__(self, query=None, ignore_common_filters=None): - if isinstance(query,Table): + if isinstance(query, Table): query = self._adapter.id_query(query) - elif isinstance(query,Field): + elif isinstance(query, Field): query = query!=None elif isinstance(query, dict): icf = query.get("ignore_common_filters") @@ -8446,7 +8524,7 @@ class DAL(object): else: adapter.execute(query) if as_dict or as_ordered_dict: - if not hasattr(adapter.cursor,'description'): + if not hasattr(adapter.cursor, 'description'): raise RuntimeError("database does not support executesql(...,as_dict=True)") # Non-DAL legacy db query, converts cursor results to dict. # sequence of 7-item sequences. each sequence tells about a column. 
@@ -8465,7 +8543,7 @@ class DAL(object): _dict = OrderedDict else: _dict = dict - return [_dict(zip(fields,row)) for row in data] + return [_dict(zip(fields, row)) for row in data] try: data = adapter._fetchall() except: @@ -8493,7 +8571,7 @@ class DAL(object): if not field.table==thistable] def export_to_csv_file(self, ofile, *args, **kwargs): - step = long(kwargs.get('max_fetch_rows,',500)) + step = long(kwargs.get('max_fetch_rows,', 500)) write_colnames = kwargs['write_colnames'] = \ kwargs.get("write_colnames", True) for table in self.tables: @@ -8501,8 +8579,8 @@ class DAL(object): query = self._adapter.id_query(self[table]) nrows = self(query).count() kwargs['write_colnames'] = write_colnames - for k in range(0,nrows,step): - self(query).select(limitby=(k,k+step)).export_to_csv_file( + for k in range(0, nrows, step): + self(query).select(limitby=(k, k+step)).export_to_csv_file( ofile, *args, **kwargs) kwargs['write_colnames'] = False ofile.write('\r\n\r\n') @@ -8512,8 +8590,8 @@ class DAL(object): unique='uuid', map_tablenames=None, ignore_missing_tables=False, *args, **kwargs): - #if id_map is None: id_map={} - id_offset = {} # only used if id_map is None + # if id_map is None: id_map={} + id_offset = {} # only used if id_map is None map_tablenames = map_tablenames or {} for line in ifile: line = line.strip() @@ -8526,7 +8604,7 @@ class DAL(object): raise SyntaxError('invalid file format') else: tablename = line[6:] - tablename = map_tablenames.get(tablename,tablename) + tablename = map_tablenames.get(tablename, tablename) if tablename is not None and tablename in self.tables: self[tablename].import_from_csv_file( ifile, id_map, null, unique, id_offset, @@ -8541,13 +8619,15 @@ class DAL(object): def DAL_unpickler(db_uid): - return DAL('',db_uid=db_uid) + return DAL('', db_uid=db_uid) + def DAL_pickler(db): return DAL_unpickler, (db._db_uid,) copyreg.pickle(DAL, DAL_pickler, DAL_unpickler) + class SQLALL(object): """ Helper class providing a comma-separated 
string having all the field names @@ -8562,6 +8642,7 @@ class SQLALL(object): def __str__(self): return ', '.join([str(field) for field in self._table]) + # class Reference(int): class Reference(long): @@ -8578,7 +8659,7 @@ class Reference(long): if key in self._table: self.__allocate() if self._record: - return self._record.get(key,None) # to deal with case self.update_record() + return self._record.get(key, None) # to deal with case self.update_record() else: return None @@ -8598,7 +8679,7 @@ class Reference(long): self.__allocate() return self._record.get(key, None) - def __setitem__(self,key,value): + def __setitem__(self, key, value): self.__allocate() self._record[key] = value @@ -8606,6 +8687,7 @@ class Reference(long): def Reference_unpickler(data): return marshal.loads(data) + def Reference_pickler(data): try: marshal_dump = marshal.dumps(long(data)) @@ -8615,14 +8697,18 @@ def Reference_pickler(data): copyreg.pickle(Reference, Reference_pickler, Reference_unpickler) + class MethodAdder(object): - def __init__(self,table): + def __init__(self, table): self.table = table + def __call__(self): return self.register() - def __getattr__(self,method_name): + + def __getattr__(self, method_name): return self.register(method_name) - def register(self,method_name=None): + + def register(self, method_name=None): def _decorated(f): instance = self.table import types @@ -8632,6 +8718,7 @@ class MethodAdder(object): return f return _decorated + class Table(object): """ @@ -8649,12 +8736,11 @@ class Table(object): """ - def __init__( - self, - db, - tablename, - *fields, - **args): + def __init__(self, + db, + tablename, + *fields, + **args): """ Initializes the table and performs checking on the provided fields. 
@@ -8842,7 +8928,7 @@ class Table(object): newquery = lambda query, t=self, name=self._tablename: \ reduce(AND, [db[tn].is_active == True for tn in db._adapter.tables(query) - if tn == name or getattr(db[tn],'_ot',None)==name]) + if tn == name or getattr(db[tn], '_ot', None)==name]) query = self._common_filter if query: newquery = query & newquery @@ -8862,7 +8948,7 @@ class Table(object): self._referenced_by = [] self._references = [] for field in self: - #fieldname = field.name ##FIXME not used ? + # fieldname = field.name ## FIXME not used ? field_type = field.type if isinstance(field_type, str) and field_type[:10] == 'reference ': ref = field_type[10:].strip() @@ -8900,12 +8986,12 @@ class Table(object): def _filter_fields(self, record, id=False): return dict([(k, v) for (k, v) in record.iteritems() if k - in self.fields and (self[k].type!='id' or id)]) + in self.fields and (self[k].type != 'id' or id)]) - def _build_query(self,key): + def _build_query(self, key): """ for keyed table only """ query = None - for k,v in key.iteritems(): + for k, v in key.iteritems(): if k in self._primarykey: if query: query = query & (self[k] == v) @@ -8913,8 +8999,8 @@ class Table(object): query = (self[k] == v) else: raise SyntaxError( - 'Field %s is not part of the primary key of %s' % - (k,self._tablename) + 'Field %s is not part of the primary key of %s' + % (k, self._tablename) ) return query @@ -8942,19 +9028,19 @@ class Table(object): if not key is DEFAULT: if isinstance(key, Query): record = self._db(key).select( - limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() + limitby=(0, 1), for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() elif not str(key).isdigit(): record = None else: record = self._db(self._id == key).select( - limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() + limitby=(0, 1), for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() if 
record: - for k,v in kwargs.iteritems(): + for k, v in kwargs.iteritems(): if record[k]!=v: return None return record elif kwargs: - query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()]) - return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() + query = reduce(lambda a, b:a&b, [self[k]==v for k, v in kwargs.iteritems()]) + return self._db(query).select(limitby=(0, 1), for_update=for_update, orderby=orderby, orderby_on_limitby=False).first() else: return None @@ -8971,8 +9057,8 @@ class Table(object): self._db(query).update(**self._filter_fields(value)) else: raise SyntaxError( - 'key must have all fields from primary key: %s'% - (self._primarykey)) + 'key must have all fields from primary key: %s' + % (self._primarykey)) elif str(key).isdigit(): if key == 0: self.insert(**self._filter_fields(value)) @@ -8990,7 +9076,7 @@ class Table(object): def __setattr__(self, key, value): if key[:1]!='_' and key in self: raise SyntaxError('Object exists and cannot be redefined: %s' % key) - osetattr(self,key,value) + osetattr(self, key, value) def __delitem__(self, key): if isinstance(key, dict): @@ -9001,7 +9087,7 @@ class Table(object): not self._db(self._id == key).delete(): raise SyntaxError('No such record: %s' % key) - def __contains__(self,key): + def __contains__(self, key): return hasattr(self, key) has_key = __contains__ @@ -9041,15 +9127,14 @@ class Table(object): if rname and not ot: return rname return self._db._adapter.sqlsafe_table(self._tablename, self._ot) - def _drop(self, mode=''): return self._db._adapter._drop(self, mode) def drop(self, mode=''): - return self._db._adapter.drop(self,mode) + return self._db._adapter.drop(self, mode) - def _listify(self,fields,update=False): - new_fields = {} # format: new_fields[name] = (field,value) + def _listify(self, fields, update=False): + new_fields = {} # format: new_fields[name] = (field, value) # store all fields passed as input in 
new_fields for name in fields: @@ -9114,7 +9199,7 @@ class Table(object): fields[field.name] = new_name def _defaults(self, fields): - "If there are no fields/values specified, return table defaults" + """If there are no fields/values specified, return table defaults""" if not fields: fields = {} for field in self: @@ -9133,15 +9218,15 @@ class Table(object): ret = self._db._adapter.insert(self, self._listify(fields)) if ret and self._after_insert: fields = Row(fields) - [f(fields,ret) for f in self._after_insert] + [f(fields, ret) for f in self._after_insert] return ret def validate_and_insert(self, **fields): response = Row() response.errors = Row() new_fields = copy.copy(fields) - for key,value in fields.iteritems(): - value,error = self[key].validate(value) + for key, value in fields.iteritems(): + value, error = self[key].validate(value) if error: response.errors[key] = "%s" % error else: @@ -9238,8 +9323,8 @@ class Table(object): """ items = [self._listify(item) for item in items] if any(f(item) for item in items for f in self._before_insert):return 0 - ret = self._db._adapter.bulk_insert(self,items) - ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] + ret = self._db._adapter.bulk_insert(self, items) + ret and [[f(item, ret[k]) for k, item in enumerate(items)] for f in self._after_insert] return ret def _truncate(self, mode=None): @@ -9248,15 +9333,14 @@ class Table(object): def truncate(self, mode=None): return self._db._adapter.truncate(self, mode) - def import_from_csv_file( - self, - csvfile, - id_map=None, - null='', - unique='uuid', - id_offset=None, # id_offset used only when id_map is None - *args, **kwargs - ): + def import_from_csv_file(self, + csvfile, + id_map=None, + null='', + unique='uuid', + id_offset=None, # id_offset used only when id_map is None + *args, **kwargs + ): """ Import records from csv file. Column headers must have same names as table fields. 
@@ -9294,14 +9378,14 @@ class Table(object): list_reference_s='list:reference' if value == null: value = None - elif field.type=='blob': + elif field.type == 'blob': value = base64.b64decode(value) - elif field.type=='double' or field.type=='float': + elif field.type == 'double' or field.type == 'float': if not value.strip(): value = None else: value = float(value) - elif field.type in ('integer','bigint'): + elif field.type in ('integer', 'bigint'): if not value.strip(): value = None else: @@ -9311,7 +9395,7 @@ class Table(object): elif field.type.startswith(list_reference_s): ref_table = field.type[len(list_reference_s):].strip() if id_map is not None: - value = [id_map[ref_table][long(v)] \ + value = [id_map[ref_table][long(v)] for v in bar_decode_string(value)] else: value = [v for v in bar_decode_string(value)] @@ -9342,13 +9426,13 @@ class Table(object): break if not colnames: # assume this is the first line of the input, contains colnames - colnames = [x.split('.',1)[-1] for x in line][:len(line)] + colnames = [x.split('.', 1)[-1] for x in line][:len(line)] cols, cid = [], None - for i,colname in enumerate(colnames): + for i, colname in enumerate(colnames): if is_id(colname): cid = i elif colname in self.fields: - cols.append((i,self[colname])) + cols.append((i, self[colname])) if colname == unique: unique_idx = i else: @@ -9359,7 +9443,7 @@ class Table(object): items.append(fix(field, line[i], id_map, id_offset)) except ValueError: raise RuntimeError("Unable to parse line:%s field:%s value:'%s'" - % (lineno+1,field,line[i])) + % (lineno+1, field, line[i])) if not (id_map or cid is None or id_offset is None or unique_idx): csv_id = long(line[cid]) @@ -9370,9 +9454,9 @@ class Table(object): # then we are not restoring but # extending db table with csv db table id_offset[self._tablename] = (curr_id-csv_id) \ - if curr_id>csv_id else 0 + if curr_id > csv_id else 0 # create new id until we get the same as old_id+offset - while curr_id' % 
BaseAdapter.expand(self.db._adapter,self) + return '' % BaseAdapter.expand(self.db._adapter, self) def __str__(self): return str(self.db._adapter.expand(self)) def __and__(self, other): - return Query(self.db,self.db._adapter.AND,self,other) + return Query(self.db, self.db._adapter.AND, self, other) __rand__ = __and__ def __or__(self, other): - return Query(self.db,self.db._adapter.OR,self,other) + return Query(self.db, self.db._adapter.OR, self, other) __ror__ = __or__ def __invert__(self): if self.op==self.db._adapter.NOT: return self.first - return Query(self.db,self.db._adapter.NOT,self) + return Query(self.db, self.db._adapter.NOT, self) def __eq__(self, other): return repr(self) == repr(other) @@ -10279,8 +10358,8 @@ class Query(object): def __ne__(self, other): return not (self == other) - def case(self,t=1,f=0): - return self.db._adapter.CASE(self,t,f) + def case(self, t=1, f=0): + return self.db._adapter.CASE(self, t, f) def as_dict(self, flat=False, sanitize=True): """Experimental stuff @@ -10367,7 +10446,7 @@ def xorify(orderby): def use_common_filters(query): - return (query and hasattr(query,'ignore_common_filters') and \ + return (query and hasattr(query, 'ignore_common_filters') and \ not query.ignore_common_filters) @@ -10386,7 +10465,7 @@ class Set(object): myset.update(db.users.name='Massimo') myset.delete() # all elements in the set - myset.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10)) + myset.select(orderby=db.users.id, groupby=db.users.name, limitby=(0, 10)) and take subsets: @@ -10410,16 +10489,16 @@ class Set(object): self.query = query def __repr__(self): - return '' % BaseAdapter.expand(self.db._adapter,self.query) + return '' % BaseAdapter.expand(self.db._adapter, self.query) def __call__(self, query, ignore_common_filters=False): if query is None: return self - elif isinstance(query,Table): + elif isinstance(query, Table): query = self.db._adapter.id_query(query) - elif isinstance(query,str): - query = 
Expression(self.db,query) - elif isinstance(query,Field): + elif isinstance(query, str): + query = Expression(self.db, query) + elif isinstance(query, Field): query = query!=None if self.query: return Set(self.db, self.query & query, @@ -10428,29 +10507,29 @@ class Set(object): return Set(self.db, query, ignore_common_filters=ignore_common_filters) - def _count(self,distinct=None): - return self.db._adapter._count(self.query,distinct) + def _count(self, distinct=None): + return self.db._adapter._count(self.query, distinct) def _select(self, *fields, **attributes): adapter = self.db._adapter tablenames = adapter.tables(self.query, - attributes.get('join',None), - attributes.get('left',None), - attributes.get('orderby',None), - attributes.get('groupby',None)) + attributes.get('join', None), + attributes.get('left', None), + attributes.get('orderby', None), + attributes.get('groupby', None)) fields = adapter.expand_all(fields, tablenames) - return adapter._select(self.query,fields,attributes) + return adapter._select(self.query, fields, attributes) def _delete(self): db = self.db tablename = db._adapter.get_table(self.query) - return db._adapter._delete(tablename,self.query) + return db._adapter._delete(tablename, self.query) def _update(self, **update_fields): db = self.db tablename = db._adapter.get_table(self.query) - fields = db[tablename]._listify(update_fields,update=True) - return db._adapter._update(tablename,self.query,fields) + fields = db[tablename]._listify(update_fields, update=True) + return db._adapter._update(tablename, self.query, fields) def as_dict(self, flat=False, sanitize=True): if flat: @@ -10482,12 +10561,12 @@ class Set(object): return json(d) def parse(self, dquery): - "Experimental: Turn a dictionary into a Query object" + """Experimental: Turn a dictionary into a Query object""" self.dquery = dquery return self.build(self.dquery) def build(self, d): - "Experimental: see .parse()" + """Experimental: see .parse()""" op, first, second = 
(d["op"], d["first"], d.get("second", None)) left = right = built = None @@ -10541,41 +10620,41 @@ class Set(object): return built def isempty(self): - return not self.select(limitby=(0,1), orderby_on_limitby=False) + return not self.select(limitby=(0, 1), orderby_on_limitby=False) - def count(self,distinct=None, cache=None): + def count(self, distinct=None, cache=None): db = self.db if cache: cache_model, time_expire = cache sql = self._count(distinct=distinct) key = db._uri + '/' + sql - if len(key)>200: key = hashlib_md5(key).hexdigest() + if len(key) > 200: key = hashlib_md5(key).hexdigest() return cache_model( key, - (lambda self=self,distinct=distinct: \ - db._adapter.count(self.query,distinct)), + (lambda self=self, distinct=distinct: + db._adapter.count(self.query, distinct)), time_expire) - return db._adapter.count(self.query,distinct) + return db._adapter.count(self.query, distinct) def select(self, *fields, **attributes): adapter = self.db._adapter tablenames = adapter.tables(self.query, - attributes.get('join',None), - attributes.get('left',None), - attributes.get('orderby',None), - attributes.get('groupby',None)) + attributes.get('join', None), + attributes.get('left', None), + attributes.get('orderby', None), + attributes.get('groupby', None)) fields = adapter.expand_all(fields, tablenames) - return adapter.select(self.query,fields,attributes) + return adapter.select(self.query, fields, attributes) - def nested_select(self,*fields,**attributes): - return Expression(self.db,self._select(*fields,**attributes)) + def nested_select(self, *fields, **attributes): + return Expression(self.db, self._select(*fields, **attributes)) def delete(self): db = self.db tablename = db._adapter.get_table(self.query) table = db[tablename] if any(f(self) for f in table._before_delete): return 0 - ret = db._adapter.delete(tablename,self.query) + ret = db._adapter.delete(tablename, self.query) ret and [f(self) for f in table._after_delete] return ret @@ -10584,13 +10663,13 
@@ class Set(object): tablename = db._adapter.get_table(self.query) table = db[tablename] table._attempt_upload(update_fields) - if any(f(self,update_fields) for f in table._before_update): + if any(f(self, update_fields) for f in table._before_update): return 0 - fields = table._listify(update_fields,update=True) + fields = table._listify(update_fields, update=True) if not fields: raise SyntaxError("No fields to update") - ret = db._adapter.update("%s" % table._tablename,self.query,fields) - ret and [f(self,update_fields) for f in table._after_update] + ret = db._adapter.update("%s" % table._tablename, self.query, fields) + ret and [f(self, update_fields) for f in table._after_update] return ret def update_naive(self, **update_fields): @@ -10599,10 +10678,10 @@ class Set(object): """ tablename = self.db._adapter.get_table(self.query) table = self.db[tablename] - fields = table._listify(update_fields,update=True) + fields = table._listify(update_fields, update=True) if not fields: raise SyntaxError("No fields to update") - ret = self.db._adapter.update("%s" % table,self.query,fields) + ret = self.db._adapter.update("%s" % table, self.query, fields) return ret def validate_and_update(self, **update_fields): @@ -10610,8 +10689,8 @@ class Set(object): response = Row() response.errors = Row() new_fields = copy.copy(update_fields) - for key,value in update_fields.iteritems(): - value,error = self.db[tablename][key].validate(value) + for key, value in update_fields.iteritems(): + value, error = self.db[tablename][key].validate(value) if error: response.errors[key] = error else: @@ -10620,11 +10699,11 @@ class Set(object): if response.errors: response.updated = None else: - if not any(f(self,new_fields) for f in table._before_update): - fields = table._listify(new_fields,update=True) + if not any(f(self, new_fields) for f in table._before_update): + fields = table._listify(new_fields, update=True) if not fields: raise SyntaxError("No fields to update") - ret = 
self.db._adapter.update(tablename,self.query,fields) - ret and [f(self,new_fields) for f in table._after_update] + ret = self.db._adapter.update(tablename, self.query, fields) + ret and [f(self, new_fields) for f in table._after_update] else: ret = 0 response.updated = ret @@ -10668,6 +10747,7 @@ class Set(object): os.unlink(oldpath) return False + class RecordUpdater(object): def __init__(self, colset, table, id): self.colset, self.db, self.tablename, self.id = \ @@ -10680,19 +10760,25 @@ class RecordUpdater(object): for fieldname in newfields.keys(): if not fieldname in table.fields or table[fieldname].type=='id': del newfields[fieldname] - table._db(table._id==id,ignore_common_filters=True).update(**newfields) + table._db(table._id==id, ignore_common_filters=True).update(**newfields) colset.update(newfields) return colset + class RecordDeleter(object): + def __init__(self, table, id): self.db, self.tablename, self.id = table._db, table._tablename, id + def __call__(self): return self.db(self.db[self.tablename]._id==self.id).delete() + class LazyReferenceGetter(object): + def __init__(self, table, id): self.db, self.tablename, self.id = table._db, table._tablename, id + def __call__(self, other_tablename): if self.db._lazy_tables is False: raise AttributeError() @@ -10704,55 +10790,76 @@ class LazyReferenceGetter(object): raise AttributeError() + class LazySet(object): + def __init__(self, field, id): self.db, self.tablename, self.fieldname, self.id = \ field.db, field._tablename, field.name, id + def _getset(self): query = self.db[self.tablename][self.fieldname]==self.id - return Set(self.db,query) + return Set(self.db, query) + def __repr__(self): return repr(self._getset()) + def __call__(self, query, ignore_common_filters=False): return self._getset()(query, ignore_common_filters) - def _count(self,distinct=None): + + def _count(self, distinct=None): return self._getset()._count(distinct) + def _select(self, *fields, **attributes): - return 
self._getset()._select(*fields,**attributes) + return self._getset()._select(*fields, **attributes) + def _delete(self): return self._getset()._delete() + def _update(self, **update_fields): return self._getset()._update(**update_fields) + def isempty(self): return self._getset().isempty() - def count(self,distinct=None, cache=None): - return self._getset().count(distinct,cache) + + def count(self, distinct=None, cache=None): + return self._getset().count(distinct, cache) + def select(self, *fields, **attributes): - return self._getset().select(*fields,**attributes) - def nested_select(self,*fields,**attributes): - return self._getset().nested_select(*fields,**attributes) + return self._getset().select(*fields, **attributes) + + def nested_select(self, *fields, **attributes): + return self._getset().nested_select(*fields, **attributes) + def delete(self): return self._getset().delete() + def update(self, **update_fields): return self._getset().update(**update_fields) + def update_naive(self, **update_fields): return self._getset().update_naive(**update_fields) + def validate_and_update(self, **update_fields): return self._getset().validate_and_update(**update_fields) + def delete_uploaded_files(self, upload_fields=None): return self._getset().delete_uploaded_files(upload_fields) + class VirtualCommand(object): - def __init__(self,method,row): + def __init__(self, method, row): self.method=method self.row=row - def __call__(self,*args,**kwargs): - return self.method(self.row,*args,**kwargs) + def __call__(self, *args, **kwargs): + return self.method(self.row, *args, **kwargs) + def lazy_virtualfield(f): f.__lazy__ = True return f + class Rows(object): """ @@ -10762,14 +10869,13 @@ class Rows(object): # ## TODO: this class still needs some work to care for ID/OID - def __init__( - self, - db=None, - records=[], - colnames=[], - compact=True, - rawrows=None - ): + def __init__(self, + db=None, + records=[], + colnames=[], + compact=True, + rawrows=None + ): self.db = 
db self.records = records self.colnames = colnames @@ -10779,11 +10885,11 @@ class Rows(object): def __repr__(self): return '' % len(self.records) - def setvirtualfields(self,**keyed_virtualfields): + def setvirtualfields(self, **keyed_virtualfields): """ For reference:: - db.define_table('x',Field('number','integer')) + db.define_table('x', Field('number', 'integer')) if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] from gluon.dal import lazy_virtualfield @@ -10793,7 +10899,7 @@ class Rows(object): def normal_shift(self): return self.x.number+1 # lazy virtual field (because of @staticmethod) @lazy_virtualfield - def lazy_shift(instance,row,delta=4): return row.x.number+delta + def lazy_shift(instance, row, delta=4): return row.x.number+delta db.x.virtualfields.append(MyVirtualFields()) for row in db(db.x).select(): @@ -10803,7 +10909,7 @@ class Rows(object): if not keyed_virtualfields: return self for row in self.records: - for (tablename,virtualfields) in keyed_virtualfields.iteritems(): + for (tablename, virtualfields) in keyed_virtualfields.iteritems(): attributes = dir(virtualfields) if not tablename in row: box = row[tablename] = Row() @@ -10812,9 +10918,9 @@ class Rows(object): updated = False for attribute in attributes: if attribute[0] != '_': - method = getattr(virtualfields,attribute) - if hasattr(method,'__lazy__'): - box[attribute]=VirtualCommand(method,row) + method = getattr(virtualfields, attribute) + if hasattr(method, '__lazy__'): + box[attribute]=VirtualCommand(method, row) elif type(method)==types.MethodType: if not updated: virtualfields.__dict__.update(row) @@ -10822,20 +10928,20 @@ class Rows(object): box[attribute]=method() return self - def __and__(self,other): + def __and__(self, other): if self.colnames!=other.colnames: raise Exception('Cannot & incompatible Rows objects') records = self.records+other.records - return Rows(self.db,records,self.colnames, + return Rows(self.db, records, self.colnames, compact=self.compact 
or other.compact) - def __or__(self,other): + def __or__(self, other): if self.colnames!=other.colnames: raise Exception('Cannot | incompatible Rows objects') records = [record for record in other.records if not record in self.records] records = self.records + records - return Rows(self.db,records,self.colnames, + return Rows(self.db, records, self.colnames, compact=self.compact or other.compact) def __nonzero__(self): @@ -10847,7 +10953,7 @@ class Rows(object): return len(self.records) def __getslice__(self, a, b): - return Rows(self.db,self.records[a:b],self.colnames,compact=self.compact) + return Rows(self.db, self.records[a:b], self.colnames, compact=self.compact) def __getitem__(self, i): row = self.records[i] @@ -10883,7 +10989,7 @@ class Rows(object): return None return self[-1] - def find(self,f,limitby=None): + def find(self, f, limitby=None): """ Returns a new Rows object, a subset of the original object, filtered by the function `f` @@ -10892,9 +10998,9 @@ class Rows(object): return Rows(self.db, [], self.colnames, compact=self.compact) records = [] if limitby: - a,b = limitby + a, b = limitby else: - a,b = 0,len(self) + a, b = 0, len(self) k = 0 for i, row in enumerate(self): if f(row): @@ -10911,8 +11017,8 @@ class Rows(object): if not self.records: return Rows(self.db, [], self.colnames, compact=self.compact) removed = [] - i=0 - while i len(fields)-1: if one_result: return row @@ -11071,13 +11177,13 @@ class Rows(object): key = lambda r: key_generator.next() rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) - if isinstance(key,str) and key.count('.')==1: + if isinstance(key, str) and key.count('.')==1: (table, field) = key.split('.') - return dict([(r[table][field],r) for r in rows]) - elif isinstance(key,str): - return dict([(r[key],r) for r in rows]) + return dict([(r[table][field], r) for r in rows]) + elif isinstance(key, str): + return dict([(r[key], r) for r in rows]) else: - return dict([(key(r),r) for r in rows]) + 
return dict([(key(r), r) for r in rows]) def as_trees(self, parent_name='parent_id', children_name='children'): roots = [] @@ -11127,7 +11233,7 @@ class Rows(object): return unq_colnames colnames = kwargs.get('colnames', self.colnames) - write_colnames = kwargs.get('write_colnames',True) + write_colnames = kwargs.get('write_colnames', True) # a proper csv starting with the column names if write_colnames: writer.writerow(unquote_colnames(colnames)) @@ -11143,11 +11249,11 @@ class Rows(object): return null elif isinstance(value, unicode): return value.encode('utf8') - elif isinstance(value,Reference): + elif isinstance(value, Reference): return long(value) elif hasattr(value, 'isoformat'): return value.isoformat()[:19].replace('T', ' ') - elif isinstance(value, (list,tuple)): # for type='list:..' + elif isinstance(value, (list, tuple)): # for type='list:..' return bar_encode(value) return value @@ -11160,18 +11266,18 @@ class Rows(object): else: (t, f) = m.groups() field = self.db[t][f] - if isinstance(record.get(t, None), (Row,dict)): + if isinstance(record.get(t, None), (Row, dict)): value = record[t][f] else: value = record[f] if field.type=='blob' and not value is None: value = base64.b64encode(value) elif represent and field.represent: - value = field.represent(value,record) + value = field.represent(value, record) row.append(none_exception(value)) writer.writerow(row) - def xml(self,strict=False,row_name='row',rows_name='rows'): + def xml(self, strict=False, row_name='row', rows_name='rows'): """ Serializes the table using sqlhtml.SQLTABLE (if present) """ @@ -11185,7 +11291,7 @@ class Rows(object): import sqlhtml return sqlhtml.SQLTABLE(self).xml() - def as_xml(self,row_name='row',rows_name='rows'): + def as_xml(self, row_name='row', rows_name='rows'): return self.xml(strict=True, row_name=row_name, rows_name=rows_name) def as_json(self, mode='object', default=None): @@ -11257,8 +11363,8 @@ def test_all(): Field('name'),\ Field('birth','date'),\ 
migrate='test_person.table') - >>> person_id = db.person.insert(name='Marco',birth='2005-06-22') - >>> person_id = db.person.insert(name='Massimo',birth='1971-12-21') + >>> person_id = db.person.insert(name='Marco', birth='2005-06-22') + >>> person_id = db.person.insert(name='Massimo', birth='1971-12-21') commented len(db().select(db.person.ALL)) commented 2 @@ -11272,14 +11378,14 @@ def test_all(): 'Massimo' >>> db.person(name='Massimo').name 'Massimo' - >>> db.person(db.person.name=='Massimo').name + >>> db.person(db.person.name == 'Massimo').name 'Massimo' >>> row = db.person[2] >>> row.name == row['name'] == row['person.name'] == row('person.name') True - >>> db(db.person.name=='Massimo').update(name='massimo') # test update + >>> db(db.person.name == 'Massimo').update(name='massimo') # test update 1 - >>> db(db.person.name=='Marco').select().first().delete_record() # test delete + >>> db(db.person.name == 'Marco').select().first().delete_record() # test delete 1 Update a single record @@ -11291,33 +11397,33 @@ def test_all(): Examples of complex search conditions - >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select()) + >>> len(db((db.person.name == 'Max') & (db.person.birth < '2003-01-01')).select()) 1 - >>> len(db((db.person.name=='Max')&(db.person.birth>> len(db((db.person.name == 'Max') & (db.person.birth>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select()) + >>> len(db((db.person.name == 'Max') | (db.person.birth < '2003-01-01')).select()) 1 - >>> me = db(db.person.id==person_id).select(db.person.name)[0] + >>> me = db(db.person.id == person_id).select(db.person.name)[0] >>> me.name 'Max' Examples of search conditions using extract from date/datetime/time - >>> len(db(db.person.birth.month()==12).select()) + >>> len(db(db.person.birth.month() == 12).select()) 1 - >>> len(db(db.person.birth.year()>1900).select()) + >>> len(db(db.person.birth.year() > 1900).select()) 1 Example of usage of NULL - >>> 
len(db(db.person.birth==None).select()) ### test NULL + >>> len(db(db.person.birth == None).select()) ### test NULL 0 - >>> len(db(db.person.birth!=None).select()) ### test NULL + >>> len(db(db.person.birth != None).select()) ### test NULL 1 Examples of search conditions using lower, upper, and like - >>> len(db(db.person.name.upper()=='MAX').select()) + >>> len(db(db.person.name.upper() == 'MAX').select()) 1 >>> len(db(db.person.name.like('%ax')).select()) 1 @@ -11334,7 +11440,7 @@ def test_all(): >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name) - >>> people = db().select(db.person.name, orderby=order, limitby=(0,100)) + >>> people = db().select(db.person.name, orderby=order, limitby=(0, 100)) Example of one 2 many relation @@ -11348,10 +11454,10 @@ def test_all(): A simple JOIN - >>> len(db(db.dog.owner==db.person.id).select()) + >>> len(db(db.dog.owner == db.person.id).select()) 1 - >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id))) + >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner == db.person.id))) 1 Drop tables @@ -11375,7 +11481,7 @@ def test_all(): Define a Set - >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id)) + >>> authored_papers = db((db.author.id == db.authorship.author_id) & (db.paper.id == db.authorship.paper_id)) >>> rows = authored_papers.select(db.author.name, db.paper.title) >>> for row in rows: print row.author.name, row.paper.title Massimo QCD @@ -11403,7 +11509,7 @@ def test_all(): >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum()) 45 - >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2) + >>> db(mynumber.x+2 == 5).select(mynumber.x + 2)[0](mynumber.x + 2) 5 Output in csv @@ -11437,16 +11543,19 @@ GQLDB = DAL DAL.Field = Field # was necessary in gluon/globals.py session.connect DAL.Table = Table # was necessary in gluon/globals.py session.connect + 
################################################################################ # Geodal utils ################################################################################ -def geoPoint(x,y): - return "POINT (%f %f)" % (x,y) +def geoPoint(x, y): + return "POINT (%f %f)" % (x, y) + def geoLine(*line): return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line) + def geoPolygon(*line): return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line) From 4ab3631f1c84e106f53f3b495ee5851acf3b1842 Mon Sep 17 00:00:00 2001 From: BuhtigithuB Date: Sun, 4 May 2014 12:17:31 -0400 Subject: [PATCH 2/2] Fix wrong space insert in patch --- gluon/dal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/gluon/dal.py b/gluon/dal.py index 047433f6..6b012aa4 100644 --- a/gluon/dal.py +++ b/gluon/dal.py @@ -8284,7 +8284,7 @@ class DAL(object): 'error': 'invalid path', 'response': None}) return Row({'status': 200, 'response': response, 'pattern': pattern, 'count': count}) - return Row({'status':4 00, 'error': 'no matching pattern', 'response': None}) + return Row({'status': 400, 'error': 'no matching pattern', 'response': None}) def define_table(self, tablename,