Merge pull request #486 from gi0baro/DAL-modular

Added modular DAL, updated gluon imports to new structure
mdipierro
2014-09-30 00:24:26 -05:00
43 changed files with 11675 additions and 11754 deletions
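For illustration, the core of the change is that the monolithic gluon/dal.py becomes a gluon/dal/ package, so imports move from one flat module to submodules. A minimal before/after sketch (paths exactly as they appear in the hunks below):

# before: everything lived in the single gluon/dal.py module
from gluon.dal import BaseAdapter, SQLDB, SQLField, DAL, Field

# after: the public API stays at the package top level,
# while internals move to submodules
from gluon.dal import DAL, Field
from gluon.dal.base import BaseAdapter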
+4 -3
@@ -25,7 +25,8 @@ from gluon.restricted import restricted, compile2
 from gluon.fileutils import mktree, listdir, read_file, write_file
 from gluon.myregex import regex_expose, regex_longcomments
 from gluon.languages import translator
-from gluon.dal import BaseAdapter, SQLDB, SQLField, DAL, Field
+from gluon.dal import DAL, Field
+from gluon.dal.base import BaseAdapter
 from gluon.sqlhtml import SQLFORM, SQLTABLE
 from gluon.cache import Cache
 from gluon.globals import current, Response
@@ -388,8 +389,8 @@ _base_environment_['HTTP'] = HTTP
 _base_environment_['redirect'] = redirect
 _base_environment_['DAL'] = DAL
 _base_environment_['Field'] = Field
-_base_environment_['SQLDB'] = SQLDB # for backward compatibility
-_base_environment_['SQLField'] = SQLField # for backward compatibility
+_base_environment_['SQLDB'] = DAL # for backward compatibility
+_base_environment_['SQLField'] = Field # for backward compatibility
 _base_environment_['SQLFORM'] = SQLFORM
 _base_environment_['SQLTABLE'] = SQLTABLE
 _base_environment_['LOAD'] = LOAD
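Since SQLDB and SQLField no longer exist as distinct classes, the execution environment now binds those names directly to DAL and Field, so legacy application code should keep working unchanged. A sketch, assuming a web2py model file:

db = SQLDB('sqlite://storage.sqlite')        # SQLDB is now just DAL
db.define_table('thing', SQLField('name'))   # SQLField is now just Field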
+2 -1
@@ -7,7 +7,8 @@ db = get_db()
"""
import os
from gluon import *
from gluon.dal import ADAPTERS, UseDatabaseStoredFile,PostgreSQLAdapter
from gluon.dal.adapters import ADAPTERS, PostgreSQLAdapter
from gluon.dal.helpers.classes import UseDatabaseStoredFile
class HerokuPostgresAdapter(UseDatabaseStoredFile,PostgreSQLAdapter):
drivers = ('psycopg2',)
-11716
File diff suppressed because it is too large.
+4
@@ -0,0 +1,4 @@
from .base import DAL
from .objects import Field
from .helpers.classes import SQLCustomType
from .helpers.methods import geoPoint, geoLine, geoPolygon
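This __init__.py defines the package's public surface, so application code only needs top-level imports. A minimal sketch:

from gluon.dal import DAL, Field, SQLCustomType

db = DAL('sqlite://storage.sqlite')
db.define_table('thing', Field('name'))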
+21
@@ -0,0 +1,21 @@
import sys
import hashlib
import os
PY2 = sys.version_info[0] == 2
if PY2:
import cPickle as pickle
import cStringIO as StringIO
import copy_reg as copyreg
hashlib_md5 = hashlib.md5
else:
import pickle
from io import StringIO
import copyreg
hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
pjoin = os.path.join
exists = os.path.exists
ogetattr = object.__getattribute__
osetattr = object.__setattr__
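_compat.py centralizes the Python 2/3 differences so the rest of the package imports them from a single place. A small usage sketch:

from gluon.dal._compat import PY2, pickle, hashlib_md5, pjoin

data = pickle.dumps({'a': 1})               # cPickle on Py2, pickle on Py3
digest = hashlib_md5('hello').hexdigest()   # bytes-vs-str handled per version
path = pjoin('applications', 'welcome', 'databases')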
+13
@@ -0,0 +1,13 @@
import threading
import logging
GLOBAL_LOCKER = threading.RLock()
THREAD_LOCAL = threading.local()
LOGGER = logging.getLogger("web2py.dal")
DEFAULT = lambda: None
def IDENTITY(x): return x
def OR(a,b): return a|b
def AND(a,b): return a&b
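OR and AND are reduce-style combinators over DAL queries (they just apply | and &). A sketch of folding several queries into one WHERE clause:

from functools import reduce
from gluon.dal import DAL, Field
from gluon.dal._globals import OR

db = DAL('sqlite:memory')
db.define_table('t', Field('name'))
queries = [db.t.name == n for n in ('a', 'b', 'c')]
rows = db(reduce(OR, queries)).select()   # name='a' OR name='b' OR name='c'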
+307
@@ -0,0 +1,307 @@
# -*- coding: utf-8 -*-
import decimal
import re
from ._globals import LOGGER
# verify presence of web2py modules
try:
from collections import OrderedDict
except ImportError:
from gluon.contrib.ordereddict import OrderedDict
try:
from gluon.utils import web2py_uuid
except (ImportError, SystemError):
import uuid
def web2py_uuid(): return str(uuid.uuid4())
try:
import portalocker
have_portalocker = True
except ImportError:
portalocker = None
have_portalocker = False
try:
from gluon import serializers
have_serializers = True
simplejson = None
except ImportError:
serializers = None
have_serializers = False
try:
import json as simplejson
except ImportError:
try:
import gluon.contrib.simplejson as simplejson
except ImportError:
simplejson = None
# list of drivers will be built on the fly
# and lists only what is available
DRIVERS = []
try:
from new import classobj
from google.appengine.ext import db as gae
from google.appengine.ext import ndb
from google.appengine.api import namespace_manager, rdbms
from google.appengine.api.datastore_types import Key ### for belongs on ID
from google.appengine.ext.db.polymodel import PolyModel
from google.appengine.ext.ndb.polymodel import PolyModel as NDBPolyModel
DRIVERS.append('google')
except ImportError:
classobj = None
gae = None
ndb = None
namespace_manager = rdbms = None
Key = None
PolyModel = NDBPolyModel = None
if not 'google' in DRIVERS:
try:
from pysqlite2 import dbapi2 as sqlite2
DRIVERS.append('sqlite2')
except ImportError:
LOGGER.debug('no SQLite drivers pysqlite2.dbapi2')
try:
from sqlite3 import dbapi2 as sqlite3
DRIVERS.append('sqlite3')
except ImportError:
LOGGER.debug('no SQLite drivers sqlite3')
try:
# first try contrib driver, then from site-packages (if installed)
try:
import gluon.contrib.pymysql as pymysql
# monkeypatch pymysql because they haven't fixed the bug:
# https://github.com/petehunt/PyMySQL/issues/86
pymysql.ESCAPE_REGEX = re.compile("'")
pymysql.ESCAPE_MAP = {"'": "''"}
# end monkeypatch
except ImportError:
import pymysql
DRIVERS.append('pymysql')
except ImportError:
LOGGER.debug('no MySQL driver pymysql')
try:
import MySQLdb
DRIVERS.append('MySQLdb')
except ImportError:
LOGGER.debug('no MySQL driver MySQLDB')
try:
import mysql.connector as mysqlconnector
DRIVERS.append("mysqlconnector")
except ImportError:
LOGGER.debug("no driver mysql.connector")
try:
import psycopg2
from psycopg2.extensions import adapt as psycopg2_adapt
DRIVERS.append('psycopg2')
except ImportError:
psycopg2_adapt = None
LOGGER.debug('no PostgreSQL driver psycopg2')
try:
# first try contrib driver, then from site-packages (if installed)
try:
import gluon.contrib.pg8000.dbapi as pg8000
except ImportError:
import pg8000.dbapi as pg8000
DRIVERS.append('pg8000')
except ImportError:
LOGGER.debug('no PostgreSQL driver pg8000')
try:
import cx_Oracle
DRIVERS.append('cx_Oracle')
except ImportError:
cx_Oracle = None
LOGGER.debug('no Oracle driver cx_Oracle')
try:
try:
import pyodbc
except ImportError:
try:
import gluon.contrib.pypyodbc as pyodbc
except Exception, e:
raise ImportError(str(e))
DRIVERS.append('pyodbc')
#DRIVERS.append('DB2(pyodbc)')
#DRIVERS.append('Teradata(pyodbc)')
#DRIVERS.append('Ingres(pyodbc)')
except ImportError:
pyodbc = None
LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc')
try:
import Sybase
DRIVERS.append('Sybase')
except ImportError:
LOGGER.debug('no Sybase driver')
try:
import kinterbasdb
DRIVERS.append('kinterbasdb')
#DRIVERS.append('Firebird(kinterbasdb)')
except ImportError:
LOGGER.debug('no Firebird/Interbase driver kinterbasdb')
try:
import fdb
DRIVERS.append('fdb')
except ImportError:
LOGGER.debug('no Firebird driver fdb')
try:
import firebirdsql
DRIVERS.append('firebirdsql')
except ImportError:
LOGGER.debug('no Firebird driver firebirdsql')
try:
import informixdb
DRIVERS.append('informixdb')
LOGGER.warning('Informix support is experimental')
except ImportError:
LOGGER.debug('no Informix driver informixdb')
try:
import sapdb
DRIVERS.append('sapdb')
LOGGER.warning('SAPDB support is experimental')
except ImportError:
LOGGER.debug('no SAP driver sapdb')
try:
import cubriddb
DRIVERS.append('cubriddb')
LOGGER.warning('Cubrid support is experimental')
except ImportError:
LOGGER.debug('no Cubrid driver cubriddb')
try:
from com.ziclix.python.sql import zxJDBC
import java.sql
# Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/
from org.sqlite import JDBC # required by java.sql; ensure we have it
zxJDBC_sqlite = java.sql.DriverManager
DRIVERS.append('zxJDBC')
#DRIVERS.append('SQLite(zxJDBC)')
LOGGER.warning('zxJDBC support is experimental')
is_jdbc = True
except ImportError:
LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC')
is_jdbc = False
try:
import couchdb
DRIVERS.append('couchdb')
except ImportError:
couchdb = None
LOGGER.debug('no Couchdb driver couchdb')
try:
import pymongo
DRIVERS.append('pymongo')
except ImportError:
LOGGER.debug('no MongoDB driver pymongo')
try:
import imaplib
DRIVERS.append('imaplib')
except ImportError:
LOGGER.debug('no IMAP driver imaplib')
GAEDecimalProperty = None
NDBDecimalProperty = None
else:
is_jdbc = False
class GAEDecimalProperty(gae.Property):
"""
GAE decimal implementation
"""
data_type = decimal.Decimal
def __init__(self, precision, scale, **kwargs):
super(GAEDecimalProperty, self).__init__(**kwargs)
d = '1.'
for x in range(scale):
d += '0'
self.round = decimal.Decimal(d)
def get_value_for_datastore(self, model_instance):
value = super(GAEDecimalProperty, self)\
.get_value_for_datastore(model_instance)
if value is None or value == '':
return None
else:
return str(value)
def make_value_from_datastore(self, value):
if value is None or value == '':
return None
else:
return decimal.Decimal(value).quantize(self.round)
def validate(self, value):
value = super(GAEDecimalProperty, self).validate(value)
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise gae.BadValueError("Property %s must be a Decimal or string."\
% self.name)
#TODO Needs more testing
class NDBDecimalProperty(ndb.StringProperty):
"""
NDB decimal implementation
"""
data_type = decimal.Decimal
def __init__(self, precision, scale, **kwargs):
d = '1.'
for x in range(scale):
d += '0'
self.round = decimal.Decimal(d)
def _to_base_type(self, value):
if value is None or value == '':
return None
else:
return str(value)
def _from_base_type(self, value):
if value is None or value == '':
return None
else:
return decimal.Decimal(value).quantize(self.round)
def _validate(self, value):
if value is None or isinstance(value, decimal.Decimal):
return value
elif isinstance(value, basestring):
return decimal.Decimal(value)
raise TypeError("Property %s must be a Decimal or string."\
% self._name)
psycopg2_adapt = None
cx_Oracle = None
pyodbc = None
couchdb = None
def get_driver(name):
return globals().get(name)
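get_driver simply looks the name up in this module's globals, so code elsewhere can fetch an already-imported driver module without re-importing it:

from gluon.dal._load import DRIVERS, get_driver

if 'sqlite3' in DRIVERS:
    sqlite3 = get_driver('sqlite3')   # the module imported above, else None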
+59
@@ -0,0 +1,59 @@
# -*- coding: utf-8 -*-
from .sqlite import SQLiteAdapter, SpatiaLiteAdapter, JDBCSQLiteAdapter
from .mysql import MySQLAdapter
from .postgre import PostgreSQLAdapter, NewPostgreSQLAdapter, JDBCPostgreSQLAdapter
from .oracle import OracleAdapter
from .mssql import MSSQLAdapter, MSSQL2Adapter, MSSQL3Adapter, MSSQL4Adapter, \
VerticaAdapter, SybaseAdapter
from .firebird import FireBirdAdapter, FireBirdEmbeddedAdapter
from .informix import InformixAdapter, InformixSEAdapter
from .db2 import DB2Adapter
from .teradata import TeradataAdapter
from .ingres import IngresAdapter, IngresUnicodeAdapter
from .sapdb import SAPDBAdapter
from .cubrid import CubridAdapter
from .google import GoogleDatastoreAdapter, GoogleSQLAdapter
from .couchdb import CouchDBAdapter
from .mongo import MongoDBAdapter
from .imap import IMAPAdapter
ADAPTERS = {
'sqlite': SQLiteAdapter,
'spatialite': SpatiaLiteAdapter,
'sqlite:memory': SQLiteAdapter,
'spatialite:memory': SpatiaLiteAdapter,
'mysql': MySQLAdapter,
'postgres': PostgreSQLAdapter,
'postgres:psycopg2': PostgreSQLAdapter,
'postgres:pg8000': PostgreSQLAdapter,
'postgres2:psycopg2': NewPostgreSQLAdapter,
'postgres2:pg8000': NewPostgreSQLAdapter,
'oracle': OracleAdapter,
'mssql': MSSQLAdapter,
'mssql2': MSSQL2Adapter,
'mssql3': MSSQL3Adapter,
'mssql4' : MSSQL4Adapter,
'vertica': VerticaAdapter,
'sybase': SybaseAdapter,
'db2': DB2Adapter,
'teradata': TeradataAdapter,
'informix': InformixAdapter,
'informix-se': InformixSEAdapter,
'firebird': FireBirdAdapter,
'firebird_embedded': FireBirdEmbeddedAdapter,
'ingres': IngresAdapter,
'ingresu': IngresUnicodeAdapter,
'sapdb': SAPDBAdapter,
'cubrid': CubridAdapter,
'jdbc:sqlite': JDBCSQLiteAdapter,
'jdbc:sqlite:memory': JDBCSQLiteAdapter,
'jdbc:postgres': JDBCPostgreSQLAdapter,
'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
'google:datastore': GoogleDatastoreAdapter,
'google:datastore+ndb': GoogleDatastoreAdapter,
'google:sql': GoogleSQLAdapter,
'couchdb': CouchDBAdapter,
'mongodb': MongoDBAdapter,
'imap': IMAPAdapter
}
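Adapter selection is keyed on the URI scheme, i.e. everything before '://' (optionally including a ':driver' suffix). A rough sketch of the dispatch performed by the DAL constructor:

def adapter_for(uri):
    # 'postgres:psycopg2://user:pw@host/db' -> ADAPTERS['postgres:psycopg2']
    scheme = uri.split('://', 1)[0]
    try:
        return ADAPTERS[scheme]
    except KeyError:
        raise SyntaxError('unsupported adapter or bad URI scheme: %r' % scheme)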
File diff suppressed because it is too large.
+202
@@ -0,0 +1,202 @@
# -*- coding: utf-8 -*-
import datetime
from .._globals import IDENTITY
from .._load import serializers, couchdb, web2py_uuid
from ..objects import Field, Query
from ..helpers.classes import SQLALL
from ..helpers.methods import uuid2int
from .base import BaseAdapter, NoSQLAdapter, SELECT_ARGS
class CouchDBAdapter(NoSQLAdapter):
drivers = ('couchdb',)
uploads_in_blob = True
types = {
'boolean': bool,
'string': str,
'text': str,
'json': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'bigint': long,
'float': float,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
def file_exists(self, filename): pass
def file_open(self, filename, mode='rb', lock=True): pass
def file_close(self, fileobj): pass
def expand(self,expression,field_type=None):
if isinstance(expression,Field):
if expression.type=='id':
return "%s._id" % expression.tablename
return BaseAdapter.expand(self,expression,field_type)
def AND(self,first,second):
return '(%s && %s)' % (self.expand(first),self.expand(second))
def OR(self,first,second):
return '(%s || %s)' % (self.expand(first),self.expand(second))
def EQ(self,first,second):
if second is None:
return '(%s == null)' % self.expand(first)
return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))
def NE(self,first,second):
if second is None:
return '(%s != null)' % self.expand(first)
return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))
def COMMA(self,first,second):
return '%s + %s' % (self.expand(first),self.expand(second))
def represent(self, obj, fieldtype):
value = NoSQLAdapter.represent(self, obj, fieldtype)
if fieldtype=='id':
return repr(str(long(value)))
elif fieldtype in ('date','time','datetime','boolean'):
return serializers.json(value)
return repr(not isinstance(value,unicode) and value \
or value and value.encode('utf8'))
def __init__(self,db,uri='couchdb://127.0.0.1:5984',
pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.dbengine = 'couchdb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = pool_size
url='http://'+uri[10:]
def connector(url=url,driver_args=driver_args):
return self.driver.Server(url,**driver_args)
self.reconnect(connector,cursor=False)
def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
if migrate:
try:
self.connection.create(table._tablename)
except:
pass
def insert(self,table,fields):
id = uuid2int(web2py_uuid())
ctable = self.connection[table._tablename]
values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
values['_id'] = str(id)
ctable.save(values)
return id
def _select(self,query,fields,attributes):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
for key in set(attributes.keys())-SELECT_ARGS:
raise SyntaxError('invalid select attribute: %s' % key)
new_fields=[]
for item in fields:
if isinstance(item,SQLALL):
new_fields += item._table
else:
new_fields.append(item)
def uid(fd):
return fd=='id' and '_id' or fd
def get(row,fd):
return fd=='id' and long(row['_id']) or row.get(fd,None)
fields = new_fields
tablename = self.get_table(query)
fieldnames = [f.name for f in (fields or self.db[tablename])]
colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
dict(t=tablename,
query=self.expand(query),
order='%s._id' % tablename,
fields=fields)
return fn, colnames
def select(self,query,fields,attributes):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
fn, colnames = self._select(query,fields,attributes)
tablename = colnames[0].split('.')[0]
ctable = self.connection[tablename]
rows = [cols['value'] for cols in ctable.query(fn)]
processor = attributes.get('processor',self.parse)
return processor(rows,fields,colnames,False)
def delete(self,tablename,query):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
if query.first.type=='id' and query.op==self.EQ:
id = query.second
tablename = query.first.tablename
assert(tablename == query.first.tablename)
ctable = self.connection[tablename]
try:
del ctable[str(id)]
return 1
except couchdb.http.ResourceNotFound:
return 0
else:
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
ctable = self.connection[tablename]
for row in rows:
del ctable[str(row.id)]
return len(rows)
def update(self,tablename,query,fields):
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
if query.first.type=='id' and query.op==self.EQ:
id = query.second
tablename = query.first.tablename
ctable = self.connection[tablename]
try:
doc = ctable[str(id)]
for key,value in fields:
doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
ctable.save(doc)
return 1
except couchdb.http.ResourceNotFound:
return 0
else:
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
ctable = self.connection[tablename]
table = self.db[tablename]
for row in rows:
doc = ctable[str(row.id)]
for key,value in fields:
doc[key.name] = self.represent(value,table[key.name].type)
ctable.save(doc)
return len(rows)
def count(self,query,distinct=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
rows = self.select(query,[self.db[tablename]._id],{})
return len(rows)
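Usage follows the normal DAL pattern; a sketch, assuming the couchdb driver is installed and a server is listening on the adapter's default address:

from gluon.dal import DAL, Field

db = DAL('couchdb://127.0.0.1:5984')
db.define_table('person', Field('name'))
pid = db.person.insert(name='Alice')            # _id derives from a web2py uuid
rows = db(db.person.name == 'Alice').select()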
+54
@@ -0,0 +1,54 @@
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from .mysql import MySQLAdapter
class CubridAdapter(MySQLAdapter):
drivers = ('cubriddb',)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "cubrid"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '30000')
user = credential_decoder(user)
passwd = credential_decoder(password)
def connector(host=host,port=port,db=db,
user=user,passwd=passwd,driver_args=driver_args):
return self.driver.connect(host,port,db,user,passwd,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
+93
@@ -0,0 +1,93 @@
# -*- coding: utf-8 -*-
import base64
import datetime
from .._globals import IDENTITY
from .base import BaseAdapter
class DB2Adapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'REAL',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'RAND()'
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'blob':
obj = base64.b64encode(str(obj))
return "BLOB('%s')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+'-00.00.00'
return "'%s'" % obj
return None
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "db2"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def execute(self,command):
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command)
def lastrowid(self,table):
self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
return long(self.cursor.fetchone()[0])
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
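Everything after 'db2://' is passed to pyodbc.connect() verbatim, so the URI tail is a raw ODBC connection string (values below are illustrative):

db = DAL('db2://DATABASE=test;HOSTNAME=localhost;PORT=50000;'
         'PROTOCOL=TCPIP;UID=db2user;PWD=secret')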
+182
@@ -0,0 +1,182 @@
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from ..objects import Expression
from .base import BaseAdapter
class FireBirdAdapter(BaseAdapter):
drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
commit_on_alter_table = False
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'BLOB SUB_TYPE 1',
'json': 'BLOB SUB_TYPE 1',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB SUB_TYPE 0',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INTEGER PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BLOB SUB_TYPE 1',
'list:string': 'BLOB SUB_TYPE 1',
'list:reference': 'BLOB SUB_TYPE 1',
'big-id': 'BIGINT PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def sequence_name(self,tablename):
return ('genid_' + self.QUOTE_TEMPLATE) % tablename
def trigger_name(self,tablename):
return 'trg_id_%s' % tablename
def RANDOM(self):
return 'RAND()'
def EPOCH(self, first):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])
def LENGTH(self, first):
return "CHAR_LENGTH(%s)" % self.expand(first)
def CONTAINS(self,first,second,case_sensitive=False):
if first.type.startswith('list:'):
second = Expression(None,self.CONCAT('|',Expression(
None,self.REPLACE(second,('|','||'))),'|'))
return '(%s CONTAINING %s)' % (self.expand(first),
self.expand(second, 'string'))
def _drop(self,table,mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP GENERATOR %s;' % sequence_name]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def _truncate(self,table,mode = ''):
return ['DELETE FROM %s;' % table._tablename,
'SET GENERATOR %s TO 0;' % table._sequence_name]
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
port = int(m.group('port') or 3050)
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
charset = m.group('charset') or 'UTF8'
driver_args.update(dsn='%s/%s:%s' % (host,port,db),
user = credential_decoder(user),
password = credential_decoder(password),
charset = charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def create_sequence_and_triggers(self, query, table, **args):
tablename = table._tablename
sequence_name = table._sequence_name
trigger_name = table._trigger_name
self.execute(query)
self.execute('create generator %s;' % sequence_name)
self.execute('set generator %s to 0;' % sequence_name)
self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))
def lastrowid(self,table):
sequence_name = table._sequence_name
self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
return long(self.cursor.fetchone()[0])
class FireBirdEmbeddedAdapter(FireBirdAdapter):
drivers = ('kinterbasdb','firebirdsql','fdb','pyodbc')
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "firebird"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
pathdb = m.group('path')
if not pathdb:
raise SyntaxError('Path required')
charset = m.group('charset')
if not charset:
charset = 'UTF8'
host = ''
driver_args.update(host=host,
database=pathdb,
user=credential_decoder(user),
password=credential_decoder(password),
charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
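The URI mirrors REGEX_URI, with the port defaulting to 3050 and the charset to UTF8; the embedded variant (FireBirdEmbeddedAdapter) takes a filesystem path after the credentials instead of host/port (values below are illustrative):

db = DAL('firebird://user:pass@fbhost:3050/employee.fdb?set_encoding=UTF8')
db = DAL('firebird_embedded://user:pass@/srv/data/employee.fdb')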
+619
@@ -0,0 +1,619 @@
# -*- coding: utf-8 -*-
import os
import re
from .._compat import pjoin
from .._globals import IDENTITY, LOGGER, THREAD_LOCAL
from .._load import classobj, gae, ndb, NDBDecimalProperty, GAEDecimalProperty, \
namespace_manager, Key, NDBPolyModel, PolyModel, rdbms, have_serializers, \
serializers, simplejson
from ..objects import Table, Field, Expression, Query
from ..helpers.classes import SQLCustomType, SQLALL, Reference, UseDatabaseStoredFile
from ..helpers.methods import use_common_filters, xorify
from .base import NoSQLAdapter
from .mysql import MySQLAdapter
class GoogleSQLAdapter(UseDatabaseStoredFile, MySQLAdapter):
uploads_in_blob = True
REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')
def __init__(self, db, uri='google:sql://realm:domain/database',
pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
self.pool_size = pool_size
self.db_codec = db_codec
self._after_connection = after_connection
if do_connect: self.find_driver(adapter_args, uri)
self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
os.sep+'applications'+os.sep,1)[1])
ruri = uri.split("://")[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
instance = credential_decoder(m.group('instance'))
self.dbstring = db = credential_decoder(m.group('db'))
driver_args['instance'] = instance
if not 'charset' in driver_args:
driver_args['charset'] = 'utf8'
self.createdb = createdb = adapter_args.get('createdb',True)
if not createdb:
driver_args['database'] = db
def connector(driver_args=driver_args):
return rdbms.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
if self.createdb:
# self.execute('DROP DATABASE %s' % self.dbstring)
self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
self.execute('USE %s' % self.dbstring)
self.execute("SET FOREIGN_KEY_CHECKS=1;")
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def execute(self, command, *a, **b):
return self.log_execute(command.decode('utf8'), *a, **b)
def find_driver(self,adapter_args,uri=None):
self.adapter_args = adapter_args
self.driver = "google"
class GAEF(object):
def __init__(self,name,op,value,apply):
self.name=name=='id' and '__key__' or name
self.op=op
self.value=value
self.apply=apply
def __repr__(self):
return '(%s %s %s:%s)' % (self.name, self.op, repr(self.value), type(self.value))
class GoogleDatastoreAdapter(NoSQLAdapter):
"""
NDB:
You can enable NDB by using adapter_args::
db = DAL('google:datastore', adapter_args={'ndb_settings':ndb_settings, 'use_ndb':True})
ndb_settings is optional and can be used for per-model caching settings.
It must be a dict in this form::
ndb_settings = {<table_name>:{<variable_name>:<variable_value>}}
See: https://developers.google.com/appengine/docs/python/ndb/cache
"""
MAX_FETCH_LIMIT = 1000000
uploads_in_blob = True
types = {}
# reconnect is not required for Datastore dbs
reconnect = lambda *args, **kwargs: None
def file_exists(self, filename): pass
def file_open(self, filename, mode='rb', lock=True): pass
def file_close(self, fileobj): pass
REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.use_ndb = adapter_args.get('use_ndb',uri.startswith('google:datastore+ndb'))
if self.use_ndb is True:
self.types.update({
'boolean': ndb.BooleanProperty,
'string': (lambda **kwargs: ndb.StringProperty(**kwargs)),
'text': ndb.TextProperty,
'json': ndb.TextProperty,
'password': ndb.StringProperty,
'blob': ndb.BlobProperty,
'upload': ndb.StringProperty,
'integer': ndb.IntegerProperty,
'bigint': ndb.IntegerProperty,
'float': ndb.FloatProperty,
'double': ndb.FloatProperty,
'decimal': NDBDecimalProperty,
'date': ndb.DateProperty,
'time': ndb.TimeProperty,
'datetime': ndb.DateTimeProperty,
'id': None,
'reference': ndb.IntegerProperty,
'list:string': (lambda **kwargs: ndb.StringProperty(repeated=True,default=None, **kwargs)),
'list:integer': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
'list:reference': (lambda **kwargs: ndb.IntegerProperty(repeated=True,default=None, **kwargs)),
})
else:
self.types.update({
'boolean': gae.BooleanProperty,
'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
'text': gae.TextProperty,
'json': gae.TextProperty,
'password': gae.StringProperty,
'blob': gae.BlobProperty,
'upload': gae.StringProperty,
'integer': gae.IntegerProperty,
'bigint': gae.IntegerProperty,
'float': gae.FloatProperty,
'double': gae.FloatProperty,
'decimal': GAEDecimalProperty,
'date': gae.DateProperty,
'time': gae.TimeProperty,
'datetime': gae.DateTimeProperty,
'id': None,
'reference': gae.IntegerProperty,
'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
})
self.db = db
self.uri = uri
self.dbengine = 'google:datastore'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = 0
match = self.REGEX_NAMESPACE.match(uri)
if match:
namespace_manager.set_namespace(match.group('namespace'))
self.keyfunc = (self.use_ndb and ndb.Key) or Key.from_path
self.ndb_settings = None
if 'ndb_settings' in adapter_args:
self.ndb_settings = adapter_args['ndb_settings']
def parse_id(self, value, field_type):
return value
def represent(self, obj, fieldtype):
if fieldtype == "json":
if have_serializers:
return serializers.json(obj)
elif simplejson:
return simplejson.dumps(obj)
else:
raise Exception("Could not dump json object (missing json library)")
else:
return NoSQLAdapter.represent(self, obj, fieldtype)
def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
myfields = {}
for field in table:
if isinstance(polymodel,Table) and field.name in polymodel.fields():
continue
attr = {}
if isinstance(field.custom_qualifier, dict):
# these are custom properties to add to the GAE field declaration
attr = field.custom_qualifier
field_type = field.type
if isinstance(field_type, SQLCustomType):
ftype = self.types[field_type.native or field_type.type](**attr)
elif isinstance(field_type, ((self.use_ndb and ndb.Property) or gae.Property)):
ftype = field_type
elif field_type.startswith('id'):
continue
elif field_type.startswith('decimal'):
precision, scale = field_type[7:].strip('()').split(',')
precision = int(precision)
scale = int(scale)
dec_cls = (self.use_ndb and NDBDecimalProperty) or GAEDecimalProperty
ftype = dec_cls(precision, scale, **attr)
elif field_type.startswith('reference'):
if field.notnull:
attr = dict(required=True)
ftype = self.types[field_type[:9]](**attr)
elif field_type.startswith('list:reference'):
if field.notnull:
attr['required'] = True
ftype = self.types[field_type[:14]](**attr)
elif field_type.startswith('list:'):
ftype = self.types[field_type](**attr)
elif not field_type in self.types\
or not self.types[field_type]:
raise SyntaxError('Field: unknown field type: %s' % field_type)
else:
ftype = self.types[field_type](**attr)
myfields[field.name] = ftype
if not polymodel:
model_cls = (self.use_ndb and ndb.Model) or gae.Model
table._tableobj = classobj(table._tablename, (model_cls, ), myfields)
if self.use_ndb:
# Set NDB caching variables
if self.ndb_settings and (table._tablename in self.ndb_settings):
for k, v in self.ndb_settings[table._tablename].iteritems():
setattr(table._tableobj, k, v)
elif polymodel==True:
pm_cls = (self.use_ndb and NDBPolyModel) or PolyModel
table._tableobj = classobj(table._tablename, (pm_cls, ), myfields)
elif isinstance(polymodel,Table):
table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
else:
raise SyntaxError("polymodel must be None, True, a table or a tablename")
return None
def expand(self,expression,field_type=None):
if isinstance(expression,Field):
if expression.type in ('text', 'blob', 'json'):
raise SyntaxError('AppEngine does not index by: %s' % expression.type)
return expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
return expression.op(expression.first, expression.second)
elif not expression.first is None:
return expression.op(expression.first)
else:
return expression.op()
elif field_type:
return self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
return ','.join([self.represent(item,field_type) for item in expression])
else:
return str(expression)
### TODO from gql.py Expression
def AND(self,first,second):
a = self.expand(first)
b = self.expand(second)
if b[0].name=='__key__' and a[0].name!='__key__':
return b+a
return a+b
def EQ(self,first,second=None):
if isinstance(second, Key):
return [GAEF(first.name,'=',second,lambda a,b:a==b)]
return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
def NE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)]
else:
if not second is None:
second = self.keyfunc(first._tablename, long(second))
return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
def LT(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)]
else:
second = self.keyfunc(first._tablename, long(second))
return [GAEF(first.name,'<',second,lambda a,b:a<b)]
def LE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)]
else:
second = self.keyfunc(first._tablename, long(second))
return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
def GT(self,first,second=None):
if first.type != 'id' or second==0 or second == '0':
return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
else:
second = self.keyfunc(first._tablename, long(second))
return [GAEF(first.name,'>',second,lambda a,b:a>b)]
def GE(self,first,second=None):
if first.type != 'id':
return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)]
else:
second = self.keyfunc(first._tablename, long(second))
return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
def INVERT(self,first):
return '-%s' % first.name
def COMMA(self,first,second):
return '%s, %s' % (self.expand(first),self.expand(second))
def BELONGS(self,first,second=None):
if not isinstance(second,(list, tuple, set)):
raise SyntaxError("Not supported")
if not self.use_ndb:
if isinstance(second,set):
second = list(second)
if first.type == 'id':
second = [self.keyfunc(first._tablename, int(i)) for i in second]
return [GAEF(first.name,'in',second,lambda a,b:a in b)]
def CONTAINS(self,first,second,case_sensitive=False):
# silently ignoring: GAE can only do case sensitive matches!
if not first.type.startswith('list:'):
raise SyntaxError("Not supported")
return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
def NOT(self,first):
nops = { self.EQ: self.NE,
self.NE: self.EQ,
self.LT: self.GE,
self.GT: self.LE,
self.LE: self.GT,
self.GE: self.LT}
if not isinstance(first,Query):
raise SyntaxError("Not suported")
nop = nops.get(first.op,None)
if not nop:
raise SyntaxError("Not suported %s" % first.op.__name__)
first.op = nop
return self.expand(first)
def truncate(self,table,mode):
self.db(self.db._adapter.id_query(table)).delete()
GAE_FILTER_OPTIONS = {
'=': lambda q, t, p, v: q.filter(getattr(t,p) == v),
'>': lambda q, t, p, v: q.filter(getattr(t,p) > v),
'<': lambda q, t, p, v: q.filter(getattr(t,p) < v),
'<=': lambda q, t, p, v: q.filter(getattr(t,p) <= v),
'>=': lambda q, t, p, v: q.filter(getattr(t,p) >= v),
'!=': lambda q, t, p, v: q.filter(getattr(t,p) != v),
'in': lambda q, t, p, v: q.filter(getattr(t,p).IN(v)),
}
def filter(self, query, tableobj, prop, op, value):
return self.GAE_FILTER_OPTIONS[op](query, tableobj, prop, value)
def select_raw(self,query,fields=None,attributes=None,count_only=False):
db = self.db
fields = fields or []
attributes = attributes or {}
args_get = attributes.get
new_fields = []
for item in fields:
if isinstance(item,SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if query:
tablename = self.get_table(query)
elif fields:
tablename = fields[0].tablename
query = db._adapter.id_query(fields[0].table)
else:
raise SyntaxError("Unable to determine a tablename")
if query:
if use_common_filters(query):
query = self.common_filter(query,[tablename])
#tableobj is a GAE/NDB Model class (or subclass)
tableobj = db[tablename]._tableobj
filters = self.expand(query)
projection = None
if len(db[tablename].fields) == len(fields):
#getting all fields, not a projection query
projection = None
elif args_get('projection') == True:
projection = []
for f in fields:
if f.type in ['text', 'blob', 'json']:
raise SyntaxError(
"text and blob field types not allowed in projection queries")
else:
projection.append(f.name)
elif args_get('filterfields') is True:
projection = []
for f in fields:
projection.append(f.name)
# real projections can't include 'id';
# it will be added to the result later
query_projection = [
p for p in projection if \
p != db[tablename]._id.name] if projection and \
args_get('projection') == True\
else None
cursor = args_get('reusecursor')
cursor = cursor if isinstance(cursor, str) else None
if self.use_ndb:
qo = ndb.QueryOptions(projection=query_projection, cursor=cursor)
items = tableobj.query(default_options=qo)
else:
items = gae.Query(tableobj, projection=query_projection, cursor=cursor)
for filter in filters:
if (args_get('projection') == True and
filter.name in query_projection and
filter.op in ('=', '<=', '>=')):
raise SyntaxError("projection fields cannot have equality filters")
if filter.name=='__key__' and filter.op=='>' and filter.value==0:
continue
elif filter.name=='__key__' and filter.op=='=':
if filter.value==0:
items = []
elif isinstance(filter.value, (self.use_ndb and ndb.Key) or Key):
# key queries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = filter.value.get() if self.use_ndb else tableobj.get(filter.value)
items = [item] if item else []
else:
# key queries return a class instance,
# can't use projection
# extra values will be ignored in post-processing later
item = tableobj.get_by_id(filter.value)
items = [item] if item else []
elif isinstance(items,list): # i.e. there is a single record!
items = [i for i in items if filter.apply(
getattr(i,filter.name),filter.value)]
else:
if filter.name=='__key__' and filter.op != 'in':
items.order(tableobj._key) if self.use_ndb else items.order('__key__')
if self.use_ndb:
items = self.filter(items, tableobj, filter.name, filter.op, filter.value)
else:
items = items.filter('%s %s' % (filter.name,filter.op), filter.value)
if count_only:
items = [len(items) if isinstance(items,list) else items.count()]
elif not isinstance(items,list):
query = items
if args_get('left', None):
raise SyntaxError('Set: no left join in appengine')
if args_get('groupby', None):
raise SyntaxError('Set: no groupby in appengine')
orderby = args_get('orderby', False)
if orderby:
### THIS REALLY NEEDS IMPROVEMENT !!!
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
if isinstance(orderby,Expression):
orderby = self.expand(orderby)
orders = orderby.split(', ')
for order in orders:
if self.use_ndb:
#TODO There must be a better way
def make_order(o):
s = str(o)
desc = s[0] == '-'
s = (desc and s[1:]) or s
return (desc and -getattr(tableobj, s)) or getattr(tableobj, s)
_order = {'-id':-tableobj._key,'id':tableobj._key}.get(order)
if _order is None:
_order = make_order(order)
query = query.order(_order)
else:
order={'-id':'-__key__','id':'__key__'}.get(order,order)
query = query.order(order)
if args_get('limitby', None):
(lmin, lmax) = attributes['limitby']
limit, fetch_args = lmax-lmin, {'offset':lmin,'keys_only':True}
if self.use_ndb:
keys, cursor, more = query.fetch_page(limit,**fetch_args)
items = ndb.get_multi(keys)
else:
keys = query.fetch(limit, **fetch_args)
items = gae.get(keys)
cursor = query.cursor()
#cursor is only useful if there was a limit and we didn't return
# all results
if args_get('reusecursor'):
db['_lastcursor'] = cursor
else:
# if a limit is not specified, always return an iterator
items = query
return (items, tablename, projection or db[tablename].fields)
def select(self,query,fields,attributes):
"""
This is the GAE version of select. Some notes to consider:
- db['_lastsql'] is not set because there is no SQL statement string
for a GAE query
- 'nativeRef' is a magical fieldname used for self references on GAE
- optional attribute 'projection' when set to True will trigger
use of the GAE projection queries. note that there are rules for
what is accepted imposed by GAE: each field must be indexed,
projection queries cannot contain blob or text fields, and you
cannot use == and also select that same field. see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
- optional attribute 'filterfields' when set to True web2py will only
parse the explicitly listed fields into the Rows object, even though
all fields are returned in the query. This can be used to reduce
memory usage in cases where true projection queries are not
usable.
- optional attribute 'reusecursor' allows use of cursor with queries
that have the limitby attribute. Set the attribute to True for the
first query, set it to the value of db['_lastcursor'] to continue
a previous query. The user must save the cursor value between
requests, and the filters must be identical. It is up to the user
to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
"""
(items, tablename, fields) = self.select_raw(query,fields,attributes)
# self.db['_lastsql'] = self._select(query,fields,attributes)
rows = [[(t==self.db[tablename]._id.name and item) or \
(t=='nativeRef' and item) or getattr(item, t) \
for t in fields] for item in items]
colnames = ['%s.%s' % (tablename, t) for t in fields]
processor = attributes.get('processor',self.parse)
return processor(rows,fields,colnames,False)
def parse_list_integers(self, value, field_type):
return value[:] if self.use_ndb else value
def parse_list_strings(self, value, field_type):
return value[:] if self.use_ndb else value
def count(self,query,distinct=None,limit=None):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
(items, tablename, fields) = self.select_raw(query,count_only=True)
return items[0]
def delete(self,tablename, query):
"""
This function was changed on 2010-05-04 because according to
http://code.google.com/p/googleappengine/issues/detail?id=3119
GAE no longer supports deleting more than 1000 records.
"""
# self.db['_lastsql'] = self._delete(tablename,query)
(items, tablename, fields) = self.select_raw(query)
# items can be one item or a query
if not isinstance(items,list):
#use a keys_only query to ensure that this runs as a datastore
# small operations
leftitems = items.fetch(1000, keys_only=True)
counter = 0
while len(leftitems):
counter += len(leftitems)
if self.use_ndb:
ndb.delete_multi(leftitems)
else:
gae.delete(leftitems)
leftitems = items.fetch(1000, keys_only=True)
else:
counter = len(items)
if self.use_ndb:
ndb.delete_multi([item.key for item in items])
else:
gae.delete(items)
return counter
def update(self,tablename,query,update_fields):
# self.db['_lastsql'] = self._update(tablename,query,update_fields)
(items, tablename, fields) = self.select_raw(query)
counter = 0
for item in items:
for field, value in update_fields:
setattr(item, field.name, self.represent(value,field.type))
item.put()
counter += 1
LOGGER.info(str(counter))
return counter
def insert(self,table,fields):
dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields)
# table._db['_lastsql'] = self._insert(table,fields)
tmp = table._tableobj(**dfields)
tmp.put()
key = tmp.key if self.use_ndb else tmp.key()
rid = Reference(key.id())
(rid._table, rid._record, rid._gaekey) = (table, None, key)
return rid
def bulk_insert(self,table,items):
parsed_items = []
for item in items:
dfields=dict((f.name,self.represent(v,f.type)) for f,v in item)
parsed_items.append(table._tableobj(**dfields))
if self.use_ndb:
ndb.put_multi(parsed_items)
else:
gae.put(parsed_items)
return True
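Putting the docstring together, a typical NDB-backed setup with a projection query might look like this (sketch; requires the App Engine runtime, and the ndb_settings values are illustrative):

from gluon.dal import DAL, Field

ndb_settings = {'person': {'_use_memcache': True}}
db = DAL('google:datastore+ndb',
         adapter_args={'use_ndb': True, 'ndb_settings': ndb_settings})
db.define_table('person', Field('name'))
rows = db(db.person.name > '').select(db.person.name, projection=True)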
File diff suppressed because it is too large.
+134
@@ -0,0 +1,134 @@
# -*- coding: utf-8 -*-
import datetime
import re
from .._globals import IDENTITY
from .base import BaseAdapter
class InformixAdapter(BaseAdapter):
drivers = ('informixdb',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'BLOB SUB_TYPE 1',
'json': 'BLOB SUB_TYPE 1',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB SUB_TYPE 0',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'SERIAL',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BLOB SUB_TYPE 1',
'list:string': 'BLOB SUB_TYPE 1',
'list:reference': 'BLOB SUB_TYPE 1',
'big-id': 'BIGSERIAL',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
}
def RANDOM(self):
return 'Random()'
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
fetch_amt = lmax - lmin
dbms_version = int(self.connection.dbms_version.split('.')[0])
if lmin and (dbms_version >= 10):
# Requires Informix 10.0+
sql_s += ' SKIP %d' % (lmin, )
if fetch_amt and (dbms_version >= 9):
# Requires Informix 9.0+
sql_s += ' FIRST %d' % (fetch_amt, )
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'date':
if isinstance(obj, (datetime.date, datetime.datetime)):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
return "to_date('%s','%%Y-%%m-%%d')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T',' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
obj = str(obj)
return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
return None
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "informix"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
user = credential_decoder(user)
password = credential_decoder(password)
dsn = '%s@%s' % (db,host)
driver_args.update(user=user,password=password,autocommit=True)
def connector(dsn=dsn,driver_args=driver_args):
return self.driver.connect(dsn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def execute(self,command):
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command)
def lastrowid(self,table):
return self.cursor.sqlerrd[1]
class InformixSEAdapter(InformixAdapter):
""" work in progress """
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
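The URI follows the usual user:password@host/db shape and is folded into an informixdb DSN of the form db@host (values below are illustrative):

db = DAL('informix://myuser:mypass@ifxhost/mydb')
# internally: self.driver.connect('mydb@ifxhost', user=..., password=..., autocommit=True)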
+147
@@ -0,0 +1,147 @@
# -*- coding: utf-8 -*-
from .._globals import IDENTITY
from .._load import pyodbc
from .base import BaseAdapter
# NOTE invalid database object name (ANSI-SQL wants
# this form of name to be a delimited identifier)
INGRES_SEQNAME='ii***lineitemsequence'
class IngresAdapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
'integer': 'INTEGER4', # or int8...
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'ANSIDATE',
'time': 'TIME WITHOUT TIME ZONE',
'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
}
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'RANDOM()'
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
fetch_amt = lmax - lmin
if fetch_amt:
sql_s += ' FIRST %d ' % (fetch_amt, )
if lmin:
# Requires Ingres 9.2+
sql_o += ' OFFSET %d' % (lmin, )
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "ingres"
self._driver = pyodbc
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
connstr = uri.split(':', 1)[1]
# Simple URI processing
connstr = connstr.lstrip()
while connstr.startswith('/'):
connstr = connstr[1:]
if '=' in connstr:
# Assume we have a regular ODBC connection string and just use it
ruri = connstr
else:
# Assume only (local) dbname is passed in with OS auth
database_name = connstr
default_driver_name = 'Ingres'
vnode = '(local)'
servertype = 'ingres'
ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
# TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
if do_connect: self.reconnect()
def create_sequence_and_triggers(self, query, table, **args):
# post create table auto inc code (if needed)
# modify table to btree for performance....
# Older Ingres releases could use rule/trigger like Oracle above.
if hasattr(table,'_primarykey'):
modify_tbl_sql = 'modify %s to btree unique on %s' % \
(table._tablename,
', '.join(["'%s'" % x for x in table.primarykey]))
self.execute(modify_tbl_sql)
else:
tmp_seqname='%s_iisq' % table._tablename
query=query.replace(INGRES_SEQNAME, tmp_seqname)
self.execute('create sequence %s' % tmp_seqname)
self.execute(query)
self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))
def lastrowid(self,table):
tmp_seqname='%s_iisq' % table
self.execute('select current value for %s' % tmp_seqname)
return long(self.cursor.fetchone()[0]) # don't really need int type cast here...
class IngresUnicodeAdapter(IngresAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'NVARCHAR(%(length)s)',
'text': 'NCLOB',
'json': 'NCLOB',
'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
'integer': 'INTEGER4', # or int8...
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'ANSIDATE',
'time': 'TIME WITHOUT TIME ZONE',
'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'NCLOB',
'list:string': 'NCLOB',
'list:reference': 'NCLOB',
'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
}
+575
View File
@@ -0,0 +1,575 @@
# -*- coding: utf-8 -*-
import datetime
import logging
import re
from .._globals import IDENTITY
from ..objects import Table, Query, Field, Expression
from ..helpers.classes import SQLALL
from ..helpers.methods import xorify
from .base import NoSQLAdapter
class MongoDBAdapter(NoSQLAdapter):
drivers = ('pymongo',)
driver_auto_json = ['loads','dumps']
uploads_in_blob = False
types = {
'boolean': bool,
'string': str,
'text': str,
'json': str,
'password': str,
'blob': str,
'upload': str,
'integer': long,
'bigint': long,
'float': float,
'double': float,
'date': datetime.date,
'time': datetime.time,
'datetime': datetime.datetime,
'id': long,
'reference': long,
'list:string': list,
'list:integer': list,
'list:reference': list,
}
error_messages = {"javascript_needed": "This must yet be replaced" +
" with javascript in order to work."}
def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.uri = uri
if do_connect: self.find_driver(adapter_args)
import random
from bson.objectid import ObjectId
from bson.son import SON
import pymongo.uri_parser
m = pymongo.uri_parser.parse_uri(uri)
self.SON = SON
self.ObjectId = ObjectId
self.random = random
self.dbengine = 'mongodb'
self.folder = folder
db['_lastsql'] = ''
self.db_codec = 'UTF-8'
self._after_connection = after_connection
self.pool_size = pool_size
# the minimum number of replicas to wait for on insert/update
self.minimumreplication = adapter_args.get('minimumreplication',0)
# inserts and selects were originally performed asynchronously;
# the default is now synchronous, unless overruled by this
# adapter argument or a per-call parameter
self.safe = adapter_args.get('safe',True)
# load user setting for uploads in blob storage
self.uploads_in_blob = adapter_args.get('uploads_in_blob', False)
if isinstance(m,tuple):
m = {"database" : m[1]}
if m.get('database') is None:
raise SyntaxError("Database is required!")
def connector(uri=self.uri,m=m):
# Connection() is deprecated
if hasattr(self.driver, "MongoClient"):
Connection = self.driver.MongoClient
else:
Connection = self.driver.Connection
return Connection(uri)[m.get('database')]
self.reconnect(connector,cursor=False)
def object_id(self, arg=None):
""" Convert input to a valid Mongodb ObjectId instance
self.object_id("<random>") -> ObjectId (not unique) instance """
if not arg:
arg = 0
if isinstance(arg, basestring):
# we assume an integer as default input
rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
if arg.isdigit() and (not rawhex):
arg = int(arg)
elif arg == "<random>":
arg = int("0x%sL" % \
"".join([self.random.choice("0123456789abcdef") \
for x in range(24)]), 0)
elif arg.isalnum():
if not arg.startswith("0x"):
arg = "0x%s" % arg
try:
arg = int(arg, 0)
except ValueError, e:
raise ValueError(
"invalid objectid argument string: %s" % e)
else:
raise ValueError("Invalid objectid argument string. " +
"Requires an integer or base 16 value")
elif isinstance(arg, self.ObjectId):
return arg
if not isinstance(arg, (int, long)):
raise TypeError("object_id argument must be of type " +
"ObjectId or an objectid representable integer")
hexvalue = hex(arg)[2:].rstrip('L').zfill(24)
return self.ObjectId(hexvalue)
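# Usage sketch (hypothetical values, for illustration only):
#   adapter.object_id(42)          # ObjectId('00000000000000000000002a')
#   adapter.object_id('1f')        # hex string -> ObjectId('00000000000000000000001f')
#   adapter.object_id('<random>')  # pseudo-random ObjectId, not guaranteed unique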
def parse_reference(self, value, field_type):
# here we have to check for ObjectID before base parse
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_reference(value, field_type)
def parse_id(self, value, field_type):
if isinstance(value, self.ObjectId):
value = long(str(value), 16)
return super(MongoDBAdapter,
self).parse_id(value, field_type)
def represent(self, obj, fieldtype):
# the base adapter does not support MongoDB ObjectId
if isinstance(obj, self.ObjectId):
value = obj
else:
value = NoSQLAdapter.represent(self, obj, fieldtype)
# reference types must be converted to ObjectId
if fieldtype =='date':
if value is None:
return value
# mongodb has no date-only type; dates must be stored as
# datetime, string or integer, so add a dummy time component
t = datetime.time(0, 0, 0)
return datetime.datetime.combine(value, t)
elif fieldtype == 'time':
if value is None:
return value
# mongodb has no time-only type; times must be stored as
# datetime, string or integer, so add a dummy date component
d = datetime.date(2000, 1, 1)
return datetime.datetime.combine(d, value)
elif fieldtype == "blob":
if value is None:
return value
from bson import Binary
if not isinstance(value, Binary):
if not isinstance(value, basestring):
return Binary(str(value))
return Binary(value)
return value
elif (isinstance(fieldtype, basestring) and
fieldtype.startswith('list:')):
if fieldtype.startswith('list:reference'):
newval = []
for v in value:
newval.append(self.object_id(v))
return newval
return value
elif ((isinstance(fieldtype, basestring) and
fieldtype.startswith("reference")) or
(isinstance(fieldtype, Table)) or fieldtype=="id"):
value = self.object_id(value)
return value
def create_table(self, table, migrate=True, fake_migrate=False,
polymodel=None, isCapped=False):
if isCapped:
raise RuntimeError("Not implemented")
def count(self, query, distinct=None, snapshot=True):
if distinct:
raise RuntimeError("COUNT DISTINCT not supported")
if not isinstance(query,Query):
raise SyntaxError("Not Supported")
tablename = self.get_table(query)
return long(self.select(query,[self.db[tablename]._id], {},
count=True,snapshot=snapshot)['count'])
# It might be faster to call pymongo's .count() directly, i.e.
# connection[table].find(query).count(), since that would avoid
# building the full return set.
def expand(self, expression, field_type=None):
if isinstance(expression, Query):
# any query using 'id'
# set name as _id (as per pymongo/mongodb primary key)
# convert second arg to an objectid field
# (if its not already)
# if second arg is 0 convert to objectid
if isinstance(expression.first,Field) and \
((expression.first.type == 'id') or \
("reference" in expression.first.type)):
if expression.first.type == 'id':
expression.first.name = '_id'
# cast to Mongo ObjectId
if isinstance(expression.second, (tuple, list, set)):
expression.second = [self.object_id(item) for
item in expression.second]
else:
expression.second = self.object_id(expression.second)
result = expression.op(expression.first, expression.second)
if isinstance(expression, Field):
if expression.type=='id':
result = "_id"
else:
result = expression.name
elif isinstance(expression, (Expression, Query)):
if not expression.second is None:
result = expression.op(expression.first, expression.second)
elif not expression.first is None:
result = expression.op(expression.first)
elif not isinstance(expression.op, str):
result = expression.op()
else:
result = expression.op
elif field_type:
result = self.represent(expression,field_type)
elif isinstance(expression,(list,tuple)):
result = [self.represent(item,field_type) for
item in expression]
else:
result = expression
return result
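# Illustrative example (assumed model): a query like db.thing.id == 5
# is expanded by renaming the field to '_id' and casting the value,
# yielding the pymongo filter {'_id': ObjectId(...)}.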
def drop(self, table, mode=''):
ctable = self.connection[table._tablename]
ctable.drop()
def truncate(self, table, mode, safe=None):
if safe is None:
safe = self.safe
ctable = self.connection[table._tablename]
ctable.remove(None, safe=True)
def select(self, query, fields, attributes, count=False,
snapshot=False):
mongofields_dict = self.SON()
mongoqry_dict = {}
new_fields, mongosort_list = [], []
# try an orderby attribute
orderby = attributes.get('orderby', False)
limitby = attributes.get('limitby', False)
# distinct = attributes.get('distinct', False)
if 'for_update' in attributes:
logging.warn('mongodb does not support for_update')
for key in set(attributes.keys())-set(('limitby',
'orderby','for_update')):
if attributes[key] is not None:
logging.warn('select attribute not implemented: %s' % key)
if limitby:
limitby_skip, limitby_limit = limitby[0], int(limitby[1])
else:
limitby_skip = limitby_limit = 0
if orderby:
if isinstance(orderby, (list, tuple)):
orderby = xorify(orderby)
# !!!! need to add 'random'
for f in self.expand(orderby).split(','):
if f.startswith('-'):
mongosort_list.append((f[1:], -1))
else:
mongosort_list.append((f, 1))
for item in fields:
if isinstance(item, SQLALL):
new_fields += item._table
else:
new_fields.append(item)
fields = new_fields
if isinstance(query,Query):
tablename = self.get_table(query)
elif len(fields) != 0:
tablename = fields[0].tablename
else:
raise SyntaxError("The table name could not be found in " +
"the query nor from the select statement.")
mongoqry_dict = self.expand(query)
fields = fields or self.db[tablename]
for field in fields:
mongofields_dict[field.name] = 1
ctable = self.connection[tablename]
if count:
return {'count' : ctable.find(
mongoqry_dict, mongofields_dict,
skip=limitby_skip, limit=limitby_limit,
sort=mongosort_list, snapshot=snapshot).count()}
else:
# pymongo cursor object
mongo_list_dicts = ctable.find(mongoqry_dict,
mongofields_dict, skip=limitby_skip,
limit=limitby_limit, sort=mongosort_list,
snapshot=snapshot)
rows = []
# populate row in proper order
# Here we replace ._id with .id to follow the standard naming
colnames = []
newnames = []
for field in fields:
colname = str(field)
colnames.append(colname)
tablename, fieldname = colname.split(".")
if fieldname == "_id":
# Mongodb reserved uuid key
field.name = "id"
newnames.append(".".join((tablename, field.name)))
for record in mongo_list_dicts:
row=[]
for colname in colnames:
tablename, fieldname = colname.split(".")
# switch to Mongo _id uuids for retrieving
# record id's
if fieldname == "id": fieldname = "_id"
if fieldname in record:
value = record[fieldname]
else:
value = None
row.append(value)
rows.append(row)
processor = attributes.get('processor', self.parse)
result = processor(rows, fields, newnames, False)
return result
def insert(self, table, fields, safe=None):
"""Safe determines whether a asynchronous request is done or a
synchronous action is done
For safety, we use by default synchronous requests"""
values = dict()
if safe is None:
safe = self.safe
ctable = self.connection[table._tablename]
for k, v in fields:
if not k.name in ["id", "safe"]:
fieldname = k.name
fieldtype = table[k.name].type
values[fieldname] = self.represent(v, fieldtype)
ctable.insert(values, safe=safe)
return long(str(values['_id']), 16)
def update(self, tablename, query, fields, safe=None):
if safe is None:
safe = self.safe
# return the number of adjusted rows or zero, but raise no
# exceptions related to not finding the result
if not isinstance(query, Query):
raise SyntaxError("Not Supported")
amount = self.count(query, False)
filter = None
if query:
filter = self.expand(query)
# do not try to update id fields to avoid backend errors
modify = {'$set': dict((k.name, self.represent(v, k.type)) for
k, v in fields if (not k.name in ("_id", "id")))}
try:
result = self.connection[tablename].update(filter,
modify, multi=True, safe=safe)
if safe:
try:
# if result count is available fetch it
return result["n"]
except (KeyError, AttributeError, TypeError):
return amount
else:
return amount
except Exception, e:
# TODO reverse the update query to verify that it succeeded
raise RuntimeError("uncaught exception when updating rows: %s" % e)
def delete(self, tablename, query, safe=None):
if safe is None:
safe = self.safe
amount = self.count(query, False)
if not isinstance(query, Query):
raise RuntimeError("query type %s is not supported" % \
type(query))
filter = self.expand(query)
self.connection[tablename].remove(filter, safe=safe)
return amount
def bulk_insert(self, table, items):
return [self.insert(table,item) for item in items]
## OPERATORS
def INVERT(self, first):
#print "in invert first=%s" % first
return '-%s' % self.expand(first)
# TODO This will probably not work:(
def NOT(self, first):
return {'$not': self.expand(first)}
def AND(self,first,second):
# pymongo expects: .find({'$and': [{'x':'1'}, {'y':'2'}]})
return {'$and': [self.expand(first),self.expand(second)]}
def OR(self,first,second):
# pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]})
return {'$or': [self.expand(first),self.expand(second)]}
def BELONGS(self, first, second):
if isinstance(second, str):
return {self.expand(first) : {"$in" : [ second[:-1]]} }
elif second==[] or second==() or second==set():
return {1:0}
items = [self.expand(item, first.type) for item in second]
return {self.expand(first) : {"$in" : items} }
def EQ(self,first,second=None):
result = {}
result[self.expand(first)] = self.expand(second)
return result
def NE(self, first, second=None):
result = {}
result[self.expand(first)] = {'$ne': self.expand(second)}
return result
def LT(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s < None" % first)
result = {}
result[self.expand(first)] = {'$lt': self.expand(second)}
return result
def LE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s <= None" % first)
result = {}
result[self.expand(first)] = {'$lte': self.expand(second)}
return result
def GT(self,first,second):
result = {}
result[self.expand(first)] = {'$gt': self.expand(second)}
return result
def GE(self,first,second=None):
if second is None:
raise RuntimeError("Cannot compare %s >= None" % first)
result = {}
result[self.expand(first)] = {'$gte': self.expand(second)}
return result
def ADD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s + %s' % (self.expand(first),
self.expand(second, first.type))
def SUB(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s - %s)' % (self.expand(first),
self.expand(second, first.type))
def MUL(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s * %s)' % (self.expand(first),
self.expand(second, first.type))
def DIV(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s / %s)' % (self.expand(first),
self.expand(second, first.type))
def MOD(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '(%s %% %s)' % (self.expand(first),
self.expand(second, first.type))
def AS(self, first, second):
raise NotImplementedError(self.error_messages["javascript_needed"])
return '%s AS %s' % (self.expand(first), second)
# We could implement an option that simulates a full-featured SQL
# database, but such an option should be set explicitly or
# implemented as a separate library.
def ON(self, first, second):
raise NotImplementedError("This is not possible in NoSQL" +
" but can be simulated with a wrapper.")
return '%s ON %s' % (self.expand(first), self.expand(second))
# BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
# WHICH ONE IS BEST?
def COMMA(self, first, second):
return '%s, %s' % (self.expand(first), self.expand(second))
def LIKE(self, first, second):
#escaping regex operators?
return {self.expand(first): ('%s' % \
self.expand(second, 'string').replace('%','/'))}
def ILIKE(self, first, second):
val = second if isinstance(second,self.ObjectId) else {
'$regex': second.replace('%', ''), '$options': 'i'}
return {self.expand(first): val}
def STARTSWITH(self, first, second):
#escaping regex operators?
return {self.expand(first): ('/^%s/' % \
self.expand(second, 'string'))}
def ENDSWITH(self, first, second):
#escaping regex operators?
return {self.expand(first): ('/%s$/' % \
self.expand(second, 'string'))}
def CONTAINS(self, first, second, case_sensitive=False):
# case_sensitive is silently ignored; only case-sensitive
# matching is supported. There is a technical difference, but
# mongodb doesn't support it and the result will be the same
val = second if isinstance(second,self.ObjectId) else \
{'$regex':".*" + re.escape(self.expand(second, 'string')) + ".*"}
return {self.expand(first) : val}
def LIKE(self, first, second):
import re
return {self.expand(first): {'$regex': \
re.escape(self.expand(second,
'string')).replace('%','.*')}}
#TODO verify full compatibility with the official SQL LIKE operator
def STARTSWITH(self, first, second):
#TODO Solve almost the same problem as with endswith
import re
return {self.expand(first): {'$regex' : '^' +
re.escape(self.expand(second,
'string'))}}
#TODO verify full compatibility with the official SQL LIKE operator
def ENDSWITH(self, first, second):
#escaping regex operators?
#TODO if one searches for a name like zsa_corbitt with
# endswith('a'), that record is also returned,
# although it ends with a t
import re
return {self.expand(first): {'$regex': \
re.escape(self.expand(second, 'string')) + '$'}}
#TODO verify full compatibility with the official Oracle CONTAINS operator
def CONTAINS(self, first, second, case_sensitive=False):
# case_sensitive is silently ignored; only case-sensitive
# matching is supported. There is a technical difference, but
# mongodb doesn't support it and the result will be the same
#TODO contains operators need to be transformed to Regex
return {self.expand(first) : {'$regex': \
".*" + re.escape(self.expand(second, 'string')) + ".*"}}
+513
View File
@@ -0,0 +1,513 @@
# -*- coding: utf-8 -*-
import re
import sys
from .._globals import IDENTITY, LOGGER
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
class MSSQLAdapter(BaseAdapter):
drivers = ('pyodbc',)
T_SEP = 'T'
QUOTE_TEMPLATE = '"%s"'
types = {
'boolean': 'BIT',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def concat_add(self,tablename):
return '; ALTER TABLE %s ADD ' % tablename
def varquote(self,name):
return varquote_aux(name,'[%s]')
def EXTRACT(self,field,what):
return "DATEPART(%s,%s)" % (what, self.expand(field))
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'NEWID()'
def ALLOW_NULL(self):
return ' NULL'
def CAST(self, first, second):
return first # apparently no cast necessary in MSSQL
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
def PRIMARY_KEY(self,key):
return 'PRIMARY KEY CLUSTERED (%s)' % key
def AGGREGATE(self, first, what):
if what == 'LENGTH':
what = 'LEN'
return "%s(%s)" % (what, self.expand(first))
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
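# Illustrative example (assumed inputs): limitby=(5, 15) emits
# 'SELECT TOP 15 ...'; the lower bound is applied client-side by
# rowslice() below, which trims the already-fetched rows.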
TRUE = 1
FALSE = 0
REGEX_DSN = re.compile('^(?P<dsn>.+)$')
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "mssql"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
ruri = uri.split('://',1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError:
e = sys.exc_info()[1]
LOGGER.error('NdGpatch error')
raise e
# was cnxn = 'DSN=%s' % dsn
cnxn = dsn
else:
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
# Parse the optional url name-value arg pairs after the '?'
# (in the form of arg1=value1&arg2=value2&...)
# Default values (drivers like FreeTDS insist on uppercase parameter keys)
argsdict = { 'DRIVER':'{SQL Server}' }
urlargs = m.group('urlargs') or ''
for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
% (host, port, db, user, password, urlargs)
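# Illustrative example (hypothetical URI): for
# 'mssql://user:pw@dbhost/mydb?DRIVER={FreeTDS}' this yields
# 'SERVER=dbhost;PORT=1433;DATABASE=mydb;UID=user;PWD=pw;DRIVER={FreeTDS}'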
def connector(cnxn=cnxn,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def lastrowid(self,table):
#self.execute('SELECT @@IDENTITY;')
self.execute('SELECT SCOPE_IDENTITY();')
return long(self.cursor.fetchone()[0])
def rowslice(self,rows,minimum=0,maximum=None):
if maximum is None:
return rows[minimum:]
return rows[minimum:maximum]
def EPOCH(self, first):
return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)
def CONCAT(self, *items):
return '(%s)' % ' + '.join(self.expand(x,'string') for x in items)
# GIS Spatial Extensions
# No STAsGeoJSON in MSSQL
def ST_ASTEXT(self, first):
return '%s.STAsText()' %(self.expand(first))
def ST_CONTAINS(self, first, second):
return '%s.STContains(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
return '%s.STDistance(%s)' %(self.expand(first), self.expand(second, first.type))
def ST_EQUALS(self, first, second):
return '%s.STEquals(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
return '%s.STIntersects(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
return '%s.STOverlaps(%s)=1' %(self.expand(first), self.expand(second, first.type))
# no STSimplify in MSSQL
def ST_TOUCHES(self, first, second):
return '%s.STTouches(%s)=1' %(self.expand(first), self.expand(second, first.type))
def ST_WITHIN(self, first, second):
return '%s.STWithin(%s)=1' %(self.expand(first), self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geometry'):
srid = 0 # MS SQL default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
return "geometry::STGeomFromText('%s',%s)" %(obj, srid)
elif field_is_type('geography'):
srid = 4326 # MS SQL default srid for geography
geotype, parms = fieldtype[:-1].split('(')
if parms:
srid = parms
return "geography::STGeomFromText('%s',%s)" %(obj, srid)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return BaseAdapter.represent(self, obj, fieldtype)
class MSSQL3Adapter(MSSQLAdapter):
"""Experimental support for pagination in MSSQL
Requires MSSQL >= 2005, uses `ROW_NUMBER()`
"""
types = {
'boolean': 'BIT',
'string': 'VARCHAR(%(length)s)',
'text': 'VARCHAR(MAX)',
'json': 'VARCHAR(MAX)',
'password': 'VARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'TIME(7)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'VARCHAR(MAX)',
'list:string': 'VARCHAR(MAX)',
'list:reference': 'VARCHAR(MAX)',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if lmin == 0:
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
lmin += 1
sql_o_inner = sql_o[sql_o.find('ORDER BY ')+9:]
sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
sql_f_inner = [f for f in sql_f.split(',')]
sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
sql_f_iproxy = ', '.join(sql_f_iproxy)
sql_f_oproxy = ', '.join(sql_f_outer)
return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s,sql_f_oproxy,sql_s,sql_f,sql_f_iproxy,sql_t,sql_w,sql_g_inner,lmin,lmax)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s,sql_f,sql_t,sql_w,sql_o)
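# Illustrative sketch (assumed inputs): limitby=(10, 20) rewrites the
# query as roughly
#   SELECT f_0, f_1 FROM (SELECT ROW_NUMBER() OVER (ORDER BY <fields>)
#     AS w_row, <field0> AS f_0, <field1> AS f_1 FROM <table> <where>)
#   TMP WHERE w_row BETWEEN 11 AND 20;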
def rowslice(self,rows,minimum=0,maximum=None):
return rows
class MSSQL4Adapter(MSSQLAdapter):
"""Support for "native" pagination
Requires MSSQL >= 2012, uses `OFFSET ... ROWS ... FETCH NEXT ... ROWS ONLY`
"""
types = {
'boolean': 'BIT',
'string': 'VARCHAR(%(length)s)',
'text': 'VARCHAR(MAX)',
'json': 'VARCHAR(MAX)',
'password': 'VARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'TIME(7)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'VARCHAR(MAX)',
'list:string': 'VARCHAR(MAX)',
'list:reference': 'VARCHAR(MAX)',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if lmin == 0:
#TOP is still slightly faster, especially because
#web2py's default when fetching references is to not
#specify an orderby clause
sql_s += ' TOP %i' % lmax
else:
if not sql_o:
#without an orderby clause we can't use the new OFFSET/FETCH
#syntax; the developer chose their own poison, so fall back
#to a random ordering
sql_o += ' ORDER BY %s' % self.RANDOM()
sql_o += ' OFFSET %i ROWS FETCH NEXT %i ROWS ONLY' % (lmin, lmax - lmin)
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
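# Illustrative example (assumed inputs): limitby=(10, 20) with an
# ORDER BY present appends ' OFFSET 10 ROWS FETCH NEXT 10 ROWS ONLY'.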
def rowslice(self,rows,minimum=0,maximum=None):
return rows
class MSSQL2Adapter(MSSQLAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'NVARCHAR(%(length)s)',
'text': 'NTEXT',
'json': 'NTEXT',
'password': 'NVARCHAR(%(length)s)',
'blob': 'IMAGE',
'upload': 'NVARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'NTEXT',
'list:string': 'NTEXT',
'list:reference': 'NTEXT',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def represent(self, obj, fieldtype):
value = BaseAdapter.represent(self, obj, fieldtype)
if fieldtype in ('string','text', 'json') and value[:1]=="'":
value = 'N'+value
return value
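# Illustrative example: represent('abc', 'string') returns N'abc',
# marking the literal as Unicode for the NVARCHAR/NTEXT columns above.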
def execute(self,a):
return self.log_execute(a.decode('utf8'))
class VerticaAdapter(MSSQLAdapter):
drivers = ('pyodbc',)
T_SEP = ' '
types = {
'boolean': 'BOOLEAN',
'string': 'VARCHAR(%(length)s)',
'text': 'BYTEA',
'json': 'VARCHAR(%(length)s)',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'IDENTITY',
'reference': 'INT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BYTEA',
'list:string': 'BYTEA',
'list:reference': 'BYTEA',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def EXTRACT(self, first, what):
return "DATE_PART('%s', TIMESTAMP %s)" % (what, self.expand(first))
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['TRUNCATE %s %s;' % (tablename, mode or '')]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin)
return 'SELECT %s %s FROM %s%s%s;' % \
(sql_s, sql_f, sql_t, sql_w, sql_o)
def lastrowid(self,table):
self.execute('SELECT LAST_INSERT_ID();')
return long(self.cursor.fetchone()[0])
def execute(self, a):
return self.log_execute(a)
class SybaseAdapter(MSSQLAdapter):
drivers = ('Sybase',)
types = {
'boolean': 'BIT',
'string': 'CHAR VARYING(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'CHAR VARYING(%(length)s)',
'blob': 'IMAGE',
'upload': 'CHAR VARYING(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATETIME',
'time': 'CHAR(8)',
'datetime': 'DATETIME',
'id': 'INT IDENTITY PRIMARY KEY',
'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'geometry',
'geography': 'geography',
'big-id': 'BIGINT IDENTITY PRIMARY KEY',
'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "sybase"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
# ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
ruri = uri.split('://',1)[1]
if '@' not in ruri:
try:
m = self.REGEX_DSN.match(ruri)
if not m:
raise SyntaxError(
'Parsing uri string(%s) has no result' % self.uri)
dsn = m.group('dsn')
if not dsn:
raise SyntaxError('DSN required')
except SyntaxError:
e = sys.exc_info()[1]
LOGGER.error('NdGpatch error')
raise e
else:
m = self.REGEX_URI.match(uri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '1433'
dsn = 'sybase:host=%s:%s;dbname=%s' % (host,port,db)
driver_args.update(user = credential_decoder(user),
password = credential_decoder(password))
def connector(dsn=dsn,driver_args=driver_args):
return self.driver.connect(dsn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
+140
View File
@@ -0,0 +1,140 @@
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
class MySQLAdapter(BaseAdapter):
drivers = ('MySQLdb','pymysql', 'mysqlconnector')
commit_on_alter_table = True
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONGTEXT',
'json': 'LONGTEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONGBLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'DATETIME',
'id': 'INT AUTO_INCREMENT NOT NULL',
'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONGTEXT',
'list:string': 'LONGTEXT',
'list:reference': 'LONGTEXT',
'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT `FK_%(constraint_name)s` FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
QUOTE_TEMPLATE = "`%s`"
def varquote(self,name):
return varquote_aux(name,'`%s`')
def RANDOM(self):
return 'RAND()'
def SUBSTRING(self,field,parameters):
return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
parameters[0], parameters[1])
def EPOCH(self, first):
return "UNIX_TIMESTAMP(%s)" % self.expand(first)
def CONCAT(self, *items):
return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)
def REGEXP(self,first,second):
return '(%s REGEXP %s)' % (self.expand(first),
self.expand(second,'string'))
def CAST(self, first, second):
if second=='LONGTEXT': second = 'CHAR'
return 'CAST(%s AS %s)' % (first, second)
def _drop(self,table,mode):
# breaks db integrity but without this mysql does not drop table
table_rname = table.sqlsafe
return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table_rname,
'SET FOREIGN_KEY_CHECKS=1;']
def _insert_empty(self, table):
return 'INSERT INTO %s VALUES (DEFAULT);' % (table.sqlsafe)
def distributed_transaction_begin(self,key):
self.execute('XA START;')
def prepare(self,key):
self.execute("XA END;")
self.execute("XA PREPARE;")
def commit_prepared(self,key):
self.execute("XA COMMIT;")
def rollback_prepared(self,key):
self.execute("XA ROLLBACK;")
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "mysql"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError(
"Invalid URI string in DAL: %s" % self.uri)
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = int(m.group('port') or '3306')
charset = m.group('charset') or 'utf8'
driver_args.update(db=db,
user=credential_decoder(user),
passwd=credential_decoder(password),
host=host,
port=port,
charset=charset)
def connector(driver_args=driver_args):
return self.driver.connect(**driver_args)
self.connector = connector
if do_connect: self.reconnect()
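# Illustrative example (hypothetical URI):
# 'mysql://user:pw@dbhost:3306/mydb?set_encoding=utf8' yields
# driver_args with db='mydb', user='user', passwd='pw',
# host='dbhost', port=3306 and charset='utf8'.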
def after_connection(self):
self.execute('SET FOREIGN_KEY_CHECKS=1;')
self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
def lastrowid(self,table):
self.execute('select last_insert_id();')
return int(self.cursor.fetchone()[0])
+191
View File
@@ -0,0 +1,191 @@
# -*- coding: utf-8 -*-
import base64
import datetime
import re
from .._globals import IDENTITY
from .._load import cx_Oracle
from .base import BaseAdapter
class OracleAdapter(BaseAdapter):
drivers = ('cx_Oracle',)
commit_on_alter_table = False
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR2(%(length)s)',
'text': 'CLOB',
'json': 'CLOB',
'password': 'VARCHAR2(%(length)s)',
'blob': 'CLOB',
'upload': 'VARCHAR2(%(length)s)',
'integer': 'INT',
'bigint': 'NUMBER',
'float': 'FLOAT',
'double': 'BINARY_DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'CHAR(8)',
'datetime': 'DATE',
'id': 'NUMBER PRIMARY KEY',
'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'CLOB',
'list:string': 'CLOB',
'list:reference': 'CLOB',
'big-id': 'NUMBER PRIMARY KEY',
'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def trigger_name(self,tablename):
return '%s_trigger' % tablename
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
def RANDOM(self):
return 'dbms_random.value'
def NOT_NULL(self,default,field_type):
return 'DEFAULT %s NOT NULL' % self.represent(default,field_type)
def REGEXP(self, first, second):
return 'REGEXP_LIKE(%s, %s)' % (self.expand(first),
self.expand(second, 'string'))
def _drop(self,table,mode):
sequence_name = table._sequence_name
return ['DROP TABLE %s %s;' % (table.sqlsafe, mode), 'DROP SEQUENCE %s;' % sequence_name]
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if len(sql_w) > 1:
sql_w_row = sql_w + ' AND w_row > %i' % lmin
else:
sql_w_row = 'WHERE w_row > %i' % lmin
return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
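# Illustrative sketch (assumed inputs): limitby=(10, 30) nests the
# query as roughly
#   SELECT <fields> FROM (SELECT w_tmp.*, ROWNUM w_row FROM
#     (SELECT <fields> FROM <table> <where> <order>) w_tmp
#     WHERE ROWNUM<=30) <table> WHERE w_row > 10 <order>;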
def constraint_name(self, tablename, fieldname):
constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
if len(constraint_name)>30:
constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
return constraint_name
def represent_exceptions(self, obj, fieldtype):
if fieldtype == 'blob':
obj = base64.b64encode(str(obj))
return ":CLOB('%s')" % obj
elif fieldtype == 'date':
if isinstance(obj, (datetime.date, datetime.datetime)):
obj = obj.isoformat()[:10]
else:
obj = str(obj)
return "to_date('%s','yyyy-mm-dd')" % obj
elif fieldtype == 'datetime':
if isinstance(obj, datetime.datetime):
obj = obj.isoformat()[:19].replace('T',' ')
elif isinstance(obj, datetime.date):
obj = obj.isoformat()[:10]+' 00:00:00'
else:
obj = str(obj)
return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
return None
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "oracle"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
if not 'threaded' in driver_args:
driver_args['threaded']=True
def connector(uri=ruri,driver_args=driver_args):
return self.driver.connect(uri,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")
def execute(self, command, args=None):
args = args or []
i = 1
while True:
m = self.oracle_fix.match(command)
if not m:
break
command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
args.append(m.group('clob')[6:-2].replace("''", "'"))
i += 1
if command[-1:]==';':
command = command[:-1]
return self.log_execute(command, args)
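# Illustrative example (assumed input): the command
#   UPDATE t SET body=:CLOB('it''s') WHERE id=1;
# is rewritten to "UPDATE t SET body=:1 WHERE id=1" with args ["it's"],
# so the CLOB value is bound as a parameter instead of inlined.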
def create_sequence_and_triggers(self, query, table, **args):
tablename = table._tablename
id_name = table._id.name
sequence_name = table._sequence_name
trigger_name = table._trigger_name
self.execute(query)
self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
self.execute("""
CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
DECLARE
curr_val NUMBER;
diff_val NUMBER;
PRAGMA autonomous_transaction;
BEGIN
IF :NEW.%(id)s IS NOT NULL THEN
EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
diff_val := :NEW.%(id)s - curr_val - 1;
IF diff_val != 0 THEN
EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
END IF;
END IF;
SELECT %(sequence_name)s.nextval INTO :NEW.%(id)s FROM DUAL;
END;
""" % dict(trigger_name=trigger_name, tablename=tablename,
sequence_name=sequence_name,id=id_name))
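# The trigger above keeps the sequence aligned with explicit ids: when
# a row arrives with :NEW.id set, the sequence is advanced (inside an
# autonomous transaction) so that the final NEXTVAL assigned to the
# row matches the requested id; otherwise ids come from the sequence.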
def lastrowid(self,table):
sequence_name = table._sequence_name
self.execute('SELECT %s.currval FROM dual;' % sequence_name)
return long(self.cursor.fetchone()[0])
#def parse_value(self, value, field_type, blob_decode=True):
# if blob_decode and isinstance(value, cx_Oracle.LOB):
# try:
# value = value.read()
# except self.driver.ProgrammingError:
# # After a subsequent fetch the LOB value is not valid anymore
# pass
# return BaseAdapter.parse_value(self, value, field_type, blob_decode)
def _fetchall(self):
if any(x[1]==cx_Oracle.LOB for x in self.cursor.description):
return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
for c in r]) for r in self.cursor]
else:
return self.cursor.fetchall()
def sqlsafe_table(self, tablename, ot=None):
if ot is not None:
return (self.QUOTE_TEMPLATE + ' ' \
+ self.QUOTE_TEMPLATE) % (ot, tablename)
return self.QUOTE_TEMPLATE % tablename
+425
View File
@@ -0,0 +1,425 @@
# -*- coding: utf-8 -*-
import re
from .._load import psycopg2_adapt
from .._globals import IDENTITY, LOGGER
from ..helpers.methods import varquote_aux
from .base import BaseAdapter
class PostgreSQLAdapter(BaseAdapter):
drivers = ('psycopg2','pg8000')
QUOTE_TEMPLATE = '"%s"'
support_distributed_transaction = True
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'TEXT',
'list:string': 'TEXT',
'list:reference': 'TEXT',
'geometry': 'GEOMETRY',
'geography': 'GEOGRAPHY',
'big-id': 'BIGSERIAL PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference FK': ', CONSTRAINT "FK_%(constraint_name)s" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'reference TFK': ' CONSTRAINT "FK_%(foreign_table)s_PK" FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
}
def varquote(self,name):
return varquote_aux(name,'"%s"')
def adapt(self,obj):
if self.driver_name == 'psycopg2':
return psycopg2_adapt(obj).getquoted()
elif self.driver_name == 'pg8000':
return "'%s'" % str(obj).replace("%","%%").replace("'","''")
else:
return "'%s'" % str(obj).replace("'","''")
def sequence_name(self,table):
return self.QUOTE_TEMPLATE % (table + '_id_seq')
def RANDOM(self):
return 'RANDOM()'
def ADD(self, first, second):
t = first.type
if t in ('text','string','password', 'json', 'upload','blob'):
return '(%s || %s)' % (self.expand(first), self.expand(second, t))
else:
return '(%s + %s)' % (self.expand(first), self.expand(second, t))
def distributed_transaction_begin(self,key):
return
def prepare(self,key):
self.execute("PREPARE TRANSACTION '%s';" % key)
def commit_prepared(self,key):
self.execute("COMMIT PREPARED '%s';" % key)
def rollback_prepared(self,key):
self.execute("ROLLBACK PREPARED '%s';" % key)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
# self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
# self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
# % (table._tablename, table._fieldname, table._sequence_name))
self.execute(query)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326,
after_connection=None):
self.db = db
self.dbengine = "postgres"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.srid = srid
self.find_or_make_work_folder()
self._last_insert = None # for INSERT ... RETURNING ID
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '5432'
sslmode = m.group('sslmode')
if sslmode:
msg = ("dbname='%s' user='%s' host='%s' "
"port=%s password='%s' sslmode='%s'") \
% (db, user, host, port, password, sslmode)
else:
msg = ("dbname='%s' user='%s' host='%s' "
"port=%s password='%s'") \
% (db, user, host, port, password)
# choose the driver according to the uri
if self.driver:
self.__version__ = "%s %s" % (self.driver.__name__,
self.driver.__version__)
else:
self.__version__ = None
def connector(msg=msg,driver_args=driver_args):
return self.driver.connect(msg,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.set_client_encoding('UTF8')
self.execute("SET standard_conforming_strings=on;")
self.try_json()
def _insert(self, table, fields):
table_rname = table.sqlsafe
if fields:
keys = ','.join(f.sqlsafe_name for f, v in fields)
values = ','.join(self.expand(v, f.type) for f, v in fields)
if table._id:
self._last_insert = (table._id, 1)
return 'INSERT INTO %s(%s) VALUES (%s) RETURNING %s;' % (
table_rname, keys, values, table._id.name)
else:
self._last_insert = None
return 'INSERT INTO %s(%s) VALUES (%s);' % (table_rname, keys, values)
else:
self._last_insert = None
return self._insert_empty(table)
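# Illustrative example (assumed model): inserting into a table with an
# id column renders as
#   INSERT INTO "thing"("name") VALUES ('x') RETURNING id;
# so lastrowid() can read the new id from the same cursor.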
def lastrowid(self, table=None):
if self._last_insert:
return int(self.cursor.fetchone()[0])
else:
self.execute("select lastval()")
return int(self.cursor.fetchone()[0])
def try_json(self):
# check JSON data type support
# (called from after_connection)
if self.driver_name == "pg8000":
supports_json = self.connection.server_version >= "9.2.0"
elif (self.driver_name == "psycopg2" and
self.driver.__version__ >= "2.0.12"):
supports_json = self.connection.server_version >= 90200
elif self.driver_name == "zxJDBC":
supports_json = self.connection.dbversion >= "9.2.0"
else:
supports_json = None
if supports_json:
self.types["json"] = "JSON"
if (self.driver_name == "psycopg2" and
self.driver.__version__ >= '2.5.0'):
self.driver_auto_json = ['loads']
else:
LOGGER.debug("Your database version does not support the JSON"
" data type (using TEXT instead)")
def LIKE(self,first,second):
args = (self.expand(first), self.expand(second,'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s LIKE %s)' % args
def ILIKE(self,first,second):
args = (self.expand(first), self.expand(second,'string'))
if not first.type in ('string', 'text', 'json'):
return '(%s LIKE %s)' % (
self.CAST(args[0], 'CHAR(%s)' % first.length), args[1])
else:
return '(%s ILIKE %s)' % args
def REGEXP(self,first,second):
return '(%s ~ %s)' % (self.expand(first),
self.expand(second,'string'))
def STARTSWITH(self,first,second):
return '(%s LIKE %s)' % (self.expand(first),
self.expand(second+'%','string'))
def ENDSWITH(self,first,second):
return '(%s LIKE %s)' % (self.expand(first),
self.expand('%'+second,'string'))
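# Illustrative examples: STARTSWITH(f, 'ab') renders as
# (f LIKE 'ab%') and ENDSWITH(f, 'ab') as (f LIKE '%ab').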
# GIS functions
def ST_ASGEOJSON(self, first, second):
"""
http://postgis.org/docs/ST_AsGeoJSON.html
"""
return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
self.expand(first), second['precision'], second['options'])
def ST_ASTEXT(self, first):
"""
http://postgis.org/docs/ST_AsText.html
"""
return 'ST_AsText(%s)' %(self.expand(first))
def ST_X(self, first):
"""
http://postgis.org/docs/ST_X.html
"""
return 'ST_X(%s)' %(self.expand(first))
def ST_Y(self, first):
"""
http://postgis.org/docs/ST_Y.html
"""
return 'ST_Y(%s)' %(self.expand(first))
def ST_CONTAINS(self, first, second):
"""
http://postgis.org/docs/ST_Contains.html
"""
return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
"""
http://postgis.org/docs/ST_Distance.html
"""
return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_EQUALS(self, first, second):
"""
http://postgis.org/docs/ST_Equals.html
"""
return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
"""
http://postgis.org/docs/ST_Intersects.html
"""
return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
"""
http://postgis.org/docs/ST_Overlaps.html
"""
return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
"""
http://postgis.org/docs/ST_Simplify.html
"""
return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
"""
http://postgis.org/docs/ST_Touches.html
"""
return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_WITHIN(self, first, second):
"""
http://postgis.org/docs/ST_Within.html
"""
return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))
def ST_DWITHIN(self, first, (second, third)):
"""
http://postgis.org/docs/ST_DWithin.html
"""
return 'ST_DWithin(%s,%s,%s)' %(self.expand(first),
self.expand(second, first.type),
self.expand(third, 'double'))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geo'):
srid = 4326 # postGIS default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
parms = parms.split(',')
if len(parms) >= 2:
schema, srid = parms[:2]
if field_is_type('geometry'):
value = "ST_GeomFromText('%s',%s)" %(obj, srid)
elif field_is_type('geography'):
value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
# else:
# raise SyntaxError('Invalid field type %s' %fieldtype)
return value
return BaseAdapter.represent(self, obj, fieldtype)
def _drop(self, table, mode='restrict'):
if mode not in ['restrict', 'cascade', '']:
raise ValueError('Invalid mode: %s' % mode)
return ['DROP TABLE ' + table.sqlsafe + ' ' + str(mode) + ';']
class NewPostgreSQLAdapter(PostgreSQLAdapter):
drivers = ('psycopg2','pg8000')
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'TEXT',
'json': 'TEXT',
'password': 'VARCHAR(%(length)s)',
'blob': 'BYTEA',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INTEGER',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'FLOAT8',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'SERIAL PRIMARY KEY',
'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'BIGINT[]',
'list:string': 'TEXT[]',
'list:reference': 'BIGINT[]',
'geometry': 'GEOMETRY',
'geography': 'GEOGRAPHY',
'big-id': 'BIGSERIAL PRIMARY KEY',
'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def parse_list_integers(self, value, field_type):
return value
def parse_list_references(self, value, field_type):
return [self.parse_reference(r, field_type[5:]) for r in value]
def parse_list_strings(self, value, field_type):
return value
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('list:'):
if not obj:
obj = []
elif not isinstance(obj, (list, tuple)):
obj = [obj]
if field_is_type('list:string'):
obj = map(str,obj)
else:
obj = map(int,obj)
return 'ARRAY[%s]' % ','.join(repr(item) for item in obj)
return BaseAdapter.represent(self, obj, fieldtype)
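# Illustrative examples: represent([1, 2], 'list:integer') yields
# "ARRAY[1,2]" and represent(['a'], 'list:string') yields "ARRAY['a']",
# matching the native array column types above.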
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
drivers = ('zxJDBC',)
REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None ):
self.db = db
self.dbengine = "postgres"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
port = m.group('port') or '5432'
msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
def connector(msg=msg,driver_args=driver_args):
return self.driver.connect(*msg,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.set_client_encoding('UTF8')
self.execute('BEGIN;')
self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
self.try_json()
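# Hedged self-check (added for illustration, not part of the original
# commit): NewPostgreSQLAdapter stores list:* types as native arrays, so
# represent() emits an ARRAY literal; exercised here without a database.
if __name__ == '__main__':
    adapter = NewPostgreSQLAdapter.__new__(NewPostgreSQLAdapter)  # skip __init__/driver lookup
    assert adapter.represent([1, 2, 3], 'list:integer') == 'ARRAY[1,2,3]'
    assert adapter.represent('a', 'list:string') == "ARRAY['a']"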
+97
View File
@@ -0,0 +1,97 @@
# -*- coding: utf-8 -*-
import re
from .._globals import IDENTITY
from .base import BaseAdapter
class SAPDBAdapter(BaseAdapter):
drivers = ('sapdb',)
support_distributed_transaction = False
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'LONG',
'json': 'LONG',
'password': 'VARCHAR(%(length)s)',
'blob': 'LONG',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'FLOAT',
'double': 'DOUBLE PRECISION',
'decimal': 'FIXED(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
'id': 'INT PRIMARY KEY',
'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
'list:integer': 'LONG',
'list:string': 'LONG',
'list:reference': 'LONG',
'big-id': 'BIGINT PRIMARY KEY',
'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
}
def sequence_name(self,table):
return (self.QUOTE_TEMPLATE + '_id_Seq') % table
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
if len(sql_w) > 1:
sql_w_row = sql_w + ' AND w_row > %i' % lmin
else:
sql_w_row = 'WHERE w_row > %i' % lmin
return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def create_sequence_and_triggers(self, query, table, **args):
# following lines should only be executed if table._sequence_name does not exist
self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
% (table._tablename, table._id.name, table._sequence_name))
self.execute(query)
    REGEX_URI = re.compile(r'^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sapdb"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://',1)[1]
m = self.REGEX_URI.match(ruri)
if not m:
raise SyntaxError("Invalid URI string in DAL")
user = credential_decoder(m.group('user'))
if not user:
raise SyntaxError('User required')
password = credential_decoder(m.group('password'))
if not password:
password = ''
host = m.group('host')
if not host:
raise SyntaxError('Host name required')
db = m.group('db')
if not db:
raise SyntaxError('Database name required')
def connector(user=user, password=password, database=db,
host=host, driver_args=driver_args):
return self.driver.Connection(user, password, database,
host, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def lastrowid(self,table):
self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
return long(self.cursor.fetchone()[0])
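# Hedged self-check (added for illustration, not part of the original
# commit; the credentials are made up): the class-level URI regex splits
# a connection string into its components.
if __name__ == '__main__':
    m = SAPDBAdapter.REGEX_URI.match('user:pass@localhost:7210/mydb')
    assert m.group('user') == 'user' and m.group('host') == 'localhost'
    assert m.group('port') == '7210' and m.group('db') == 'mydb'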
+280
View File
@@ -0,0 +1,280 @@
# -*- coding: utf-8 -*-
import copy
import datetime
import locale
import platform
import re
import sys
import time
from .._compat import PY2, pjoin
from .._globals import IDENTITY
from .base import BaseAdapter
class SQLiteAdapter(BaseAdapter):
drivers = ('sqlite2','sqlite3')
    can_select_for_update = None  # emulated in select() below via BEGIN IMMEDIATE TRANSACTION
def EXTRACT(self,field,what):
return "web2py_extract('%s',%s)" % (what, self.expand(field))
@staticmethod
def web2py_extract(lookup, s):
table = {
'year': (0, 4),
'month': (5, 7),
'day': (8, 10),
'hour': (11, 13),
'minute': (14, 16),
'second': (17, 19),
}
try:
if lookup != 'epoch':
(i, j) = table[lookup]
return int(s[i:j])
else:
return time.mktime(datetime.datetime.strptime(s, '%Y-%m-%d %H:%M:%S').timetuple())
except:
return None
@staticmethod
def web2py_regexp(expression, item):
return re.compile(expression).search(item) is not None
def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sqlite"
self.uri = uri
self.adapter_args = adapter_args
if do_connect: self.find_driver(adapter_args)
self.pool_size = 0
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
if self.dbpath[0] != '/':
if PY2:
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
else:
self.dbpath = pjoin(self.folder, self.dbpath)
if not 'check_same_thread' in driver_args:
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
self.connection.create_function("REGEXP", 2,
SQLiteAdapter.web2py_regexp)
if self.adapter_args.get('foreign_keys',True):
self.execute('PRAGMA foreign_keys=ON;')
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['DELETE FROM %s;' % tablename,
"DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]
def lastrowid(self, table):
return self.cursor.lastrowid
def REGEXP(self,first,second):
return '(%s REGEXP %s)' % (self.expand(first),
self.expand(second,'string'))
def delete(self, tablename, query):
# SQLite requires its own delete to handle CASCADE
db = self.db
table = db[tablename]
deleted = [x[table._id.name] for x in db(query).select(table._id)]
counter = super(SQLiteAdapter, self).delete(tablename, query)
if counter:
for field in table._referenced_by:
if field.type == 'reference '+ tablename \
and field.ondelete == 'CASCADE':
db(field.belongs(deleted)).delete()
return counter
def select(self, query, fields, attributes):
"""
Simulate `SELECT ... FOR UPDATE` with `BEGIN IMMEDIATE TRANSACTION`.
Note that the entire database, rather than one record, is locked
(it will be locked eventually anyway by the following UPDATE).
"""
if attributes.get('for_update', False) and not 'cache' in attributes:
self.execute('BEGIN IMMEDIATE TRANSACTION;')
return super(SQLiteAdapter, self).select(query, fields, attributes)
SPATIALLIBS = {
'Windows':'libspatialite',
'Linux':'libspatialite.so',
'Darwin':'libspatialite.dylib'
}
class SpatiaLiteAdapter(SQLiteAdapter):
drivers = ('sqlite3','sqlite2')
types = copy.copy(BaseAdapter.types)
types.update(geometry='GEOMETRY')
def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, srid=4326, after_connection=None):
self.db = db
self.dbengine = "spatialite"
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.pool_size = 0
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
self.srid = srid
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('spatialite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
            if self.dbpath[0] != '/':
                if PY2:
                    self.dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
                else:
                    self.dbpath = pjoin(self.folder, self.dbpath)
if not 'check_same_thread' in driver_args:
driver_args['check_same_thread'] = False
if not 'detect_types' in driver_args and do_connect:
driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
def connector(dbpath=self.dbpath, driver_args=driver_args):
return self.driver.Connection(dbpath, **driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
self.connection.enable_load_extension(True)
# for Windows, rename libspatialite-2.dll to libspatialite.dll
# Linux uses libspatialite.so
# Mac OS X uses libspatialite.dylib
libspatialite = SPATIALLIBS[platform.system()]
self.execute(r'SELECT load_extension("%s");' % libspatialite)
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
self.connection.create_function("REGEXP", 2,
SQLiteAdapter.web2py_regexp)
# GIS functions
def ST_ASGEOJSON(self, first, second):
return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
second['precision'], second['options'])
def ST_ASTEXT(self, first):
return 'AsText(%s)' %(self.expand(first))
def ST_CONTAINS(self, first, second):
return 'Contains(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_DISTANCE(self, first, second):
return 'Distance(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_EQUALS(self, first, second):
return 'Equals(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_INTERSECTS(self, first, second):
return 'Intersects(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_OVERLAPS(self, first, second):
return 'Overlaps(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_SIMPLIFY(self, first, second):
return 'Simplify(%s,%s)' %(self.expand(first),
self.expand(second, 'double'))
def ST_TOUCHES(self, first, second):
return 'Touches(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def ST_WITHIN(self, first, second):
return 'Within(%s,%s)' %(self.expand(first),
self.expand(second, first.type))
def represent(self, obj, fieldtype):
field_is_type = fieldtype.startswith
if field_is_type('geo'):
srid = 4326 # Spatialite default srid for geometry
geotype, parms = fieldtype[:-1].split('(')
parms = parms.split(',')
if len(parms) >= 2:
schema, srid = parms[:2]
# if field_is_type('geometry'):
value = "ST_GeomFromText('%s',%s)" %(obj, srid)
# elif field_is_type('geography'):
# value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
# else:
# raise SyntaxError, 'Invalid field type %s' %fieldtype
return value
return BaseAdapter.represent(self, obj, fieldtype)
class JDBCSQLiteAdapter(SQLiteAdapter):
drivers = ('zxJDBC_sqlite',)
def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "sqlite"
self.uri = uri
if do_connect: self.find_driver(adapter_args)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
path_encoding = sys.getfilesystemencoding() \
or locale.getdefaultlocale()[1] or 'utf8'
if uri.startswith('sqlite:memory'):
self.dbpath = ':memory:'
else:
self.dbpath = uri.split('://',1)[1]
if self.dbpath[0] != '/':
self.dbpath = pjoin(
self.folder.decode(path_encoding).encode('utf8'), self.dbpath)
def connector(dbpath=self.dbpath,driver_args=driver_args):
return self.driver.connect(
self.driver.getConnection('jdbc:sqlite:'+dbpath),
**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def after_connection(self):
# FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
self.connection.create_function('web2py_extract', 2,
SQLiteAdapter.web2py_extract)
def execute(self, a):
return self.log_execute(a)
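# Hedged self-check (added for illustration, not part of the original
# commit): the two static helpers registered as SQLite UDFs above are
# plain Python callables and can be exercised without a database.
if __name__ == '__main__':
    assert SQLiteAdapter.web2py_extract('year', '2014-09-30 00:24:26') == 2014
    assert SQLiteAdapter.web2py_extract('minute', '2014-09-30 00:24:26') == 24
    assert SQLiteAdapter.web2py_regexp('^w.b2py$', 'web2py') is True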
+76
View File
@@ -0,0 +1,76 @@
# -*- coding: utf-8 -*-
from .._globals import IDENTITY
from ..connection import ConnectionPool
from .base import BaseAdapter
class TeradataAdapter(BaseAdapter):
drivers = ('pyodbc',)
types = {
'boolean': 'CHAR(1)',
'string': 'VARCHAR(%(length)s)',
'text': 'VARCHAR(2000)',
'json': 'VARCHAR(4000)',
'password': 'VARCHAR(%(length)s)',
'blob': 'BLOB',
'upload': 'VARCHAR(%(length)s)',
'integer': 'INT',
'bigint': 'BIGINT',
'float': 'REAL',
'double': 'DOUBLE',
'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
'date': 'DATE',
'time': 'TIME',
'datetime': 'TIMESTAMP',
# Modified Constraint syntax for Teradata.
# Teradata does not support ON DELETE.
'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
'reference': 'INT',
'list:integer': 'VARCHAR(4000)',
'list:string': 'VARCHAR(4000)',
'list:reference': 'VARCHAR(4000)',
'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
'big-reference': 'BIGINT',
'reference FK': ' REFERENCES %(foreign_key)s',
'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
}
def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
credential_decoder=IDENTITY, driver_args={},
adapter_args={}, do_connect=True, after_connection=None):
self.db = db
self.dbengine = "teradata"
self.uri = uri
if do_connect: self.find_driver(adapter_args,uri)
self.pool_size = pool_size
self.folder = folder
self.db_codec = db_codec
self._after_connection = after_connection
self.find_or_make_work_folder()
ruri = uri.split('://', 1)[1]
def connector(cnxn=ruri,driver_args=driver_args):
return self.driver.connect(cnxn,**driver_args)
self.connector = connector
if do_connect: self.reconnect()
def close(self,action='commit',really=True):
# Teradata does not implicitly close off the cursor
# leading to SQL_ACTIVE_STATEMENTS limit errors
self.cursor.close()
ConnectionPool.close(self, action, really)
def LEFT_JOIN(self):
return 'LEFT OUTER JOIN'
    # Similar to MSSQL, Teradata cannot specify a row range for pagination;
    # only the upper bound is honored, via TOP
def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
if limitby:
(lmin, lmax) = limitby
sql_s += ' TOP %i' % lmax
return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
def _truncate(self, table, mode=''):
tablename = table._tablename
return ['DELETE FROM %s ALL;' % (tablename)]
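# Hedged self-check (added for illustration, not part of the original
# commit): Teradata emulates limitby with TOP, so only the upper bound
# survives; the offset part of the slice is ignored.
if __name__ == '__main__':
    adapter = TeradataAdapter.__new__(TeradataAdapter)  # skip __init__/driver lookup
    sql = adapter.select_limitby('', 'id', 'person', '', '', (0, 10))
    assert sql == 'SELECT  TOP 10 id FROM person;'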
+1093
View File
File diff suppressed because it is too large
+116
View File
@@ -0,0 +1,116 @@
# -*- coding: utf-8 -*-
import os
from ._compat import exists
from ._globals import GLOBAL_LOCKER, THREAD_LOCAL
from .helpers.classes import UseDatabaseStoredFile
class ConnectionPool(object):
POOLS = {}
check_active_connection = True
@staticmethod
def set_folder(folder):
THREAD_LOCAL.folder = folder
# ## this allows gluon to commit/rollback all dbs in this thread
def close(self,action='commit',really=True):
if action:
if callable(action):
action(self)
else:
getattr(self, action)()
# ## if you want pools, recycle this connection
if self.pool_size:
GLOBAL_LOCKER.acquire()
pool = ConnectionPool.POOLS[self.uri]
if len(pool) < self.pool_size:
pool.append(self.connection)
really = False
GLOBAL_LOCKER.release()
if really:
self.close_connection()
self.connection = None
@staticmethod
def close_all_instances(action):
""" to close cleanly databases in a multithreaded environment """
dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
for db_uid, db_group in dbs:
for db in db_group:
if hasattr(db,'_adapter'):
db._adapter.close(action)
getattr(THREAD_LOCAL,'db_instances',{}).clear()
getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
if callable(action):
action(None)
return
def find_or_make_work_folder(self):
        # this does not actually create the folder; it must already exist
self.folder = getattr(THREAD_LOCAL,'folder','')
if (os.path.isabs(self.folder) and
isinstance(self, UseDatabaseStoredFile) and
self.folder.startswith(os.getcwd())):
self.folder = os.path.relpath(self.folder, os.getcwd())
        # folder creation is intentionally disabled (the folder must pre-exist)
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)
def after_connection_hook(self):
"""Hook for the after_connection parameter"""
if callable(self._after_connection):
self._after_connection(self)
self.after_connection()
def after_connection(self):
        # this is meant to be overridden by adapters
pass
def reconnect(self, f=None, cursor=True):
"""
Defines: `self.connection` and `self.cursor`
(if cursor is True)
if `self.pool_size>0` it will try pull the connection from the pool
if the connection is not active (closed by db server) it will loop
if not `self.pool_size` or no active connections in pool makes a new one
"""
if getattr(self,'connection', None) is not None:
return
if f is None:
f = self.connector
# if not hasattr(self, "driver") or self.driver is None:
# LOGGER.debug("Skipping connection since there's no driver")
# return
if not self.pool_size:
self.connection = f()
self.cursor = cursor and self.connection.cursor()
else:
uri = self.uri
POOLS = ConnectionPool.POOLS
while True:
GLOBAL_LOCKER.acquire()
if not uri in POOLS:
POOLS[uri] = []
if POOLS[uri]:
self.connection = POOLS[uri].pop()
GLOBAL_LOCKER.release()
self.cursor = cursor and self.connection.cursor()
try:
if self.cursor and self.check_active_connection:
self.execute('SELECT 1;')
break
except:
pass
else:
GLOBAL_LOCKER.release()
self.connection = f()
self.cursor = cursor and self.connection.cursor()
break
self.after_connection_hook()
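# Hedged sketch (added for illustration, not part of the original commit;
# FakeConnection/FakeAdapter are hypothetical stand-ins): with pool_size > 0,
# close() parks the live connection in ConnectionPool.POOLS instead of
# closing it, and the next reconnect() pops it back out.
if __name__ == '__main__':
    class FakeConnection(object):
        def cursor(self):
            return None
    class FakeAdapter(ConnectionPool):
        uri = 'fake://demo'
        pool_size = 2
        check_active_connection = False
        _after_connection = None
        def connector(self):
            return FakeConnection()
        def commit(self):
            pass
        def close_connection(self):
            pass
    a = FakeAdapter()
    a.reconnect(cursor=False)
    conn = a.connection
    a.close('commit')  # parked in the pool, not closed
    assert ConnectionPool.POOLS['fake://demo'] == [conn]
    a.reconnect(cursor=False)  # pops the pooled connection back out
    assert a.connection is conn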
+298
View File
@@ -0,0 +1,298 @@
# -*- coding: utf-8 -*-
import copy
import marshal
import struct
import traceback
from .._compat import exists, copyreg
from .._globals import LOGGER
class Reference(long):
def __allocate(self):
if not self._record:
self._record = self._table[long(self)]
if not self._record:
raise RuntimeError(
"Using a recursive select but encountered a broken reference: %s %d"%(self._table, long(self)))
    def __getattr__(self, key, default=None):
        if key == 'id':
            return long(self)
        if key in self._table:
            self.__allocate()
        if self._record:
            return self._record.get(key, default) # to deal with case self.update_record()
        else:
            return default
    def get(self, key, default=None):
        # __getattr__ accepts a default, so this delegation is well-formed
        return self.__getattr__(key, default)
def __setattr__(self, key, value):
if key.startswith('_'):
long.__setattr__(self, key, value)
return
self.__allocate()
self._record[key] = value
def __getitem__(self, key):
if key == 'id':
return long(self)
self.__allocate()
return self._record.get(key, None)
def __setitem__(self,key,value):
self.__allocate()
self._record[key] = value
def Reference_unpickler(data):
return marshal.loads(data)
def Reference_pickler(data):
try:
marshal_dump = marshal.dumps(long(data))
except AttributeError:
marshal_dump = 'i%s' % struct.pack('<i', long(data))
return (Reference_unpickler, (marshal_dump,))
copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class SQLCallableList(list):
def __call__(self):
return copy.copy(self)
class SQLALL(object):
"""
Helper class providing a comma-separated string having all the field names
(prefixed by table name and '.')
normally only called from within gluon.dal
"""
def __init__(self, table):
self._table = table
def __str__(self):
return ', '.join([str(field) for field in self._table])
class SQLCustomType(object):
"""
    Allows the definition of custom SQL types
Args:
type: the web2py type (default = 'string')
native: the backend type
encoder: how to encode the value to store it in the backend
decoder: how to decode the value retrieved from the backend
validator: what validators to use ( default = None, will use the
default validator for type)
Example::
Define as:
decimal = SQLCustomType(
type ='double',
native ='integer',
encoder =(lambda x: int(float(x) * 100)),
decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
)
db.define_table(
'example',
Field('value', type=decimal)
)
"""
def __init__(
self,
type='string',
native=None,
encoder=None,
decoder=None,
validator=None,
_class=None,
):
self.type = type
self.native = native
self.encoder = encoder or (lambda x: x)
self.decoder = decoder or (lambda x: x)
self.validator = validator
self._class = _class or type
    def startswith(self, text=None):
        try:
            # delegate to the underlying web2py type string
            return self.type.startswith(text)
        except TypeError:
            return False
    def endswith(self, text=None):
        try:
            return self.type.endswith(text)
        except TypeError:
            return False
def __getslice__(self, a=0, b=100):
return None
def __getitem__(self, i):
return None
def __str__(self):
return self._class
class RecordUpdater(object):
def __init__(self, colset, table, id):
self.colset, self.db, self.tablename, self.id = \
colset, table._db, table._tablename, id
def __call__(self, **fields):
colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
table = db[tablename]
newfields = fields or dict(colset)
        for fieldname in list(newfields.keys()):
            # iterate over a copy: entries may be deleted while looping
            if not fieldname in table.fields or table[fieldname].type=='id':
                del newfields[fieldname]
table._db(table._id==id,ignore_common_filters=True).update(**newfields)
colset.update(newfields)
return colset
class RecordDeleter(object):
def __init__(self, table, id):
self.db, self.tablename, self.id = table._db, table._tablename, id
def __call__(self):
return self.db(self.db[self.tablename]._id==self.id).delete()
class MethodAdder(object):
def __init__(self,table):
self.table = table
def __call__(self):
return self.register()
def __getattr__(self,method_name):
return self.register(method_name)
def register(self,method_name=None):
def _decorated(f):
instance = self.table
import types
method = types.MethodType(f, instance, instance.__class__)
name = method_name or f.func_name
setattr(instance, name, method)
return f
return _decorated
class DatabaseStoredFile:
web2py_filesystems = set()
def escape(self,obj):
return self.db._adapter.escape(obj)
@staticmethod
def try_create_web2py_filesystem(db):
if not db._uri in DatabaseStoredFile.web2py_filesystems:
if db._adapter.dbengine == 'mysql':
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
elif db._adapter.dbengine in ('postgres', 'sqlite'):
sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
db.executesql(sql)
DatabaseStoredFile.web2py_filesystems.add(db._uri)
def __init__(self,db,filename,mode):
if not db._adapter.dbengine in ('mysql', 'postgres', 'sqlite'):
raise RuntimeError("only MySQL/Postgres/SQLite can store metadata .table files in database for now")
self.db = db
self.filename = filename
self.mode = mode
DatabaseStoredFile.try_create_web2py_filesystem(db)
self.p=0
self.data = ''
if mode in ('r','rw','a'):
query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
% filename
rows = self.db.executesql(query)
if rows:
self.data = rows[0][0]
elif exists(filename):
datafile = open(filename, 'r')
try:
self.data = datafile.read()
finally:
datafile.close()
elif mode in ('r','rw'):
raise RuntimeError("File %s does not exist" % filename)
def read(self, bytes):
data = self.data[self.p:self.p+bytes]
self.p += len(data)
return data
def readline(self):
i = self.data.find('\n',self.p)+1
if i>0:
data, self.p = self.data[self.p:i], i
else:
data, self.p = self.data[self.p:], len(self.data)
return data
def write(self,data):
self.data += data
def close_connection(self):
if self.db is not None:
self.db.executesql(
"DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
% (self.filename, self.data.replace("'","''"))
self.db.executesql(query)
self.db.commit()
self.db = None
def close(self):
self.close_connection()
@staticmethod
def exists(db, filename):
if exists(filename):
return True
DatabaseStoredFile.try_create_web2py_filesystem(db)
query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
try:
if db.executesql(query):
return True
        except Exception as e:
            if not (db._adapter.isOperationalError(e) or
                    db._adapter.isProgrammingError(e)):
                raise
# no web2py_filesystem found?
tb = traceback.format_exc()
LOGGER.error("Could not retrieve %s\n%s" % (filename, tb))
return False
class UseDatabaseStoredFile:
def file_exists(self, filename):
return DatabaseStoredFile.exists(self.db,filename)
def file_open(self, filename, mode='rb', lock=True):
return DatabaseStoredFile(self.db,filename,mode)
def file_close(self, fileobj):
fileobj.close_connection()
def file_delete(self,filename):
query = "DELETE FROM web2py_filesystem WHERE path='%s'" % filename
self.db.executesql(query)
self.db.commit()
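# Hedged self-check (added for illustration, not part of the original
# commit): a Reference pickles down to its integer id, and SQLCustomType's
# encoder/decoder pair round-trips a value without any database.
if __name__ == '__main__':
    import pickle
    from decimal import Decimal
    assert long(pickle.loads(pickle.dumps(Reference(42)))) == 42
    money = SQLCustomType(type='double', native='integer',
                          encoder=lambda x: int(float(x) * 100),
                          decoder=lambda x: Decimal(str(float(x) / 100)))
    assert money.encoder('12.5') == 1250
    assert money.decoder(1250) == Decimal('12.5')
    assert str(money) == 'double'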
+342
View File
@@ -0,0 +1,342 @@
# -*- coding: utf-8 -*-
import uuid
import re
from .regex import REGEX_NOPASSWD, REGEX_UNPACK, REGEX_CONST_STRING, REGEX_W
from .classes import SQLCustomType
#from ..objects import Field, Table
PLURALIZE_RULES = [
(re.compile('child$'), re.compile('child$'), 'children'),
(re.compile('oot$'), re.compile('oot$'), 'eet'),
(re.compile('ooth$'), re.compile('ooth$'), 'eeth'),
(re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'),
(re.compile('sis$'), re.compile('sis$'), 'ses'),
(re.compile('man$'), re.compile('man$'), 'men'),
(re.compile('ife$'), re.compile('ife$'), 'ives'),
(re.compile('eau$'), re.compile('eau$'), 'eaux'),
(re.compile('lf$'), re.compile('lf$'), 'lves'),
(re.compile('[sxz]$'), re.compile('$'), 'es'),
(re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'),
(re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'),
(re.compile('$'), re.compile('$'), 's'),
]
def pluralize(singular, rules=PLURALIZE_RULES):
for line in rules:
re_search, re_sub, replace = line
plural = re_search.search(singular) and re_sub.sub(replace, singular)
if plural: return plural
def hide_password(uri):
if isinstance(uri,(list,tuple)):
return [hide_password(item) for item in uri]
return REGEX_NOPASSWD.sub('******',uri)
def cleanup(text):
"""
Validates that the given text is clean: only contains [0-9a-zA-Z_]
"""
#if not REGEX_ALPHANUMERIC.match(text):
# raise SyntaxError('invalid table or field name: %s' % text)
return text
def list_represent(x,r=None):
return ', '.join(str(y) for y in x or [])
def xorify(orderby):
if not orderby:
return None
orderby2 = orderby[0]
for item in orderby[1:]:
orderby2 = orderby2 | item
return orderby2
def use_common_filters(query):
return (query and hasattr(query,'ignore_common_filters') and \
not query.ignore_common_filters)
def bar_escape(item):
return str(item).replace('|', '||')
def bar_encode(items):
return '|%s|' % '|'.join(bar_escape(item) for item in items if str(item).strip())
def bar_decode_integer(value):
if not hasattr(value,'split') and hasattr(value,'read'):
value = value.read()
return [long(x) for x in value.split('|') if x.strip()]
def bar_decode_string(value):
return [x.replace('||', '|') for x in
REGEX_UNPACK.split(value[1:-1]) if x.strip()]
def archive_record(qset, fs, archive_table, current_record):
tablenames = qset.db._adapter.tables(qset.query)
if len(tablenames) != 1:
raise RuntimeError("cannot update join")
for row in qset.select():
fields = archive_table._filter_fields(row)
fields[current_record] = row.id
archive_table.insert(**fields)
return False
def smart_query(fields,text):
from ..objects import Field, Table
if not isinstance(fields,(list,tuple)):
fields = [fields]
new_fields = []
for field in fields:
if isinstance(field,Field):
new_fields.append(field)
elif isinstance(field,Table):
for ofield in field:
new_fields.append(ofield)
else:
raise RuntimeError("fields must be a list of fields")
fields = new_fields
field_map = {}
for field in fields:
n = field.name.lower()
if not n in field_map:
field_map[n] = field
n = str(field).lower()
if not n in field_map:
field_map[n] = field
constants = {}
i = 0
while True:
m = REGEX_CONST_STRING.search(text)
if not m: break
text = text[:m.start()]+('#%i' % i)+text[m.end():]
constants[str(i)] = m.group()[1:-1]
i+=1
    text = re.sub(r'\s+', ' ', text).lower()
for a,b in [('&','and'),
('|','or'),
('~','not'),
('==','='),
('<','<'),
('>','>'),
('<=','<='),
('>=','>='),
('<>','!='),
('=<','<='),
('=>','>='),
('=','='),
(' less or equal than ','<='),
(' greater or equal than ','>='),
(' equal or less than ','<='),
(' equal or greater than ','>='),
(' less or equal ','<='),
(' greater or equal ','>='),
(' equal or less ','<='),
(' equal or greater ','>='),
(' not equal to ','!='),
(' not equal ','!='),
(' equal to ','='),
(' equal ','='),
(' equals ','='),
(' less than ','<'),
(' greater than ','>'),
(' starts with ','startswith'),
(' ends with ','endswith'),
(' not in ' , 'notbelongs'),
(' in ' , 'belongs'),
(' is ','=')]:
if a[0]==' ':
text = text.replace(' is'+a,' %s ' % b)
text = text.replace(a,' %s ' % b)
    text = re.sub(r'\s+', ' ', text).lower()
    text = re.sub(r'(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])', r'\g<a>\g<b>', text)
query = field = neg = op = logic = None
for item in text.split():
if field is None:
if item == 'not':
neg = True
elif not neg and not logic and item in ('and','or'):
logic = item
elif item in field_map:
field = field_map[item]
else:
raise RuntimeError("Invalid syntax")
elif not field is None and op is None:
op = item
elif not op is None:
if item.startswith('#'):
if not item[1:] in constants:
raise RuntimeError("Invalid syntax")
value = constants[item[1:]]
else:
value = item
if field.type in ('text', 'string', 'json'):
if op == '=': op = 'like'
if op == '=': new_query = field==value
elif op == '<': new_query = field<value
elif op == '>': new_query = field>value
elif op == '<=': new_query = field<=value
elif op == '>=': new_query = field>=value
elif op == '!=': new_query = field!=value
elif op == 'belongs': new_query = field.belongs(value.split(','))
elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
elif field.type in ('text', 'string', 'json'):
if op == 'contains': new_query = field.contains(value)
elif op == 'like': new_query = field.ilike(value)
elif op == 'startswith': new_query = field.startswith(value)
elif op == 'endswith': new_query = field.endswith(value)
else: raise RuntimeError("Invalid operation")
elif field._db._adapter.dbengine=='google:datastore' and \
field.type in ('list:integer', 'list:string', 'list:reference'):
if op == 'contains': new_query = field.contains(value)
else: raise RuntimeError("Invalid operation")
else: raise RuntimeError("Invalid operation")
if neg: new_query = ~new_query
if query is None:
query = new_query
elif logic == 'and':
query &= new_query
elif logic == 'or':
query |= new_query
field = op = neg = logic = None
return query
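# Hedged usage sketch (added for illustration, not part of the original
# commit; db.person is a made-up table): smart_query translates a textual
# filter into a Query, e.g.
#   smart_query([db.person], 'person.name contains "gi" and not person.id > 5')
# builds the equivalent of db.person.name.contains('gi') & ~(db.person.id > 5).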
def sqlhtml_validators(field):
"""
Field type validation, using web2py's validators mechanism.
makes sure the content of a field is in line with the declared
fieldtype
"""
db = field.db
try:
from gluon import validators
except ImportError:
return []
field_type, field_length = field.type, field.length
if isinstance(field_type, SQLCustomType):
if hasattr(field_type, 'validator'):
return field_type.validator
else:
field_type = field_type.type
elif not isinstance(field_type,str):
return []
requires=[]
def ff(r,id):
row=r(id)
if not row:
return str(id)
elif hasattr(r, '_format') and isinstance(r._format,str):
return r._format % row
elif hasattr(r, '_format') and callable(r._format):
return r._format(row)
else:
return str(id)
    if field_type in ('string', 'text', 'password'):
requires.append(validators.IS_LENGTH(field_length))
elif field_type == 'json':
requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
elif field_type == 'double' or field_type == 'float':
requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
elif field_type == 'integer':
requires.append(validators.IS_INT_IN_RANGE(-2**31, 2**31))
elif field_type == 'bigint':
requires.append(validators.IS_INT_IN_RANGE(-2**63, 2**63))
elif field_type.startswith('decimal'):
requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
elif field_type == 'date':
requires.append(validators.IS_DATE())
elif field_type == 'time':
requires.append(validators.IS_TIME())
elif field_type == 'datetime':
requires.append(validators.IS_DATETIME())
elif db and field_type.startswith('reference') and \
field_type.find('.') < 0 and \
field_type[10:] in db.tables:
referenced = db[field_type[10:]]
def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
field.represent = field.represent or repr_ref
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db,referenced._id,
referenced._format)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db,field)
if field.tablename == field_type[10:]:
return validators.IS_EMPTY_OR(requires)
return requires
elif db and field_type.startswith('list:reference') and \
field_type.find('.') < 0 and \
field_type[15:] in db.tables:
referenced = db[field_type[15:]]
def list_ref_repr(ids, row=None, r=referenced, f=ff):
if not ids:
return None
from ..adapters.google import GoogleDatastoreAdapter
refs = None
db, id = r._db, r._id
if isinstance(db._adapter, GoogleDatastoreAdapter):
def count(values): return db(id.belongs(values)).select(id)
rx = range(0, len(ids), 30)
refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
else:
refs = db(id.belongs(ids)).select(id)
return (refs and ', '.join(f(r,x.id) for x in refs) or '')
field.represent = field.represent or list_ref_repr
if hasattr(referenced, '_format') and referenced._format:
requires = validators.IS_IN_DB(db,referenced._id,
referenced._format,multiple=True)
else:
requires = validators.IS_IN_DB(db,referenced._id,
multiple=True)
if field.unique:
requires._and = validators.IS_NOT_IN_DB(db,field)
if not field.notnull:
requires = validators.IS_EMPTY_OR(requires)
return requires
elif field_type.startswith('list:'):
        def repr_list(values, row=None): return ', '.join(str(v) for v in (values or []))
field.represent = field.represent or repr_list
if field.unique:
requires.append(validators.IS_NOT_IN_DB(db, field))
sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
if field.notnull and not field_type[:2] in sff:
requires.append(validators.IS_NOT_EMPTY())
elif not field.notnull and field_type[:2] in sff and requires:
requires[0] = validators.IS_EMPTY_OR(requires[0])
return requires
def varquote_aux(name,quotestr='%s'):
return name if REGEX_W.match(name) else quotestr % name
def uuid2int(uuidv):
return uuid.UUID(uuidv).int
def int2uuid(n):
return str(uuid.UUID(int=n))
# Geodal utils
def geoPoint(x, y):
return "POINT (%f %f)" % (x, y)
def geoLine(*line):
return "LINESTRING (%s)" % ','.join("%f %f" % item for item in line)
def geoPolygon(*line):
return "POLYGON ((%s))" % ','.join("%f %f" % item for item in line)
+22
View File
@@ -0,0 +1,22 @@
# -*- coding: utf-8 -*-
import re
REGEX_TYPE = re.compile(r'^([\w\_\:]+)')
REGEX_DBNAME = re.compile(r'^(\w+)(\:\w+)*')
REGEX_W = re.compile(r'^\w+$')
REGEX_TABLE_DOT_FIELD = re.compile(r'^(\w+)\.([^.]+)$')
REGEX_NO_GREEDY_ENTITY_NAME = r'(.+?)'
REGEX_UPLOAD_PATTERN = re.compile(r'(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)(\.(?P<name>\w+))?\.\w+$')
REGEX_CLEANUP_FN = re.compile(r'[\'"\s;]+')
REGEX_UNPACK = re.compile(r'(?<!\|)\|(?!\|)')
REGEX_PYTHON_KEYWORDS = re.compile(r'^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$')
REGEX_SELECT_AS_PARSER = re.compile(r"\s+AS\s+(\S+)")
REGEX_CONST_STRING = re.compile(r'(\"[^\"]*?\")|(\'[^\']*?\')')
REGEX_SEARCH_PATTERN = re.compile(r'^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$')
REGEX_SQUARE_BRACKETS = re.compile(r'^.+\[.+\]$')
REGEX_STORE_PATTERN = re.compile(r'\.(?P<e>\w{1,5})$')
REGEX_QUOTES = re.compile(r"'[^']*'")
REGEX_ALPHANUMERIC = re.compile(r'^[0-9a-zA-Z]\w*$')
REGEX_PASSWORD = re.compile(r'\://([^:@]*)\:')
REGEX_NOPASSWD = re.compile(r'\/\/[\w\.\-]+[\:\/](.+)(?=@)') # was '(?<=[\:\/])([^:@/]+)(?=@.+)'
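# Hedged self-check (added for illustration, not part of the original
# commit): REGEX_TABLE_DOT_FIELD splits a "table.field" pair, and
# REGEX_NOPASSWD is what hide_password() uses to mask credentials.
if __name__ == '__main__':
    assert REGEX_TABLE_DOT_FIELD.match('person.name').groups() == ('person', 'name')
    masked = REGEX_NOPASSWD.sub('******', 'postgres://user:secret@localhost/db')
    assert 'secret' not in masked and '@localhost/db' in masked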
+2704
View File
File diff suppressed because it is too large
+7 -6
View File
@@ -25,6 +25,7 @@ from gluon.serializers import json, custom_json
import gluon.settings as settings
from gluon.utils import web2py_uuid, secure_dumps, secure_loads
from gluon.settings import global_settings
from gluon.dal import Field
from gluon import recfile
import hashlib
import portalocker
@@ -197,7 +198,7 @@ class Request(Storage):
def parse_get_vars(self):
"""Takes the QUERY_STRING and unpacks it to get_vars
"""
        query_string = self.env.get('QUERY_STRING', '')
dget = urlparse.parse_qs(query_string, keep_blank_values=1) # Ref: https://docs.python.org/2/library/cgi.html#cgi.parse_qs
get_vars = self._get_vars = Storage(dget)
for (key, value) in get_vars.iteritems():
@@ -254,7 +255,7 @@ class Request(Storage):
# its value else leave it alone
pvalue = listify([(_dpk if _dpk.filename else _dpk.value)
                          for _dpk in dpk]
if isinstance(dpk, list) else
(dpk if dpk.filename else dpk.value))
if len(pvalue):
@@ -393,7 +394,7 @@ class Response(Storage):
self._view_environment = None
self._custom_commit = None
self._custom_rollback = None
        self.generic_patterns = ['*']
self.delimiters = ('{{','}}')
self.formstyle = 'table3cols'
self.form_label_separator = ': '
@@ -691,10 +692,10 @@ class Response(Storage):
DIV(BEAUTIFY(current.response), backtotop,
_class="w2p-toolbar-hidden", _id="response-%s" % u),
DIV(BEAUTIFY(dbtables), backtotop,
_class="w2p-toolbar-hidden",_id="db-tables-%s" % u),
_class="w2p-toolbar-hidden",_id="db-tables-%s" % u),
DIV(BEAUTIFY(dbstats), backtotop,
_class="w2p-toolbar-hidden", _id="db-stats-%s" % u),
SCRIPT("jQuery('.w2p-toolbar-hidden').hide()"),
SCRIPT("jQuery('.w2p-toolbar-hidden').hide()"),
_id="totop-%s" % u
)
@@ -868,7 +869,7 @@ class Session(Storage):
table_migrate = False
tname = tablename + '_' + masterapp
table = db.get(tname, None)
Field = db.Field
#Field = db.Field
if table is None:
db.define_table(
tname,
+1 -1
View File
@@ -93,7 +93,7 @@ from gluon.globals import Request, Response, Session
from gluon.compileapp import build_environment, run_models_in, \
run_controller_in, run_view_in
from gluon.contenttype import contenttype
from gluon.dal import BaseAdapter
from gluon.dal.base import BaseAdapter
from gluon.validators import CRYPT
from gluon.html import URL, xmlescape
from gluon.utils import is_valid_ip_address, getipaddrinfo
+1 -1
View File
@@ -598,7 +598,7 @@ class Scheduler(MetaScheduler):
def define_tables(self, db, migrate):
"""Defines Scheduler tables structure"""
from gluon.dal import DEFAULT
from gluon.dal.base import DEFAULT
logger.debug('defining tables (migrate=%s)', migrate)
now = self.now
db.define_table(
+1 -1
View File
@@ -28,7 +28,7 @@ from gluon.restricted import RestrictedError
from gluon.globals import Request, Response, Session
from gluon.storage import Storage, List
from gluon.admin import w2p_unpack
from gluon.dal import BaseAdapter
from gluon.dal.base import BaseAdapter
logger = logging.getLogger("web2py")
+14 -1
View File
@@ -12,4 +12,17 @@ Just for backward compatibility
"""
__all__ = ['DAL', 'Field', 'DRIVERS']
from dal import DAL, Field, Table, Query, Set, Expression, Row, Rows, DRIVERS, BaseAdapter, SQLField, SQLTable, SQLXorable, SQLQuery, SQLSet, SQLRows, SQLStorage, SQLDB, GQLDB, SQLALL, SQLCustomType
from dal import DAL, Field, SQLCustomType
from dal.adapters.base import BaseAdapter, DRIVERS
from dal.objects import Table, Query, Set, Expression, Row, Rows
from dal.helpers.classes import SQLALL
SQLDB = DAL
GQLDB = DAL
SQLField = Field
SQLTable = Table
SQLXorable = Expression
SQLQuery = Query
SQLSet = Set
SQLRows = Rows
SQLStorage = Row
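# Hedged self-check (added for illustration, not part of the original
# commit): the legacy aliases resolve to the new modular names.
if __name__ == '__main__':
    assert SQLDB is DAL and SQLField is Field and SQLTable is Table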
+6 -3
View File
@@ -21,9 +21,12 @@ from gluon.html import XML, SPAN, TAG, A, DIV, CAT, UL, LI, TEXTAREA, BR, IMG, S
from gluon.html import FORM, INPUT, LABEL, OPTION, SELECT, COL, COLGROUP
from gluon.html import TABLE, THEAD, TBODY, TR, TD, TH, STYLE, DEFAULT_PASSWORD_DISPLAY
from gluon.html import URL, truncate_string, FIELDSET
from gluon.dal import DAL, Field, Table, Row, CALLABLETYPES, smart_query, \
bar_encode, Reference, Expression, SQLCustomType, sqlhtml_validators, \
DEFAULT
from gluon.dal import DAL, Field
from gluon.dal.base import DEFAULT
from gluon.dal.objects import Table, Row, Expression
from gluon.dal.adapters.base import CALLABLETYPES
from gluon.dal.helpers.methods import smart_query, bar_encode, sqlhtml_validators
from gluon.dal.helpers.classes import Reference, SQLCustomType
from gluon.storage import Storage
from gluon.utils import md5_hash
from gluon.validators import IS_EMPTY_OR, IS_NOT_EMPTY, IS_LIST_OF, IS_DATE, \
+4 -2
View File
@@ -24,7 +24,9 @@ DEFAULT_URI = os.getenv('DB', 'sqlite:memory')
print 'Testing against %s engine (%s)' % (DEFAULT_URI.partition(':')[0], DEFAULT_URI)
from dal import DAL, Field, Table, SQLALL
from dal import DAL, Field
from dal.objects import Table
from dal.helpers.classes import SQLALL
from gluon.cache import CacheInRam
ALLOWED_DATATYPES = [
@@ -1511,7 +1513,7 @@ class TestQuoting(unittest.TestCase):
class TestTableAndFieldCase(unittest.TestCase):
"""
at the Python level we should not allow db.C and db.c because of .table conflicts on windows
at the Python level we should not allow db.C and db.c because of .table conflicts on windows
but it should be possible to map two different names into distinct tables "c" and "C" at the Python level
By default Python models names should be mapped into lower case table names and assume case insensitivity.
"""
+4 -2
View File
@@ -32,11 +32,13 @@ IS_MONGODB = "mongodb" in DEFAULT_URI
IS_IMAP = "imap" in DEFAULT_URI
if IS_IMAP:
from dal import IMAPAdapter
from dal.adapters import IMAPAdapter
from contrib import mockimaplib
IMAPAdapter.driver = mockimaplib
from dal import DAL, Field, Table, SQLALL
from dal import DAL, Field
from dal.objects import Table
from dal.helpers.classes import SQLALL
def drop(table, cascade=None):
# mongodb implements drop()
+12 -12
View File
@@ -43,7 +43,7 @@ from gluon import *
from gluon.contrib.autolinks import expand_one
from gluon.contrib.markmin.markmin2html import \
replace_at_urls, replace_autolinks, replace_components
from gluon.dal import Row, Set, Query
from gluon.dal.objects import Table, Row, Set, Query
import gluon.serializers as serializers
@@ -1730,7 +1730,7 @@ class Auth(object):
except:
return id
ondelete = self.settings.ondelete
self.signature = db.Table(
self.signature = Table(
self.db, 'auth_signature',
Field('is_active', 'boolean',
default=True,
@@ -1786,7 +1786,7 @@ class Auth(object):
signature_list = [self.signature]
elif not signature:
signature_list = []
elif isinstance(signature, self.db.Table):
elif isinstance(signature, Table):
signature_list = [signature]
else:
signature_list = signature
@@ -4005,10 +4005,10 @@ class Crud(object):
formname=DEFAULT,
**attributes
):
if not (isinstance(table, self.db.Table) or table in self.db.tables) \
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, self.db.Table):
if not isinstance(table, Table):
table = self.db[table]
try:
record_id = record.id
@@ -4139,10 +4139,10 @@ class Crud(object):
)
def read(self, table, record):
if not (isinstance(table, self.db.Table) or table in self.db.tables) \
if not (isinstance(table, Table) or table in self.db.tables) \
or (isinstance(record, str) and not str(record).isdigit()):
raise HTTP(404)
if not isinstance(table, self.db.Table):
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('read', table, record):
redirect(self.settings.auth.settings.on_failed_authorization)
@@ -4167,9 +4167,9 @@ class Crud(object):
next=DEFAULT,
message=DEFAULT,
):
if not (isinstance(table, self.db.Table) or table in self.db.tables):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not isinstance(table, self.db.Table):
if not isinstance(table, Table):
table = self.db[table]
if not self.has_permission('delete', table, record_id):
redirect(self.settings.auth.settings.on_failed_authorization)
@@ -4197,13 +4197,13 @@ class Crud(object):
orderby=None,
limitby=None,
):
if not (isinstance(table, self.db.Table) or table in self.db.tables):
if not (isinstance(table, Table) or table in self.db.tables):
raise HTTP(404)
if not self.has_permission('select', table):
redirect(self.settings.auth.settings.on_failed_authorization)
#if record_id and not self.has_permission('select', table):
# redirect(self.settings.auth.settings.on_failed_authorization)
if not isinstance(table, self.db.Table):
if not isinstance(table, Table):
table = self.db[table]
if not query:
query = table.id > 0
@@ -4310,7 +4310,7 @@ class Crud(object):
validate = args.get('validate',True)
request = current.request
db = self.db
if not (isinstance(table, db.Table) or table in db.tables):
if not (isinstance(table, Table) or table in db.tables):
raise HTTP(404)
attributes = {}
for key in ('orderby', 'groupby', 'left', 'distinct', 'limitby', 'cache'):
+4 -4
View File
@@ -22,7 +22,7 @@ import decimal
import unicodedata
from cStringIO import StringIO
from gluon.utils import simple_hash, web2py_uuid, DIGEST_ALG_BY_SIZE
from gluon.dal import FieldVirtual, FieldMethod
from gluon.dal.objects import FieldVirtual, FieldMethod
regex_isint = re.compile('^[+-]?\d+$')
@@ -506,7 +506,7 @@ class IS_IN_DB(Validator):
sort=False,
_and=None,
):
from dal import Table
from dal.objects import Table
if isinstance(field, Table):
field = field._id
@@ -603,7 +603,7 @@ class IS_IN_DB(Validator):
if not [v for v in values if not v in self.theset]:
return (values, None)
else:
from dal import GoogleDatastoreAdapter
from dal.adapters import GoogleDatastoreAdapter
def count(values, s=self.dbset, f=field):
return s(f.belongs(map(int, values))).count()
@@ -648,7 +648,7 @@ class IS_NOT_IN_DB(Validator):
ignore_common_filters=False,
):
from dal import Table
from dal.objects import Table
if isinstance(field, Table):
field = field._id
+1 -1
View File
@@ -1086,7 +1086,7 @@ def start(cron=True):
print ProgramAuthor
print ProgramVersion
from dal import DRIVERS
from dal.adapters.base import DRIVERS
if not options.nobanner:
print 'Database drivers available: %s' % ', '.join(DRIVERS)