Update SQLAlchemy

This commit is contained in:
Ruud
2013-06-14 11:00:06 +02:00
parent 267ecfacab
commit 4aa6700ceb
124 changed files with 6500 additions and 5207 deletions

View File

@@ -1,5 +1,5 @@
# sqlalchemy/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -50,6 +50,8 @@ from sqlalchemy.sql import (
)
from sqlalchemy.types import (
BIGINT,
BINARY,
BLOB,
BOOLEAN,
BigInteger,
@@ -87,6 +89,7 @@ from sqlalchemy.types import (
TypeDecorator,
Unicode,
UnicodeText,
VARBINARY,
VARCHAR,
)
@@ -117,7 +120,7 @@ from sqlalchemy.engine import create_engine, engine_from_config
__all__ = sorted(name for name, obj in locals().items()
if not (name.startswith('_') or inspect.ismodule(obj)))
__version__ = '0.7.6'
__version__ = '0.7.10'
del inspect, sys

View File

@@ -13,8 +13,8 @@ typedef int Py_ssize_t;
#define PY_SSIZE_T_MAX INT_MAX
#define PY_SSIZE_T_MIN INT_MIN
typedef Py_ssize_t (*lenfunc)(PyObject *);
#define PyInt_FromSsize_t(x) PyInt_FromLong(x)
typedef intargfunc ssizeargfunc;
#define PyInt_FromSsize_t(x) PyInt_FromLong(x)
typedef intargfunc ssizeargfunc;
#endif
@@ -241,12 +241,13 @@ static PyObject *
BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
{
PyObject *processors, *values;
PyObject *processor, *value;
PyObject *processor, *value, *processed_value;
PyObject *row, *record, *result, *indexobject;
PyObject *exc_module, *exception;
PyObject *exc_module, *exception, *cstr_obj;
char *cstr_key;
long index;
int key_fallback = 0;
int tuple_check = 0;
if (PyInt_CheckExact(key)) {
index = PyInt_AS_LONG(key);
@@ -299,9 +300,16 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
if (exception == NULL)
return NULL;
cstr_key = PyString_AsString(key);
if (cstr_key == NULL)
// wow. this seems quite excessive.
cstr_obj = PyObject_Str(key);
if (cstr_obj == NULL)
return NULL;
cstr_key = PyString_AsString(cstr_obj);
if (cstr_key == NULL) {
Py_DECREF(cstr_obj);
return NULL;
}
Py_DECREF(cstr_obj);
PyErr_Format(exception,
"Ambiguous column name '%.200s' in result set! "
@@ -319,17 +327,28 @@ BaseRowProxy_subscript(BaseRowProxy *self, PyObject *key)
return NULL;
row = self->row;
if (PyTuple_CheckExact(row))
if (PyTuple_CheckExact(row)) {
value = PyTuple_GetItem(row, index);
else
tuple_check = 1;
}
else {
value = PySequence_GetItem(row, index);
tuple_check = 0;
}
if (value == NULL)
return NULL;
if (processor != Py_None) {
return PyObject_CallFunctionObjArgs(processor, value, NULL);
processed_value = PyObject_CallFunctionObjArgs(processor, value, NULL);
if (!tuple_check) {
Py_DECREF(value);
}
return processed_value;
} else {
Py_INCREF(value);
if (tuple_check) {
Py_INCREF(value);
}
return value;
}
}
@@ -356,7 +375,7 @@ BaseRowProxy_getattro(BaseRowProxy *self, PyObject *name)
tmp = BaseRowProxy_subscript(self, name);
if (tmp == NULL && PyErr_ExceptionMatches(PyExc_KeyError)) {
PyErr_Format(
PyExc_AttributeError,
PyExc_AttributeError,
"Could not locate column in row for column '%.200s'",
PyString_AsString(name)
);

View File

@@ -1,5 +1,5 @@
# connectors/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# connectors/mxodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -117,7 +117,7 @@ class MxODBCConnector(Connector):
return False
def _get_server_version_info(self, connection):
# eGenix suggests using conn.dbms_version instead
# eGenix suggests using conn.dbms_version instead
# of what we're doing here
dbapi_con = connection.connection
version = []

View File

@@ -63,6 +63,7 @@ class MySQLDBConnector(Connector):
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'connect_timeout', int)
util.coerce_kw_type(opts, 'read_timeout', int)
util.coerce_kw_type(opts, 'client_flag', int)
util.coerce_kw_type(opts, 'local_infile', int)
# Note: using either of the below will cause all strings to be returned

View File

@@ -1,5 +1,5 @@
# connectors/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -37,6 +37,10 @@ class PyODBCConnector(Connector):
# if the libessqlsrv.so is detected
easysoft = False
def __init__(self, supports_unicode_binds=None, **kw):
super(PyODBCConnector, self).__init__(**kw)
self._user_supports_unicode_binds = supports_unicode_binds
@classmethod
def dbapi(cls):
return __import__('pyodbc')
@@ -66,7 +70,7 @@ class PyODBCConnector(Connector):
if 'port' in keys and not 'port' in query:
port = ',%d' % int(keys.pop('port'))
connectors = ["DRIVER={%s}" %
connectors = ["DRIVER={%s}" %
keys.pop('driver', self.pyodbc_driver_name),
'Server=%s%s' % (keys.pop('host', ''), port),
'Database=%s' % keys.pop('database', '') ]
@@ -79,9 +83,9 @@ class PyODBCConnector(Connector):
connectors.append("Trusted_Connection=Yes")
# if set to 'Yes', the ODBC layer will try to automagically
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
# convert textual data from your database encoding to your
# client encoding. This should obviously be set to 'No' if
# you query a cp1253 encoded database from a latin1 client...
if 'odbc_autotranslate' in keys:
connectors.append("AutoTranslate=%s" %
keys.pop("odbc_autotranslate"))
@@ -119,8 +123,12 @@ class PyODBCConnector(Connector):
# have not tried pyodbc + python3.1 yet.
# Py2K
self.supports_unicode_statements = not self.freetds and not self.easysoft
self.supports_unicode_binds = (not self.freetds or
self.freetds_driver_version >= '0.91') and not self.easysoft
if self._user_supports_unicode_binds is not None:
self.supports_unicode_binds = self._user_supports_unicode_binds
else:
self.supports_unicode_binds = (not self.freetds or
self.freetds_driver_version >= '0.91'
) and not self.easysoft
# end Py2K
# run other initialization which asks for user name, etc.

View File

@@ -1,5 +1,5 @@
# connectors/zxJDBC.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -33,7 +33,7 @@ class ZxJDBCConnector(Connector):
def _create_jdbc_url(self, url):
"""Create a JDBC url from a :class:`~sqlalchemy.engine.url.URL`"""
return 'jdbc:%s://%s%s/%s' % (self.jdbc_db_name, url.host,
url.port is not None
url.port is not None
and ':%s' % url.port or '',
url.database)
@@ -41,8 +41,8 @@ class ZxJDBCConnector(Connector):
opts = self._driver_kwargs()
opts.update(url.query)
return [
[self._create_jdbc_url(url),
url.username, url.password,
[self._create_jdbc_url(url),
url.username, url.password,
self.jdbc_driver_name],
opts]

View File

@@ -1,5 +1,5 @@
# databases/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# dialects/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -9,9 +9,10 @@
"""
Support for the Microsoft Access database.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
.. note::
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
The Access dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
"""
@@ -124,7 +125,7 @@ class AccessExecutionContext(default.DefaultExecutionContext):
# self._last_inserted_ids[0] is None:
self.cursor.execute("SELECT @@identity AS lastrowid")
row = self.cursor.fetchone()
self._last_inserted_ids = [int(row[0])]
self._last_inserted_ids = [int(row[0])]
#+ self._last_inserted_ids[1:]
# print "LAST ROW ID", self._last_inserted_ids
@@ -259,7 +260,7 @@ class AccessDialect(default.DefaultDialect):
colargs = \
{
'nullable': not(col.Required or
'nullable': not(col.Required or
col.Attributes & const.dbAutoIncrField),
}
default = col.DefaultValue
@@ -286,7 +287,7 @@ class AccessDialect(default.DefaultDialect):
if isinstance(thecol.type, AcInteger) and \
not (thecol.default and
isinstance(
thecol.default.arg,
thecol.default.arg,
schema.Sequence
)):
thecol.autoincrement = False
@@ -321,7 +322,7 @@ class AccessDialect(default.DefaultDialect):
# This is necessary, so we get the latest updates
dtbs = daoEngine.OpenDatabase(connection.engine.url.database)
names = [t.Name for t in dtbs.TableDefs
names = [t.Name for t in dtbs.TableDefs
if t.Name[:4] != "MSys" and t.Name[:4] != "~TMP"]
dtbs.Close()
return names
@@ -372,7 +373,7 @@ class AccessCompiler(compiler.SQLCompiler):
'length': 'len',
}
def visit_function(self, func):
"""Access function names differ from the ANSI SQL names;
"""Access function names differ from the ANSI SQL names;
rewrite common ones"""
func.name = self.function_rewrites.get(func.name, func.name)
return super(AccessCompiler, self).visit_function(func)

View File

@@ -1,18 +1,22 @@
from sqlalchemy.dialects.drizzle import base, mysqldb
# default dialect
base.dialect = mysqldb.dialect
from sqlalchemy.dialects.drizzle.base import \
BIGINT, BINARY, BLOB, BOOLEAN, CHAR, DATE, DATETIME, \
DECIMAL, DOUBLE, ENUM, \
FLOAT, INTEGER, \
NUMERIC, REAL, TEXT, TIME, TIMESTAMP, \
VARBINARY, VARCHAR, dialect
BIGINT, BINARY, BLOB, \
BOOLEAN, CHAR, DATE, \
DATETIME, DECIMAL, DOUBLE, \
ENUM, FLOAT, INTEGER, \
NUMERIC, REAL, TEXT, \
TIME, TIMESTAMP, VARBINARY, \
VARCHAR, dialect
__all__ = (
'BIGINT', 'BINARY', 'BLOB', 'BOOLEAN', 'CHAR', 'DATE', 'DATETIME', 'DECIMAL', 'DOUBLE',
'ENUM', 'FLOAT', 'INTEGER',
'NUMERIC', 'SET', 'REAL', 'TEXT', 'TIME', 'TIMESTAMP',
'VARBINARY', 'VARCHAR', 'dialect'
'BIGINT', 'BINARY', 'BLOB',
'BOOLEAN', 'CHAR', 'DATE',
'DATETIME', 'DECIMAL', 'DOUBLE',
'ENUM', 'FLOAT', 'INTEGER',
'NUMERIC', 'REAL', 'TEXT',
'TIME', 'TIMESTAMP', 'VARBINARY',
'VARCHAR', 'dialect'
)

View File

@@ -1,138 +1,36 @@
# drizzle/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2010-2011 Monty Taylor <mordred@inaugust.com>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the Drizzle database.
Supported Versions and Features
-------------------------------
Drizzle is a variant of MySQL. Unlike MySQL, Drizzle's default storage engine
is InnoDB (transactions, foreign-keys) rather than MyISAM. For more
`Notable Differences <http://docs.drizzle.org/mysql_differences.html>`_, visit
the `Drizzle Documentation <http://docs.drizzle.org/index.html>`_.
SQLAlchemy supports the Drizzle database starting with 2010.08.
with capabilities increasing with more modern servers.
Most available DBAPI drivers are supported; see below.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 2010.08
Table Reflection 2010.08
DDL Generation 2010.08
utf8/Full Unicode Connections 2010.08
Transactions 2010.08
Two-Phase Transactions 2010.08
Nested Transactions 2010.08
===================================== ===============
See the official Drizzle documentation for detailed information about features
supported in any given server release.
The SQLAlchemy Drizzle dialect leans heavily on the MySQL dialect, so much of
the :doc:`SQLAlchemy MySQL <mysql>` documentation is also relevant.
Connecting
----------
See the API documentation on individual drivers for details on connecting.
Connection Timeouts
-------------------
Drizzle features an automatic connection close behavior, for connections that
have been idle for eight hours or more. To circumvent having this issue, use
the ``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('drizzle+mysqldb://...', pool_recycle=3600)
Storage Engines
---------------
Drizzle defaults to the ``InnoDB`` storage engine, which is transactional.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
a ``drizzle_engine='whatever'`` to the ``Table`` constructor. Any Drizzle table
creation option can be specified in this syntax::
Table('mytable', metadata,
Column('data', String(32)),
drizzle_engine='InnoDB',
)
Keys
----
Not all Drizzle storage engines support foreign keys. For ``BlitzDB`` and
similar engines, the information loaded by table reflection will not include
foreign keys. For these tables, you may supply a
:class:`~sqlalchemy.ForeignKeyConstraint` at reflection time::
Table('mytable', metadata,
ForeignKeyConstraint(['other_id'], ['othertable.other_id']),
autoload=True
)
When creating tables, SQLAlchemy will automatically set ``AUTO_INCREMENT`` on
an integer primary key column::
>>> t = Table('mytable', metadata,
... Column('mytable_id', Integer, primary_key=True)
... )
>>> t.create()
CREATE TABLE mytable (
id INTEGER NOT NULL AUTO_INCREMENT,
PRIMARY KEY (id)
)
You can disable this behavior by supplying ``autoincrement=False`` to the
:class:`~sqlalchemy.Column`. This flag can also be used to enable
auto-increment on a secondary column in a multi-column key for some storage
engines::
Table('mytable', metadata,
Column('gid', Integer, primary_key=True, autoincrement=False),
Column('id', Integer, primary_key=True)
)
Drizzle SQL Extensions
----------------------
Many of the Drizzle SQL extensions are handled through SQLAlchemy's generic
function and operator support::
table.select(table.c.password==func.md5('plaintext'))
table.select(table.c.username.op('regexp')('^[a-d]'))
And of course any valid Drizzle statement can be executed as a string as well.
Some limited direct support for Drizzle extensions to SQL is currently
available.
* SELECT pragma::
select(..., prefixes=['HIGH_PRIORITY', 'SQL_SMALL_RESULT'])
* UPDATE with LIMIT::
update(..., drizzle_limit=10)
See the individual driver sections below for details on connecting.
"""
import datetime, inspect, re, sys
from sqlalchemy import schema as sa_schema
from sqlalchemy import exc, log, sql, util
from sqlalchemy.sql import operators as sql_operators
from sqlalchemy.sql import functions as sql_functions
from sqlalchemy.sql import compiler
from array import array as _array
from sqlalchemy.engine import reflection
from sqlalchemy.engine import base as engine_base, default
from sqlalchemy import exc
from sqlalchemy import log
from sqlalchemy import types as sqltypes
from sqlalchemy.engine import reflection
from sqlalchemy.dialects.mysql import base as mysql_dialect
from sqlalchemy.types import DATE, DATETIME, BOOLEAN, TIME, \
BLOB, BINARY, VARBINARY
BLOB, BINARY, VARBINARY
class _NumericType(object):
"""Base for Drizzle numeric types."""
@@ -140,6 +38,7 @@ class _NumericType(object):
def __init__(self, **kw):
super(_NumericType, self).__init__(**kw)
class _FloatType(_NumericType, sqltypes.Float):
def __init__(self, precision=None, scale=None, asdecimal=True, **kw):
if isinstance(self, (REAL, DOUBLE)) and \
@@ -147,23 +46,22 @@ class _FloatType(_NumericType, sqltypes.Float):
(precision is None and scale is not None) or
(precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
super(_FloatType, self).__init__(precision=precision,
asdecimal=asdecimal, **kw)
self.scale = scale
class _StringType(mysql_dialect._StringType):
"""Base for Drizzle string types."""
def __init__(self, collation=None,
binary=False,
**kw):
def __init__(self, collation=None, binary=False, **kw):
kw['national'] = False
super(_StringType, self).__init__(collation=collation,
binary=binary,
**kw)
super(_StringType, self).__init__(collation=collation, binary=binary,
**kw)
class NUMERIC(_NumericType, sqltypes.NUMERIC):
@@ -180,7 +78,9 @@ class NUMERIC(_NumericType, sqltypes.NUMERIC):
:param scale: The number of digits after the decimal point.
"""
super(NUMERIC, self).__init__(precision=precision, scale=scale, asdecimal=asdecimal, **kw)
super(NUMERIC, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class DECIMAL(_NumericType, sqltypes.DECIMAL):
@@ -215,9 +115,11 @@ class DOUBLE(_FloatType):
:param scale: The number of digits after the decimal point.
"""
super(DOUBLE, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class REAL(_FloatType, sqltypes.REAL):
"""Drizzle REAL type."""
@@ -232,9 +134,11 @@ class REAL(_FloatType, sqltypes.REAL):
:param scale: The number of digits after the decimal point.
"""
super(REAL, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
class FLOAT(_FloatType, sqltypes.FLOAT):
"""Drizzle FLOAT type."""
@@ -249,42 +153,46 @@ class FLOAT(_FloatType, sqltypes.FLOAT):
:param scale: The number of digits after the decimal point.
"""
super(FLOAT, self).__init__(precision=precision, scale=scale,
asdecimal=asdecimal, **kw)
def bind_processor(self, dialect):
return None
class INTEGER(sqltypes.INTEGER):
"""Drizzle INTEGER type."""
__visit_name__ = 'INTEGER'
def __init__(self, **kw):
"""Construct an INTEGER.
"""Construct an INTEGER."""
"""
super(INTEGER, self).__init__(**kw)
class BIGINT(sqltypes.BIGINT):
"""Drizzle BIGINTEGER type."""
__visit_name__ = 'BIGINT'
def __init__(self, **kw):
"""Construct a BIGINTEGER.
"""Construct a BIGINTEGER."""
"""
super(BIGINT, self).__init__(**kw)
class _DrizzleTime(mysql_dialect._MSTime):
"""Drizzle TIME type."""
class TIMESTAMP(sqltypes.TIMESTAMP):
"""Drizzle TIMESTAMP type."""
__visit_name__ = 'TIMESTAMP'
class TEXT(_StringType, sqltypes.TEXT):
"""Drizzle TEXT type, for text up to 2^16 characters."""
@@ -306,8 +214,10 @@ class TEXT(_StringType, sqltypes.TEXT):
only the collation of character data.
"""
super(TEXT, self).__init__(length=length, **kw)
class VARCHAR(_StringType, sqltypes.VARCHAR):
"""Drizzle VARCHAR type, for variable-length character data."""
@@ -325,8 +235,10 @@ class VARCHAR(_StringType, sqltypes.VARCHAR):
only the collation of character data.
"""
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Drizzle CHAR type, for fixed-length character data."""
@@ -345,8 +257,10 @@ class CHAR(_StringType, sqltypes.CHAR):
compatible with the national character set.
"""
super(CHAR, self).__init__(length=length, **kwargs)
class ENUM(mysql_dialect.ENUM):
"""Drizzle ENUM type."""
@@ -363,8 +277,9 @@ class ENUM(mysql_dialect.ENUM):
:param strict: Defaults to False: ensure that a given value is in this
ENUM's range of permissible values when inserting or updating rows.
Note that Drizzle will not raise a fatal error if you attempt to store
an out of range value- an alternate value will be stored instead.
Note that Drizzle will not raise a fatal error if you attempt to
store an out of range value- an alternate value will be stored
instead.
(See Drizzle ENUM documentation.)
:param collation: Optional, a column-level collation for this string
@@ -390,12 +305,15 @@ class ENUM(mysql_dialect.ENUM):
literals for you. This is a transitional option.
"""
super(ENUM, self).__init__(*enums, **kw)
class _DrizzleBoolean(sqltypes.Boolean):
def get_dbapi_type(self, dbapi):
return dbapi.NUMERIC
colspecs = {
sqltypes.Numeric: NUMERIC,
sqltypes.Float: FLOAT,
@@ -404,6 +322,7 @@ colspecs = {
sqltypes.Boolean: _DrizzleBoolean,
}
# All the types we have in Drizzle
ischema_names = {
'BIGINT': BIGINT,
@@ -427,6 +346,7 @@ ischema_names = {
'VARCHAR': VARCHAR,
}
class DrizzleCompiler(mysql_dialect.MySQLCompiler):
def visit_typeclause(self, typeclause):
@@ -439,7 +359,7 @@ class DrizzleCompiler(mysql_dialect.MySQLCompiler):
def visit_cast(self, cast, **kwargs):
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause)
return self.process(cast.clause)
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
@@ -447,12 +367,13 @@ class DrizzleCompiler(mysql_dialect.MySQLCompiler):
class DrizzleDDLCompiler(mysql_dialect.MySQLDDLCompiler):
pass
class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler):
def _extend_numeric(self, type_, spec):
return spec
def _extend_string(self, type_, defaults, spec):
"""Extend a string-type declaration with standard SQL
"""Extend a string-type declaration with standard SQL
COLLATE annotations and Drizzle specific extensions.
"""
@@ -492,11 +413,16 @@ class DrizzleTypeCompiler(mysql_dialect.MySQLTypeCompiler):
class DrizzleExecutionContext(mysql_dialect.MySQLExecutionContext):
pass
class DrizzleIdentifierPreparer(mysql_dialect.MySQLIdentifierPreparer):
pass
class DrizzleDialect(mysql_dialect.MySQLDialect):
"""Details of the Drizzle dialect. Not used directly in application code."""
"""Details of the Drizzle dialect.
Not used directly in application code.
"""
name = 'drizzle'
@@ -505,7 +431,6 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
supports_native_boolean = True
supports_views = False
default_paramstyle = 'format'
colspecs = colspecs
@@ -516,8 +441,8 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
preparer = DrizzleIdentifierPreparer
def on_connect(self):
"""Force autocommit - Drizzle Bug#707842 doesn't set this
properly"""
"""Force autocommit - Drizzle Bug#707842 doesn't set this properly"""
def connect(conn):
conn.autocommit(False)
return connect
@@ -535,6 +460,7 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
"""Return a Unicode SHOW TABLES from a given schema."""
if schema is not None:
current_schema = schema
else:
@@ -554,8 +480,8 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
Cached per-connection. This value can not change without a server
restart.
"""
return 0
def _detect_collations(self, connection):
@@ -566,7 +492,9 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
collations = {}
charset = self._connection_charset
rs = connection.execute('SELECT CHARACTER_SET_NAME, COLLATION_NAME from data_dictionary.COLLATIONS')
rs = connection.execute(
'SELECT CHARACTER_SET_NAME, COLLATION_NAME FROM'
' data_dictionary.COLLATIONS')
for row in self._compat_fetchall(rs, charset):
collations[row[0]] = row[1]
return collations
@@ -575,8 +503,7 @@ class DrizzleDialect(mysql_dialect.MySQLDialect):
"""Detect and adjust for the ANSI_QUOTES sql mode."""
self._server_ansiquotes = False
self._backslash_escapes = False
log.class_logger(DrizzleDialect)
log.class_logger(DrizzleDialect)

View File

@@ -1,11 +1,9 @@
"""Support for the Drizzle database via the Drizzle-python adapter.
"""Support for the Drizzle database via the mysql-python adapter.
Drizzle-Python is available at:
MySQL-Python is available at:
http://sourceforge.net/projects/mysql-python
At least version 1.2.1 or 1.2.2 should be used.
Connecting
-----------
@@ -13,37 +11,22 @@ Connect string format::
drizzle+mysqldb://<user>:<password>@<host>[:<port>]/<dbname>
Unicode
-------
Drizzle accommodates Python ``unicode`` objects directly and
uses the ``utf8`` encoding in all cases.
Known Issues
-------------
Drizzle-python at least as of version 1.2.2 has a serious memory leak related
to unicode conversion, a feature which is disabled via ``use_unicode=0``.
The recommended connection form with SQLAlchemy is::
engine = create_engine('mysql://scott:tiger@localhost/test?charset=utf8&use_unicode=0', pool_recycle=3600)
"""
from sqlalchemy.dialects.drizzle.base import (DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler, DrizzleIdentifierPreparer)
from sqlalchemy.dialects.drizzle.base import (
DrizzleDialect,
DrizzleExecutionContext,
DrizzleCompiler,
DrizzleIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector
)
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector)
class DrizzleExecutionContext_mysqldb(
MySQLDBExecutionContext,
DrizzleExecutionContext):
class DrizzleExecutionContext_mysqldb(MySQLDBExecutionContext,
DrizzleExecutionContext):
pass
@@ -51,11 +34,11 @@ class DrizzleCompiler_mysqldb(MySQLDBCompiler, DrizzleCompiler):
pass
class DrizzleIdentifierPreparer_mysqldb(
MySQLDBIdentifierPreparer,
DrizzleIdentifierPreparer):
class DrizzleIdentifierPreparer_mysqldb(MySQLDBIdentifierPreparer,
DrizzleIdentifierPreparer):
pass
class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
execution_ctx_cls = DrizzleExecutionContext_mysqldb
statement_compiler = DrizzleCompiler_mysqldb
@@ -63,6 +46,7 @@ class DrizzleDialect_mysqldb(MySQLDBConnector, DrizzleDialect):
def _detect_charset(self, connection):
"""Sniff out the character set in use for connection results."""
return 'utf8'

View File

@@ -1,5 +1,5 @@
# firebird/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -14,7 +14,7 @@ from sqlalchemy.dialects.firebird.base import \
dialect
__all__ = (
'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
'SMALLINT', 'BIGINT', 'FLOAT', 'FLOAT', 'DATE', 'TIME',
'TEXT', 'NUMERIC', 'FLOAT', 'TIMESTAMP', 'VARCHAR', 'CHAR', 'BLOB',
'dialect'
)

View File

@@ -1,5 +1,5 @@
# firebird/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -135,7 +135,7 @@ class VARCHAR(_StringType, sqltypes.VARCHAR):
__visit_name__ = 'VARCHAR'
def __init__(self, length = None, **kwargs):
super(VARCHAR, self).__init__(length=length, **kwargs)
super(VARCHAR, self).__init__(length=length, **kwargs)
class CHAR(_StringType, sqltypes.CHAR):
"""Firebird CHAR type"""
@@ -164,7 +164,7 @@ ischema_names = {
}
# TODO: date conversion types (should be implemented as _FBDateTime,
# TODO: date conversion types (should be implemented as _FBDateTime,
# _FBDate, etc. as bind/result functionality is required)
class FBTypeCompiler(compiler.GenericTypeCompiler):
@@ -198,7 +198,7 @@ class FBTypeCompiler(compiler.GenericTypeCompiler):
class FBCompiler(sql.compiler.SQLCompiler):
"""Firebird specific idiosincrasies"""
"""Firebird specific idiosyncrasies"""
def visit_mod(self, binary, **kw):
# Firebird lacks a builtin modulo operator, but there is
@@ -293,7 +293,7 @@ class FBCompiler(sql.compiler.SQLCompiler):
class FBDDLCompiler(sql.compiler.DDLCompiler):
"""Firebird syntactic idiosincrasies"""
"""Firebird syntactic idiosyncrasies"""
def visit_create_sequence(self, create):
"""Generate a ``CREATE GENERATOR`` statement for the sequence."""
@@ -339,7 +339,7 @@ class FBExecutionContext(default.DefaultExecutionContext):
"""Get the next value from the sequence using ``gen_id()``."""
return self._execute_scalar(
"SELECT gen_id(%s, 1) FROM rdb$database" %
"SELECT gen_id(%s, 1) FROM rdb$database" %
self.dialect.identifier_preparer.format_sequence(seq),
type_
)
@@ -418,7 +418,7 @@ class FBDialect(default.DefaultDialect):
return name
def has_table(self, connection, table_name, schema=None):
"""Return ``True`` if the given table exists, ignoring
"""Return ``True`` if the given table exists, ignoring
the `schema`."""
tblqry = """
@@ -489,8 +489,8 @@ class FBDialect(default.DefaultDialect):
return pkfields
@reflection.cache
def get_column_sequence(self, connection,
table_name, column_name,
def get_column_sequence(self, connection,
table_name, column_name,
schema=None, **kw):
tablename = self.denormalize_name(table_name)
colname = self.denormalize_name(column_name)
@@ -528,7 +528,7 @@ class FBDialect(default.DefaultDialect):
COALESCE(cs.rdb$bytes_per_character,1) AS flen,
f.rdb$field_precision AS fprec,
f.rdb$field_scale AS fscale,
COALESCE(r.rdb$default_source,
COALESCE(r.rdb$default_source,
f.rdb$default_source) AS fdefault
FROM rdb$relation_fields r
JOIN rdb$fields f ON r.rdb$field_source=f.rdb$field_name
@@ -563,7 +563,7 @@ class FBDialect(default.DefaultDialect):
coltype = sqltypes.NULLTYPE
elif colspec == 'INT64':
coltype = coltype(
precision=row['fprec'],
precision=row['fprec'],
scale=row['fscale'] * -1)
elif colspec in ('VARYING', 'CSTRING'):
coltype = coltype(row['flen'])
@@ -582,7 +582,7 @@ class FBDialect(default.DefaultDialect):
if row['fdefault'] is not None:
# the value comes down as "DEFAULT 'value'": there may be
# more than one whitespace around the "DEFAULT" keyword
# and it may also be lower case
# and it may also be lower case
# (see also http://tracker.firebirdsql.org/browse/CORE-356)
defexpr = row['fdefault'].lstrip()
assert defexpr[:8].rstrip().upper() == \

View File

@@ -1,5 +1,5 @@
# firebird/kinterbasdb.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -17,20 +17,20 @@ Kinterbasedb backend specific keyword arguments are:
SQLAlchemy uses 200 with Unicode, datetime and decimal support (see
details__).
* concurrency_level - set the backend policy with regards to threading
* concurrency_level - set the backend policy with regards to threading
issues: by default SQLAlchemy uses policy 1 (see details__).
* enable_rowcount - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
* enable_rowcount - True by default, setting this to False disables
the usage of "cursor.rowcount" with the
Kinterbasdb dialect, which SQLAlchemy ordinarily calls upon automatically
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
after any UPDATE or DELETE statement. When disabled, SQLAlchemy's
ResultProxy will return -1 for result.rowcount. The rationale here is
that Kinterbasdb requires a second round trip to the database when
.rowcount is called - since SQLA's resultproxy automatically closes
the cursor after a non-result-returning statement, rowcount must be
called, if at all, before the result object is returned. Additionally,
cursor.rowcount may not return correct results with older versions
of Firebird, and setting this flag to False will also cause the
of Firebird, and setting this flag to False will also cause the
SQLAlchemy ORM to ignore its usage. The behavior can also be controlled on a
per-execution basis using the `enable_rowcount` option with
:meth:`execution_options()`::
@@ -64,7 +64,7 @@ class _FBNumeric_kinterbasdb(sqltypes.Numeric):
class FBExecutionContext_kinterbasdb(FBExecutionContext):
@property
def rowcount(self):
if self.execution_options.get('enable_rowcount',
if self.execution_options.get('enable_rowcount',
self.dialect.enable_rowcount):
return self.cursor.rowcount
else:
@@ -135,7 +135,7 @@ class FBDialect_kinterbasdb(FBDialect):
# that for backward compatibility reasons returns a string like
# LI-V6.3.3.12981 Firebird 2.0
# where the first version is a fake one resembling the old
# Interbase signature.
# Interbase signature.
fbconn = connection.connection
version = fbconn.server_version
@@ -159,7 +159,7 @@ class FBDialect_kinterbasdb(FBDialect):
msg = str(e)
return ('Unable to complete network request to host' in msg or
'Invalid connection state' in msg or
'Invalid cursor state' in msg or
'Invalid cursor state' in msg or
'connection shutdown' in msg)
else:
return False

View File

@@ -1,5 +1,5 @@
# informix/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# informix/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
# coding: gbk
#
# This module is part of SQLAlchemy and is released under
@@ -7,8 +7,11 @@
"""Support for the Informix database.
This dialect is mostly functional as of SQLAlchemy 0.6.5.
.. note::
The Informix dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled.
"""
@@ -23,7 +26,7 @@ from sqlalchemy import types as sqltypes
RESERVED_WORDS = set(
["abs", "absolute", "access", "access_method", "acos", "active", "add",
"address", "add_months", "admin", "after", "aggregate", "alignment",
"all", "allocate", "all_rows", "altere", "and", "ansi", "any", "append",
"all", "allocate", "all_rows", "alter", "and", "ansi", "any", "append",
"array", "as", "asc", "ascii", "asin", "at", "atan", "atan2", "attach",
"attributes", "audit", "authentication", "authid", "authorization",
"authorized", "auto", "autofree", "auto_reprepare", "auto_stat_mode",
@@ -463,7 +466,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.constrname as cons_name,
t4.colname as local_column, t7.tabname as remote_table,
t6.colname as remote_column, t7.owner as remote_owner
t6.colname as remote_column, t7.owner as remote_owner
from sysconstraints as t1 , systables as t2 ,
sysindexes as t3 , syscolumns as t4 ,
sysreferences as t5 , syscolumns as t6 , systables as t7 ,
@@ -472,7 +475,7 @@ class InformixDialect(default.DefaultDialect):
and t3.tabid = t2.tabid and t3.idxname = t1.idxname
and t4.tabid = t2.tabid and t4.colno in (t3.part1, t3.part2, t3.part3,
t3.part4, t3.part5, t3.part6, t3.part7, t3.part8, t3.part9, t3.part10,
t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16)
t3.part11, t3.part11, t3.part12, t3.part13, t3.part4, t3.part15, t3.part16)
and t5.constrid = t1.constrid and t8.constrid = t5.primary
and t6.tabid = t5.ptabid and t6.colno in (t9.part1, t9.part2, t9.part3,
t9.part4, t9.part5, t9.part6, t9.part7, t9.part8, t9.part9, t9.part10,
@@ -519,7 +522,7 @@ class InformixDialect(default.DefaultDialect):
# Select the column positions from sysindexes for sysconstraints
data = connection.execute(
"""select t2.*
"""select t2.*
from systables as t1, sysindexes as t2, sysconstraints as t3
where t1.tabid=t2.tabid and t1.tabname=? and t1.owner=?
and t2.idxname=t3.idxname and t3.constrtype='P'""",
@@ -541,7 +544,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.colname
from syscolumns as t1, systables as t2
where t2.tabname=? and t1.tabid = t2.tabid and
where t2.tabname=? and t1.tabid = t2.tabid and
t1.colno in (%s)""" % place_holder,
table_name, *colpositions
).fetchall()
@@ -565,7 +568,7 @@ class InformixDialect(default.DefaultDialect):
c = connection.execute(
"""select t1.colname
from syscolumns as t1, systables as t2
where t2.tabname=? and t1.tabid = t2.tabid and
where t2.tabname=? and t1.tabid = t2.tabid and
t1.colno in (%s)""" % place_holder,
table_name, *colnames
).fetchall()

View File

@@ -1,5 +1,5 @@
# informix/informixdb.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# maxdb/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,14 +1,15 @@
# maxdb/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the MaxDB database.
This dialect is *not* ported to SQLAlchemy 0.6 or 0.7.
.. note::
This dialect is *not* tested on SQLAlchemy 0.6 or 0.7.
The MaxDB dialect is **non-functional as of SQLAlchemy 0.6**,
pending development efforts to bring it up-to-date.
Overview
--------
@@ -254,7 +255,7 @@ class MaxTimestamp(sqltypes.DateTime):
value[20:])])
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
@@ -282,18 +283,18 @@ class MaxDate(sqltypes.Date):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[4:6]),
return datetime.date(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
if value is None:
return None
else:
return datetime.date(int(value[0:4]), int(value[5:7]),
return datetime.date(int(value[0:4]), int(value[5:7]),
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process
@@ -321,7 +322,7 @@ class MaxTime(sqltypes.Time):
if value is None:
return None
else:
return datetime.time(int(value[0:4]), int(value[4:6]),
return datetime.time(int(value[0:4]), int(value[4:6]),
int(value[6:8]))
elif dialect.datetimeformat == 'iso':
def process(value):
@@ -332,7 +333,7 @@ class MaxTime(sqltypes.Time):
int(value[8:10]))
else:
raise exc.InvalidRequestError(
"datetimeformat '%s' is not supported." %
"datetimeformat '%s' is not supported." %
dialect.datetimeformat)
return process

View File

@@ -1,5 +1,5 @@
# maxdb/sapdb.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# mssql/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -18,9 +18,9 @@ from sqlalchemy.dialects.mssql.base import \
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'INTEGER', 'BIGINT', 'SMALLINT', 'TINYINT', 'VARCHAR', 'NVARCHAR', 'CHAR',
'NCHAR', 'TEXT', 'NTEXT', 'DECIMAL', 'NUMERIC', 'FLOAT', 'DATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'DATETIME2', 'DATETIMEOFFSET', 'DATE', 'TIME', 'SMALLDATETIME',
'BINARY', 'VARBINARY', 'BIT', 'REAL', 'IMAGE', 'TIMESTAMP',
'MONEY', 'SMALLMONEY', 'UNIQUEIDENTIFIER', 'SQL_VARIANT', 'dialect'
)

View File

@@ -1,5 +1,5 @@
# mssql/adodbapi.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -16,7 +16,7 @@ import sys
class MSDateTime_adodbapi(MSDateTime):
def result_processor(self, dialect, coltype):
def process(value):
# adodbapi will return datetimes with empty time
# adodbapi will return datetimes with empty time
# values as datetime.date() objects.
# Promote them back to full datetime.datetime()
if type(value) is datetime.date:
@@ -49,7 +49,7 @@ class MSDialect_adodbapi(MSDialect):
connectors = ["Provider=SQLOLEDB"]
if 'port' in keys:
connectors.append ("Data Source=%s, %s" %
connectors.append ("Data Source=%s, %s" %
(keys.get("host"), keys.get("port")))
else:
connectors.append ("Data Source=%s" % keys.get("host"))

View File

@@ -1,5 +1,5 @@
# mssql/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -74,7 +74,7 @@ will yield::
SELECT TOP n
If using SQL Server 2005 or above, LIMIT with OFFSET
support is available through the ``ROW_NUMBER OVER`` construct.
support is available through the ``ROW_NUMBER OVER`` construct.
For versions below 2005, LIMIT with OFFSET usage will fail.
Nullability
@@ -107,10 +107,10 @@ Compatibility Levels
--------------------
MSSQL supports the notion of setting compatibility levels at the
database level. This allows, for instance, to run a database that
is compatibile with SQL2000 while running on a SQL2005 database
is compatible with SQL2000 while running on a SQL2005 database
server. ``server_version_info`` will always return the database
server version information (in this case SQL2005) and not the
compatibiility level information. Because of this, if running under
compatibility level information. Because of this, if running under
a backwards compatibility mode SQLAlchemy may attempt to use T-SQL
statements that are unable to be parsed by the database server.
@@ -119,14 +119,14 @@ Triggers
SQLAlchemy by default uses OUTPUT INSERTED to get at newly
generated primary key values via IDENTITY columns or other
server side defaults. MS-SQL does not
server side defaults. MS-SQL does not
allow the usage of OUTPUT INSERTED on tables that have triggers.
To disable the usage of OUTPUT INSERTED on a per-table basis,
specify ``implicit_returning=False`` for each :class:`.Table`
which has triggers::
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
Table('mytable', metadata,
Column('id', Integer, primary_key=True),
# ...,
implicit_returning=False
)
@@ -144,11 +144,11 @@ This option can also be specified engine-wide using the
Enabling Snapshot Isolation
---------------------------
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
Not necessarily specific to SQLAlchemy, SQL Server has a default transaction
isolation mode that locks entire tables, and causes even mildly concurrent
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
applications to have long held locks and frequent deadlocks.
Enabling snapshot isolation for the database as a whole is recommended
for modern levels of concurrency support. This is accomplished via the
following ALTER DATABASE commands executed at the SQL prompt::
ALTER DATABASE MyDatabase SET ALLOW_SNAPSHOT_ISOLATION ON
@@ -161,14 +161,15 @@ http://msdn.microsoft.com/en-us/library/ms175095.aspx.
Scalar Select Comparisons
-------------------------
The MSSQL dialect contains a legacy behavior whereby comparing
a scalar select to a value using the ``=`` or ``!=`` operator
will resolve to IN or NOT IN, respectively. This behavior is
deprecated and will be removed in 0.8 - the ``s.in_()``/``~s.in_()`` operators
should be used when IN/NOT IN are desired.
.. deprecated:: 0.8
The MSSQL dialect contains a legacy behavior whereby comparing
a scalar select to a value using the ``=`` or ``!=`` operator
will resolve to IN or NOT IN, respectively. This behavior
will be removed in 0.8 - the ``s.in_()``/``~s.in_()`` operators
should be used when IN/NOT IN are desired.
For the time being, the existing behavior prevents a comparison
between scalar select and another value that actually wants to use ``=``.
between scalar select and another value that actually wants to use ``=``.
To remove this behavior in a forwards-compatible way, apply this
compilation rule by placing the following code at the module import
level::
@@ -176,7 +177,7 @@ level::
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import _BinaryExpression
from sqlalchemy.sql.compiler import SQLCompiler
@compiles(_BinaryExpression, 'mssql')
def override_legacy_binary(element, compiler, **kw):
return SQLCompiler.visit_binary(compiler, element, **kw)
@@ -272,7 +273,7 @@ class _MSDate(sqltypes.Date):
return value.date()
elif isinstance(value, basestring):
return datetime.date(*[
int(x or 0)
int(x or 0)
for x in self._reg.match(value).groups()
])
else:
@@ -303,7 +304,7 @@ class TIME(sqltypes.TIME):
return value.time()
elif isinstance(value, basestring):
return datetime.time(*[
int(x or 0)
int(x or 0)
for x in self._reg.match(value).groups()])
else:
return value
@@ -608,7 +609,7 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return self._extend("TEXT", type_)
def visit_VARCHAR(self, type_):
return self._extend("VARCHAR", type_,
return self._extend("VARCHAR", type_,
length = type_.length or 'max')
def visit_CHAR(self, type_):
@@ -618,7 +619,7 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
return self._extend("NCHAR", type_)
def visit_NVARCHAR(self, type_):
return self._extend("NVARCHAR", type_,
return self._extend("NVARCHAR", type_,
length = type_.length or 'max')
def visit_date(self, type_):
@@ -641,8 +642,8 @@ class MSTypeCompiler(compiler.GenericTypeCompiler):
def visit_VARBINARY(self, type_):
return self._extend(
"VARBINARY",
type_,
"VARBINARY",
type_,
length=type_.length or 'max')
def visit_boolean(self, type_):
@@ -689,18 +690,22 @@ class MSExecutionContext(default.DefaultExecutionContext):
not self.executemany
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
self.root_connection._cursor_execute(self.cursor,
"SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl),
())
def post_exec(self):
"""Disable IDENTITY_INSERT if enabled."""
conn = self.root_connection
if self._select_lastrowid:
if self.dialect.use_scope_identity:
self.cursor.execute(
"SELECT scope_identity() AS lastrowid", ())
conn._cursor_execute(self.cursor,
"SELECT scope_identity() AS lastrowid", ())
else:
self.cursor.execute("SELECT @@identity AS lastrowid", ())
conn._cursor_execute(self.cursor,
"SELECT @@identity AS lastrowid", ())
# fetchall() ensures the cursor is consumed without closing it
row = self.cursor.fetchall()[0]
self._lastrowid = int(row[0])
@@ -710,10 +715,11 @@ class MSExecutionContext(default.DefaultExecutionContext):
self._result_proxy = base.FullyBufferedResultProxy(self)
if self._enable_identity_insert:
self.cursor.execute(
conn._cursor_execute(self.cursor,
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.
format_table(self.compiled.statement.table)
format_table(self.compiled.statement.table),
()
)
def get_lastrowid(self):
@@ -723,7 +729,7 @@ class MSExecutionContext(default.DefaultExecutionContext):
if self._enable_identity_insert:
try:
self.cursor.execute(
"SET IDENTITY_INSERT %s OFF" %
"SET IDENTITY_INSERT %s OFF" %
self.dialect.identifier_preparer.\
format_table(self.compiled.statement.table)
)
@@ -766,12 +772,12 @@ class MSSQLCompiler(compiler.SQLCompiler):
def visit_concat_op(self, binary, **kw):
return "%s + %s" % \
(self.process(binary.left, **kw),
(self.process(binary.left, **kw),
self.process(binary.right, **kw))
def visit_match_op(self, binary, **kw):
return "CONTAINS (%s, %s)" % (
self.process(binary.left, **kw),
self.process(binary.left, **kw),
self.process(binary.right, **kw))
def get_select_precolumns(self, select):
@@ -803,27 +809,28 @@ class MSSQLCompiler(compiler.SQLCompiler):
so tries to wrap it in a subquery with ``row_number()`` criterion.
"""
if not getattr(select, '_mssql_visit', None) and select._offset:
if select._offset and not getattr(select, '_mssql_visit', None):
# to use ROW_NUMBER(), an ORDER BY is required.
orderby = self.process(select._order_by_clause)
if not orderby:
if not select._order_by_clause.clauses:
raise exc.CompileError('MSSQL requires an order_by when '
'using an offset.')
_offset = select._offset
_limit = select._limit
_order_by_clauses = select._order_by_clause.clauses
select = select._generate()
select._mssql_visit = True
select = select.column(
sql.literal_column("ROW_NUMBER() OVER (ORDER BY %s)" \
% orderby).label("mssql_rn")
sql.func.ROW_NUMBER().over(order_by=_order_by_clauses)
.label("mssql_rn")
).order_by(None).alias()
mssql_rn = sql.column('mssql_rn')
limitselect = sql.select([c for c in select.c if
c.key!='mssql_rn'])
limitselect.append_whereclause(mssql_rn> _offset)
c.key != 'mssql_rn'])
limitselect.append_whereclause(mssql_rn > _offset)
if _limit is not None:
limitselect.append_whereclause(mssql_rn<=(_limit + _offset))
limitselect.append_whereclause(mssql_rn <= (_limit + _offset))
return self.process(limitselect, iswrapper=True, **kwargs)
else:
return compiler.SQLCompiler.visit_select(self, select, **kwargs)
@@ -861,7 +868,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
return "SAVE TRANSACTION %s" % self.preparer.format_savepoint(savepoint_stmt)
def visit_rollback_to_savepoint(self, savepoint_stmt):
return ("ROLLBACK TRANSACTION %s"
return ("ROLLBACK TRANSACTION %s"
% self.preparer.format_savepoint(savepoint_stmt))
def visit_column(self, column, result_map=None, **kwargs):
@@ -875,15 +882,16 @@ class MSSQLCompiler(compiler.SQLCompiler):
if result_map is not None:
result_map[column.name.lower()] = \
(column.name, (column, ),
(column.name, (column, column.name,
column.key),
column.type)
return super(MSSQLCompiler, self).\
visit_column(converted,
visit_column(converted,
result_map=None, **kwargs)
return super(MSSQLCompiler, self).visit_column(column,
result_map=result_map,
return super(MSSQLCompiler, self).visit_column(column,
result_map=result_map,
**kwargs)
def visit_binary(self, binary, **kwargs):
@@ -892,27 +900,27 @@ class MSSQLCompiler(compiler.SQLCompiler):
"""
if (
isinstance(binary.left, expression._BindParamClause)
isinstance(binary.left, expression._BindParamClause)
and binary.operator == operator.eq
and not isinstance(binary.right, expression._BindParamClause)
):
return self.process(
expression._BinaryExpression(binary.right,
binary.left,
binary.operator),
expression._BinaryExpression(binary.right,
binary.left,
binary.operator),
**kwargs)
else:
if (
(binary.operator is operator.eq or
binary.operator is operator.ne)
(binary.operator is operator.eq or
binary.operator is operator.ne)
and (
(isinstance(binary.left, expression._FromGrouping)
and isinstance(binary.left.element,
expression._ScalarSelect))
or (isinstance(binary.right, expression._FromGrouping)
and isinstance(binary.right.element,
expression._ScalarSelect))
or isinstance(binary.left, expression._ScalarSelect)
(isinstance(binary.left, expression._FromGrouping)
and isinstance(binary.left.element,
expression._ScalarSelect))
or (isinstance(binary.right, expression._FromGrouping)
and isinstance(binary.right.element,
expression._ScalarSelect))
or isinstance(binary.left, expression._ScalarSelect)
or isinstance(binary.right, expression._ScalarSelect)
)
):
@@ -944,10 +952,10 @@ class MSSQLCompiler(compiler.SQLCompiler):
columns = [
self.process(
col_label(c),
within_columns_clause=True,
col_label(c),
within_columns_clause=True,
result_map=self.result_map
)
)
for c in expression._select_iterables(returning_cols)
]
return 'OUTPUT ' + ', '.join(columns)
@@ -967,7 +975,7 @@ class MSSQLCompiler(compiler.SQLCompiler):
label_select_column(select, column, asfrom)
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# "FOR UPDATE" is only allowed on "DECLARE CURSOR" which
# SQLAlchemy doesn't use
return ''
@@ -980,6 +988,22 @@ class MSSQLCompiler(compiler.SQLCompiler):
else:
return ""
def update_from_clause(self, update_stmt,
from_table, extra_froms,
from_hints,
**kw):
"""Render the UPDATE..FROM clause specific to MSSQL.
In MSSQL, if the UPDATE statement involves an alias of the table to
be updated, then the table itself must be added to the FROM list as
well. Otherwise, it is optional. Here, we add it regardless.
"""
return "FROM " + ', '.join(
t._compiler_dispatch(self, asfrom=True,
fromhints=from_hints, **kw)
for t in [from_table] + extra_froms)
class MSSQLStrictCompiler(MSSQLCompiler):
"""A subclass of MSSQLCompiler which disables the usage of bind
parameters where not allowed natively by MS-SQL.
@@ -993,14 +1017,14 @@ class MSSQLStrictCompiler(MSSQLCompiler):
def visit_in_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s IN %s" % (
self.process(binary.left, **kw),
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
def visit_notin_op(self, binary, **kw):
kw['literal_binds'] = True
return "%s NOT IN %s" % (
self.process(binary.left, **kw),
self.process(binary.left, **kw),
self.process(binary.right, **kw)
)
@@ -1029,7 +1053,7 @@ class MSSQLStrictCompiler(MSSQLCompiler):
class MSDDLCompiler(compiler.DDLCompiler):
def get_column_specification(self, column, **kwargs):
colspec = (self.preparer.format_column(column) + " "
colspec = (self.preparer.format_column(column) + " "
+ self.dialect.type_compiler.process(column.type))
if column.nullable is not None:
@@ -1040,7 +1064,7 @@ class MSDDLCompiler(compiler.DDLCompiler):
if column.table is None:
raise exc.CompileError(
"mssql requires Table-bound columns "
"mssql requires Table-bound columns "
"in order to generate DDL")
seq_col = column.table._autoincrement_column
@@ -1075,7 +1099,7 @@ class MSIdentifierPreparer(compiler.IdentifierPreparer):
reserved_words = RESERVED_WORDS
def __init__(self, dialect):
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
super(MSIdentifierPreparer, self).__init__(dialect, initial_quote='[',
final_quote=']')
def _escape_identifier(self, value):
@@ -1140,7 +1164,7 @@ class MSDialect(default.DefaultDialect):
super(MSDialect, self).initialize(connection)
if self.server_version_info[0] not in range(8, 17):
# FreeTDS with version 4.2 seems to report here
# a number like "95.10.255". Don't know what
# a number like "95.10.255". Don't know what
# that is. So emit warning.
util.warn(
"Unrecognized server version info '%s'. Version specific "
@@ -1241,11 +1265,11 @@ class MSDialect(default.DefaultDialect):
"join sys.schemas as sch on sch.schema_id=tab.schema_id "
"where tab.name = :tabname "
"and sch.name=:schname "
"and ind.is_primary_key=0",
"and ind.is_primary_key=0",
bindparams=[
sql.bindparam('tabname', tablename,
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
],
typemap = {
@@ -1272,9 +1296,9 @@ class MSDialect(default.DefaultDialect):
"where tab.name=:tabname "
"and sch.name=:schname",
bindparams=[
sql.bindparam('tabname', tablename,
sql.bindparam('tabname', tablename,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
],
typemap = {
@@ -1302,9 +1326,9 @@ class MSDialect(default.DefaultDialect):
"views.schema_id=sch.schema_id and "
"views.name=:viewname and sch.name=:schname",
bindparams=[
sql.bindparam('viewname', viewname,
sql.bindparam('viewname', viewname,
sqltypes.String(convert_unicode=True)),
sql.bindparam('schname', current_schema,
sql.bindparam('schname', current_schema,
sqltypes.String(convert_unicode=True))
]
)
@@ -1332,7 +1356,7 @@ class MSDialect(default.DefaultDialect):
row = c.fetchone()
if row is None:
break
(name, type, nullable, charlen,
(name, type, nullable, charlen,
numericprec, numericscale, default, collation) = (
row[columns.c.column_name],
row[columns.c.data_type],
@@ -1346,7 +1370,7 @@ class MSDialect(default.DefaultDialect):
coltype = self.ischema_names.get(type, None)
kwargs = {}
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
if coltype in (MSString, MSChar, MSNVarchar, MSNChar, MSText,
MSNText, MSBinary, MSVarBinary,
sqltypes.LargeBinary):
kwargs['length'] = charlen
@@ -1358,7 +1382,7 @@ class MSDialect(default.DefaultDialect):
if coltype is None:
util.warn(
"Did not recognize type '%s' of column '%s'" %
"Did not recognize type '%s' of column '%s'" %
(type, name))
coltype = sqltypes.NULLTYPE
else:
@@ -1382,7 +1406,7 @@ class MSDialect(default.DefaultDialect):
colmap[col['name']] = col
# We also run an sp_columns to check for identity columns:
cursor = connection.execute("sp_columns @table_name = '%s', "
"@table_owner = '%s'"
"@table_owner = '%s'"
% (tablename, current_schema))
ic = None
while True:
@@ -1401,7 +1425,7 @@ class MSDialect(default.DefaultDialect):
if ic is not None and self.server_version_info >= MS_2005_VERSION:
table_fullname = "%s.%s" % (current_schema, tablename)
cursor = connection.execute(
"select ident_seed('%s'), ident_incr('%s')"
"select ident_seed('%s'), ident_incr('%s')"
% (table_fullname, table_fullname)
)
@@ -1421,16 +1445,17 @@ class MSDialect(default.DefaultDialect):
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
R = ischema.key_constraints.alias('R')
# Primary key constraints
s = sql.select([C.c.column_name, TC.c.constraint_type],
sql.and_(TC.c.constraint_name == C.c.constraint_name,
TC.c.table_schema == C.c.table_schema,
C.c.table_name == tablename,
C.c.table_schema == current_schema)
)
@@ -1448,12 +1473,12 @@ class MSDialect(default.DefaultDialect):
RR = ischema.ref_constraints
# information_schema.table_constraints
TC = ischema.constraints
# information_schema.constraint_column_usage:
# information_schema.constraint_column_usage:
# the constrained column
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
C = ischema.key_constraints.alias('C')
# information_schema.constraint_column_usage:
# the referenced column
R = ischema.key_constraints.alias('R')
R = ischema.key_constraints.alias('R')
# Foreign key constraints
s = sql.select([C.c.column_name,

View File

@@ -1,5 +1,5 @@
# mssql/information_schema.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -15,8 +15,10 @@ class CoerceUnicode(TypeDecorator):
impl = Unicode
def process_bind_param(self, value, dialect):
# Py2K
if isinstance(value, str):
value = value.decode(dialect.encoding)
# end Py2K
return value
schemata = Table("SCHEMATA", ischema,

View File

@@ -1,5 +1,5 @@
# mssql/mxodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -41,13 +41,13 @@ simplistic statements.
For this reason, the mxODBC dialect uses the "native" mode by default only for
INSERT, UPDATE, and DELETE statements, and uses the escaped string mode for
all other statements.
all other statements.
This behavior can be controlled via
:meth:`~sqlalchemy.sql.expression.Executable.execution_options` using the
``native_odbc_execute`` flag with a value of ``True`` or ``False``, where a
value of ``True`` will unconditionally use native bind parameters and a value
of ``False`` will uncondtionally use string-escaped parameters.
of ``False`` will unconditionally use string-escaped parameters.
"""
@@ -55,7 +55,7 @@ of ``False`` will uncondtionally use string-escaped parameters.
from sqlalchemy import types as sqltypes
from sqlalchemy.connectors.mxodbc import MxODBCConnector
from sqlalchemy.dialects.mssql.pyodbc import MSExecutionContext_pyodbc
from sqlalchemy.dialects.mssql.base import (MSDialect,
from sqlalchemy.dialects.mssql.base import (MSDialect,
MSSQLStrictCompiler,
_MSDateTime, _MSDate, TIME)

View File

@@ -1,5 +1,5 @@
# mssql/pymssql.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -21,8 +21,8 @@ Sample connect string::
mssql+pymssql://<username>:<password>@<freetds_name>
Adding "?charset=utf8" or similar will cause pymssql to return
strings as Python unicode objects. This can potentially improve
performance in some scenarios as decoding of strings is
strings as Python unicode objects. This can potentially improve
performance in some scenarios as decoding of strings is
handled natively.
Limitations

View File

@@ -1,5 +1,5 @@
# mssql/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -35,14 +35,14 @@ Examples of pyodbc connection string URLs:
dsn=mydsn;UID=user;PWD=pass;LANGUAGE=us_english
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
* ``mssql+pyodbc://user:pass@host/db`` - connects using a connection
that would appear like::
DRIVER={SQL Server};Server=host;Database=db;UID=user;PWD=pass
* ``mssql+pyodbc://user:pass@host:123/db`` - connects using a connection
string which includes the port
information using the comma syntax. This will create the following
information using the comma syntax. This will create the following
connection string::
DRIVER={SQL Server};Server=host,123;Database=db;UID=user;PWD=pass
@@ -80,6 +80,34 @@ the python shell. For example::
>>> urllib.quote_plus('dsn=mydsn;Database=db')
'dsn%3Dmydsn%3BDatabase%3Ddb'
Unicode Binds
^^^^^^^^^^^^^
The current state of PyODBC on a unix backend with FreeTDS and/or
EasySoft is poor regarding unicode; different OS platforms and versions of UnixODBC
versus IODBC versus FreeTDS/EasySoft versus PyODBC itself dramatically
alter how strings are received. The PyODBC dialect attempts to use all the information
it knows to determine whether or not a Python unicode literal can be
passed directly to the PyODBC driver or not; while SQLAlchemy can encode
these to bytestrings first, some users have reported that PyODBC mis-handles
bytestrings for certain encodings and requires a Python unicode object,
while the author has observed widespread cases where a Python unicode
is completely misinterpreted by PyODBC, particularly when dealing with
the information schema tables used in table reflection, and the value
must first be encoded to a bytestring.
It is for this reason that whether or not unicode literals for bound
parameters be sent to PyODBC can be controlled using the
``supports_unicode_binds`` parameter to ``create_engine()``. When
left at its default of ``None``, the PyODBC dialect will use its
best guess as to whether or not the driver deals with unicode literals
well. When ``False``, unicode literals will be encoded first, and when
``True`` unicode literals will be passed straight through. This is an interim
flag that hopefully should not be needed when the unicode situation stabilizes
for unix + PyODBC.
.. versionadded:: 0.7.7
``supports_unicode_binds`` parameter to ``create_engine()``\ .
"""
@@ -171,7 +199,7 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
super(MSExecutionContext_pyodbc, self).pre_exec()
# don't embed the scope_identity select into an
# don't embed the scope_identity select into an
# "INSERT .. DEFAULT VALUES"
if self._select_lastrowid and \
self.dialect.use_scope_identity and \
@@ -183,11 +211,11 @@ class MSExecutionContext_pyodbc(MSExecutionContext):
def post_exec(self):
if self._embedded_scope_identity:
# Fetch the last inserted id from the manipulated statement
# We may have to skip over a number of result sets with
# We may have to skip over a number of result sets with
# no data (due to triggers, etc.)
while True:
try:
# fetchall() ensures the cursor is consumed
# fetchall() ensures the cursor is consumed
# without closing it (FreeTDS particularly)
row = self.cursor.fetchall()[0]
break
@@ -217,7 +245,8 @@ class MSDialect_pyodbc(PyODBCConnector, MSDialect):
def __init__(self, description_encoding='latin-1', **params):
super(MSDialect_pyodbc, self).__init__(**params)
self.description_encoding = description_encoding
self.use_scope_identity = self.dbapi and \
self.use_scope_identity = self.use_scope_identity and \
self.dbapi and \
hasattr(self.dbapi.Cursor, 'nextset')
self._need_decimal_fix = self.dbapi and \
self._dbapi_version() < (2, 1, 8)

View File

@@ -1,5 +1,5 @@
# mssql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -68,7 +68,7 @@ class MSDialect_zxjdbc(ZxJDBCConnector, MSDialect):
def _get_server_version_info(self, connection):
return tuple(
int(x)
int(x)
for x in connection.connection.dbversion.split('.')
)

View File

@@ -1,11 +1,12 @@
# mysql/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from sqlalchemy.dialects.mysql import base, mysqldb, oursql, \
pyodbc, zxjdbc, mysqlconnector, pymysql
pyodbc, zxjdbc, mysqlconnector, pymysql,\
gaerdbms
# default dialect
base.dialect = mysqldb.dialect

View File

@@ -1,5 +1,5 @@
# mysql/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -9,29 +9,11 @@
Supported Versions and Features
-------------------------------
SQLAlchemy supports 6 major MySQL versions: 3.23, 4.0, 4.1, 5.0, 5.1 and 6.0,
with capabilities increasing with more modern servers.
Versions 4.1 and higher support the basic SQL functionality that SQLAlchemy
uses in the ORM and SQL expressions. These versions pass the applicable tests
in the suite 100%. No heroic measures are taken to work around major missing
SQL features- if your server version does not support sub-selects, for
SQLAlchemy supports MySQL starting with version 4.1 through modern releases.
However, no heroic measures are taken to work around major missing
SQL features - if your server version does not support sub-selects, for
example, they won't work in SQLAlchemy either.
Most available DBAPI drivers are supported; see below.
===================================== ===============
Feature Minimum Version
===================================== ===============
sqlalchemy.orm 4.1.1
Table Reflection 3.23.x
DDL Generation 4.1.1
utf8/Full Unicode Connections 4.1.1
Transactions 3.23.15
Two-Phase Transactions 5.0.3
Nested Transactions 5.0.3
===================================== ===============
See the official MySQL documentation for detailed information about features
supported in any given server release.
@@ -44,18 +26,21 @@ Connection Timeouts
-------------------
MySQL features an automatic connection close behavior, for connections that have
been idle for eight hours or more. To circumvent having this issue, use the
been idle for eight hours or more. To circumvent having this issue, use the
``pool_recycle`` option which controls the maximum age of any connection::
engine = create_engine('mysql+mysqldb://...', pool_recycle=3600)
.. _mysql_storage_engines:
Storage Engines
---------------
Most MySQL server installations have a default table type of ``MyISAM``, a
non-transactional table type. During a transaction, non-transactional storage
engines do not participate and continue to store table changes in autocommit
mode. For fully atomic transactions, all participating tables must use a
mode. For fully atomic transactions as well as support for foreign key
constraints, all participating tables must use a
transactional engine such as ``InnoDB``, ``Falcon``, ``SolidDB``, `PBXT`, etc.
Storage engines can be elected when creating tables in SQLAlchemy by supplying
@@ -68,6 +53,10 @@ creation option can be specified in this syntax::
mysql_charset='utf8'
)
.. seealso::
`The InnoDB Storage Engine <http://dev.mysql.com/doc/refman/5.0/en/innodb-storage-engine.html>`_ - on the MySQL website.
Case Sensitivity and Table Reflection
-------------------------------------
@@ -87,19 +76,19 @@ to be used.
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level``
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
:func:`.create_engine` accepts an ``isolation_level``
parameter which results in the command ``SET SESSION
TRANSACTION ISOLATION LEVEL <level>`` being invoked for
every new connection. Valid values for this parameter are
``READ COMMITTED``, ``READ UNCOMMITTED``,
``READ COMMITTED``, ``READ UNCOMMITTED``,
``REPEATABLE READ``, and ``SERIALIZABLE``::
engine = create_engine(
"mysql://scott:tiger@localhost/test",
"mysql://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
(new in 0.7.6)
.. versionadded:: 0.7.6
Keys
----
@@ -185,6 +174,24 @@ available.
update(..., mysql_limit=10)
rowcount Support
----------------
SQLAlchemy standardizes the DBAPI ``cursor.rowcount`` attribute to be the
usual definition of "number of rows matched by an UPDATE or DELETE" statement.
This is in contradiction to the default setting on most MySQL DBAPI drivers,
which is "number of rows actually modified/deleted". For this reason, the
SQLAlchemy MySQL dialects always set the ``constants.CLIENT.FOUND_ROWS`` flag,
or whatever is equivalent for the DBAPI in use, on connect, unless the flag value
is overridden using DBAPI-specific options
(such as ``client_flag`` for the MySQL-Python driver, ``found_rows`` for the
OurSQL driver).
See also:
:attr:`.ResultProxy.rowcount`
CAST Support
------------
@@ -242,7 +249,7 @@ Index Types
~~~~~~~~~~~~~
Some MySQL storage engines permit you to specify an index type when creating
an index or primary key constraint. SQLAlchemy provides this feature via the
an index or primary key constraint. SQLAlchemy provides this feature via the
``mysql_using`` parameter on :class:`.Index`::
Index('my_index', my_table.c.data, mysql_using='hash')
@@ -252,7 +259,7 @@ As well as the ``mysql_using`` parameter on :class:`.PrimaryKeyConstraint`::
PrimaryKeyConstraint("data", mysql_using='hash')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
underlying CREATE INDEX or PRIMARY KEY clause, so it *must* be a valid index
type for your MySQL storage engine.
More information can be found at:
@@ -344,9 +351,9 @@ class _FloatType(_NumericType, sqltypes.Float):
(precision is None and scale is not None) or
(precision is not None and scale is None)
):
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
raise exc.ArgumentError(
"You must specify both precision and scale or omit "
"both altogether.")
super(_FloatType, self).__init__(precision=precision, asdecimal=asdecimal, **kw)
self.scale = scale
@@ -1273,11 +1280,11 @@ class MySQLCompiler(compiler.SQLCompiler):
def visit_cast(self, cast, **kwargs):
# No cast until 4, no decimals until 5.
if not self.dialect._supports_cast:
return self.process(cast.clause)
return self.process(cast.clause.self_group())
type_ = self.process(cast.typeclause)
if type_ is None:
return self.process(cast.clause)
return self.process(cast.clause.self_group())
return 'CAST(%s AS %s)' % (self.process(cast.clause), type_)
@@ -1289,13 +1296,13 @@ class MySQLCompiler(compiler.SQLCompiler):
def get_select_precolumns(self, select):
"""Add special MySQL keywords in place of DISTINCT.
.. note::
.. note::
this usage is deprecated. :meth:`.Select.prefix_with`
should be used for special keywords at the start
of a SELECT.
"""
if isinstance(select._distinct, basestring):
return select._distinct.upper() + " "
@@ -1343,16 +1350,16 @@ class MySQLCompiler(compiler.SQLCompiler):
if limit is None:
# hardwire the upper limit. Currently
# needed by OurSQL with Python 3
# (https://bugs.launchpad.net/oursql/+bug/686232),
# (https://bugs.launchpad.net/oursql/+bug/686232),
# but also is consistent with the usage of the upper
# bound as part of MySQL's "syntax" for OFFSET with
# no LIMIT
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
self.process(sql.literal(offset)),
"18446744073709551615")
else:
return ' \n LIMIT %s, %s' % (
self.process(sql.literal(offset)),
self.process(sql.literal(offset)),
self.process(sql.literal(limit)))
else:
# No offset provided, so just use the limit
@@ -1366,10 +1373,10 @@ class MySQLCompiler(compiler.SQLCompiler):
return None
def update_tables_clause(self, update_stmt, from_table, extra_froms, **kw):
return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
return ', '.join(t._compiler_dispatch(self, asfrom=True, **kw)
for t in [from_table] + list(extra_froms))
def update_from_clause(self, update_stmt, from_table,
def update_from_clause(self, update_stmt, from_table,
extra_froms, from_hints, **kw):
return None
@@ -1395,8 +1402,12 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
auto_inc_column is not list(table.primary_key)[0]:
if constraint_string:
constraint_string += ", \n\t"
constraint_string += "KEY `idx_autoinc_%s`(`%s`)" % (auto_inc_column.name, \
self.preparer.format_column(auto_inc_column))
constraint_string += "KEY %s (%s)" % (
self.preparer.quote(
"idx_autoinc_%s" % auto_inc_column.name, None
),
self.preparer.format_column(auto_inc_column)
)
return constraint_string
@@ -1431,7 +1442,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
opts = dict(
(
k[len(self.dialect.name)+1:].upper(),
k[len(self.dialect.name)+1:].upper(),
v
)
for k, v in table.kwargs.items()
@@ -1447,7 +1458,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
arg = "'%s'" % arg.replace("\\", "\\\\").replace("'", "''")
if opt in ('DATA_DIRECTORY', 'INDEX_DIRECTORY',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARACTER_SET', 'CHARACTER_SET',
'DEFAULT_CHARSET',
'DEFAULT_COLLATE'):
opt = opt.replace('_', ' ')
@@ -1467,7 +1478,7 @@ class MySQLDDLCompiler(compiler.DDLCompiler):
table = preparer.format_table(index.table)
columns = [preparer.quote(c.name, c.quote) for c in index.columns]
name = preparer.quote(
self._index_identifier(index.name),
self._index_identifier(index.name),
index.quote)
text = "CREATE "
@@ -1576,24 +1587,24 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if type_.precision is None:
return self._extend_numeric(type_, "NUMERIC")
elif type_.scale is None:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s)" %
return self._extend_numeric(type_,
"NUMERIC(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"NUMERIC(%(precision)s, %(scale)s)" %
return self._extend_numeric(type_,
"NUMERIC(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DECIMAL(self, type_):
if type_.precision is None:
return self._extend_numeric(type_, "DECIMAL")
elif type_.scale is None:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s)" %
return self._extend_numeric(type_,
"DECIMAL(%(precision)s)" %
{'precision': type_.precision})
else:
return self._extend_numeric(type_,
"DECIMAL(%(precision)s, %(scale)s)" %
return self._extend_numeric(type_,
"DECIMAL(%(precision)s, %(scale)s)" %
{'precision': type_.precision, 'scale' : type_.scale})
def visit_DOUBLE(self, type_):
@@ -1616,7 +1627,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
if self._mysql_type(type_) and \
type_.scale is not None and \
type_.precision is not None:
return self._extend_numeric(type_,
return self._extend_numeric(type_,
"FLOAT(%s, %s)" % (type_.precision, type_.scale))
elif type_.precision is not None:
return self._extend_numeric(type_, "FLOAT(%s)" % (type_.precision,))
@@ -1625,24 +1636,24 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_INTEGER(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"INTEGER(%(display_width)s)" %
return self._extend_numeric(type_,
"INTEGER(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "INTEGER")
def visit_BIGINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"BIGINT(%(display_width)s)" %
return self._extend_numeric(type_,
"BIGINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "BIGINT")
def visit_MEDIUMINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"MEDIUMINT(%(display_width)s)" %
return self._extend_numeric(type_,
"MEDIUMINT(%(display_width)s)" %
{'display_width': type_.display_width})
else:
return self._extend_numeric(type_, "MEDIUMINT")
@@ -1655,8 +1666,8 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
def visit_SMALLINT(self, type_):
if self._mysql_type(type_) and type_.display_width is not None:
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
return self._extend_numeric(type_,
"SMALLINT(%(display_width)s)" %
{'display_width': type_.display_width}
)
else:
@@ -1706,7 +1717,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_string(type_, {}, "VARCHAR(%d)" % type_.length)
else:
raise exc.CompileError(
"VARCHAR requires a length on dialect %s" %
"VARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_CHAR(self, type_):
@@ -1722,7 +1733,7 @@ class MySQLTypeCompiler(compiler.GenericTypeCompiler):
return self._extend_string(type_, {'national':True}, "VARCHAR(%(length)s)" % {'length': type_.length})
else:
raise exc.CompileError(
"NVARCHAR requires a length on dialect %s" %
"NVARCHAR requires a length on dialect %s" %
self.dialect.name)
def visit_NCHAR(self, type_):
@@ -1783,8 +1794,8 @@ class MySQLIdentifierPreparer(compiler.IdentifierPreparer):
quote = '"'
super(MySQLIdentifierPreparer, self).__init__(
dialect,
initial_quote=quote,
dialect,
initial_quote=quote,
escape_quote=quote)
def _quote_free_identifiers(self, *ids):
@@ -1817,7 +1828,7 @@ class MySQLDialect(default.DefaultDialect):
preparer = MySQLIdentifierPreparer
# default SQL compilation settings -
# these are modified upon initialize(),
# these are modified upon initialize(),
# i.e. first connect
_backslash_escapes = True
_server_ansiquotes = False
@@ -1834,7 +1845,7 @@ class MySQLDialect(default.DefaultDialect):
else:
return None
_isolation_lookup = set(['SERIALIZABLE',
_isolation_lookup = set(['SERIALIZABLE',
'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
@@ -1842,7 +1853,7 @@ class MySQLDialect(default.DefaultDialect):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
cursor = connection.cursor()
@@ -1914,7 +1925,7 @@ class MySQLDialect(default.DefaultDialect):
return self._extract_error_code(e) in \
(2006, 2013, 2014, 2045, 2055)
elif isinstance(e, self.dbapi.InterfaceError):
# if underlying connection is closed,
# if underlying connection is closed,
# this is the error you get
return "(0, '')" in str(e)
else:
@@ -2017,7 +2028,6 @@ class MySQLDialect(default.DefaultDialect):
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
charset = self._connection_charset
if self.server_version_info < (5, 0, 2):
raise NotImplementedError
if schema is None:
@@ -2028,7 +2038,7 @@ class MySQLDialect(default.DefaultDialect):
rp = connection.execute("SHOW FULL TABLES FROM %s" %
self.identifier_preparer.quote_identifier(schema))
return [row[0] for row in self._compat_fetchall(rp, charset=charset)\
if row[1] == 'VIEW']
if row[1] in ('VIEW', 'SYSTEM VIEW')]
@reflection.cache
def get_table_options(self, connection, table_name, schema=None, **kw):
@@ -2129,9 +2139,9 @@ class MySQLDialect(default.DefaultDialect):
def _parsed_state_or_create(self, connection, table_name, schema=None, **kw):
return self._setup_parser(
connection,
table_name,
schema,
connection,
table_name,
schema,
info_cache=kw.get('info_cache', None)
)
@@ -2139,7 +2149,7 @@ class MySQLDialect(default.DefaultDialect):
def _tabledef_parser(self):
"""return the MySQLTableDefinitionParser, generate if needed.
The deferred creation ensures that the dialect has
The deferred creation ensures that the dialect has
retrieved server version information first.
"""

View File

@@ -0,0 +1,84 @@
# mysql/gaerdbms.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for Google Cloud SQL on Google App Engine.
This dialect is based primarily on the :mod:`.mysql.mysqldb` dialect with minimal
changes.
.. versionadded:: 0.7.8
Connecting
----------
Connect string format::
mysql+gaerdbms:///<dbname>
E.g.::
create_engine('mysql+gaerdbms:///mydb',
connect_args={"instance":"instancename"})
Pooling
-------
Google App Engine connections appear to be randomly recycled,
so the dialect does not pool connections. The :class:`.NullPool`
implementation is installed within the :class:`.Engine` by
default.
"""
from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb
from sqlalchemy.pool import NullPool
import re
class MySQLDialect_gaerdbms(MySQLDialect_mysqldb):
@classmethod
def dbapi(cls):
# from django:
# http://code.google.com/p/googleappengine/source/
# browse/trunk/python/google/storage/speckle/
# python/django/backend/base.py#118
# see also [ticket:2649]
# see also http://stackoverflow.com/q/14224679/34549
from google.appengine.api import apiproxy_stub_map
if apiproxy_stub_map.apiproxy.GetStub('rdbms'):
from google.storage.speckle.python.api import rdbms_apiproxy
return rdbms_apiproxy
else:
from google.storage.speckle.python.api import rdbms_googleapi
return rdbms_googleapi
@classmethod
def get_pool_class(cls, url):
# Cloud SQL connections die at any moment
return NullPool
def create_connect_args(self, url):
opts = url.translate_connect_args()
# 'dsn' and 'instance' are because we are skipping
# the traditional google.api.rdbms wrapper
opts['dsn'] = ''
opts['instance'] = url.query['instance']
return [], opts
def _extract_error_code(self, exception):
match = re.compile(r"^(\d+):").match(str(exception))
# The rdbms api will wrap then re-raise some types of errors
# making this regex return no matches.
if match:
code = match.group(1)
else:
code = None
if code:
return int(code)
dialect = MySQLDialect_gaerdbms

View File

@@ -1,5 +1,5 @@
# mysql/mysqlconnector.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -105,15 +105,11 @@ class MySQLDialect_mysqlconnector(MySQLDialect):
def _get_server_version_info(self, connection):
dbapi_con = connection.connection
from mysql.connector.constants import ClientFlag
dbapi_con.set_client_flag(ClientFlag.FOUND_ROWS)
version = dbapi_con.get_server_version()
return tuple(version)
def _detect_charset(self, connection):
return connection.connection.get_characterset_info()
return connection.connection.charset
def _extract_error_code(self, exception):
return exception.errno

View File

@@ -1,5 +1,5 @@
# mysql/mysqldb.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -26,20 +26,20 @@ MySQLdb will accommodate Python ``unicode`` objects if the
``use_unicode=1`` parameter, or the ``charset`` parameter,
is passed as a connection argument.
Without this setting, many MySQL server installations default to
Without this setting, many MySQL server installations default to
a ``latin1`` encoding for client connections, which has the effect
of all data being converted into ``latin1``, even if you have ``utf8``
of all data being converted into ``latin1``, even if you have ``utf8``
or another character set configured on your tables
and columns. With versions 4.1 and higher, you can change the connection
character set either through server configuration or by including the
``charset`` parameter. The ``charset``
parameter as received by MySQL-Python also has the side-effect of
parameter as received by MySQL-Python also has the side-effect of
enabling ``use_unicode=1``::
# set client encoding to utf8; all strings come back as unicode
create_engine('mysql+mysqldb:///mydb?charset=utf8')
Manually configuring ``use_unicode=0`` will cause MySQL-python to
Manually configuring ``use_unicode=0`` will cause MySQL-python to
return encoded strings::
# set client encoding to utf8; all strings come back as utf8 str
@@ -57,9 +57,9 @@ It is strongly advised to use the latest version of MySQL-Python.
from sqlalchemy.dialects.mysql.base import (MySQLDialect, MySQLExecutionContext,
MySQLCompiler, MySQLIdentifierPreparer)
from sqlalchemy.connectors.mysqldb import (
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBExecutionContext,
MySQLDBCompiler,
MySQLDBIdentifierPreparer,
MySQLDBConnector
)

View File

@@ -1,5 +1,5 @@
# mysql/oursql.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -108,9 +108,9 @@ class MySQLDialect_oursql(MySQLDialect):
arg = "'%s'" % arg
connection.execution_options(_oursql_plain_query=True).execute(query % arg)
# Because mysql is bad, these methods have to be
# Because mysql is bad, these methods have to be
# reimplemented to use _PlainQuery. Basically, some queries
# refuse to return any data if they're run through
# refuse to return any data if they're run through
# the parameterized query API, or refuse to be parameterized
# in the first place.
def do_begin_twophase(self, connection, xid):
@@ -135,7 +135,7 @@ class MySQLDialect_oursql(MySQLDialect):
# Q: why didn't we need all these "plain_query" overrides earlier ?
# am i on a newer/older version of OurSQL ?
def has_table(self, connection, table_name, schema=None):
return MySQLDialect.has_table(self,
return MySQLDialect.has_table(self,
connection.connect().\
execution_options(_oursql_plain_query=True),
table_name, schema)
@@ -183,7 +183,7 @@ class MySQLDialect_oursql(MySQLDialect):
def initialize(self, connection):
return MySQLDialect.initialize(
self,
self,
connection.execution_options(_oursql_plain_query=True)
)
@@ -208,6 +208,7 @@ class MySQLDialect_oursql(MySQLDialect):
util.coerce_kw_type(opts, 'port', int)
util.coerce_kw_type(opts, 'compress', bool)
util.coerce_kw_type(opts, 'autoping', bool)
util.coerce_kw_type(opts, 'raise_on_warnings', bool)
util.coerce_kw_type(opts, 'default_charset', bool)
if opts.pop('default_charset', False):
@@ -222,7 +223,7 @@ class MySQLDialect_oursql(MySQLDialect):
opts.setdefault('found_rows', True)
ssl = {}
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
for key in ['ssl_ca', 'ssl_key', 'ssl_cert',
'ssl_capath', 'ssl_cipher']:
if key in opts:
ssl[key[4:]] = opts[key]

View File

@@ -1,5 +1,5 @@
# mysql/pymysql.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -20,20 +20,20 @@ Connect string::
MySQL-Python Compatibility
--------------------------
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
The pymysql DBAPI is a pure Python port of the MySQL-python (MySQLdb) driver,
and targets 100% compatibility. Most behavioral notes for MySQL-python apply to
the pymysql driver as well.
"""
from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb
from sqlalchemy.dialects.mysql.mysqldb import MySQLDialect_mysqldb
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
class MySQLDialect_pymysql(MySQLDialect_mysqldb):
driver = 'pymysql'
description_encoding = None
@classmethod
def dbapi(cls):
return __import__('pymysql')
@classmethod
def dbapi(cls):
return __import__('pymysql')
dialect = MySQLDialect_pymysql
dialect = MySQLDialect_pymysql

View File

@@ -1,5 +1,5 @@
# mysql/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -20,7 +20,7 @@ Connect string::
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.

View File

@@ -1,5 +1,5 @@
# mysql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# oracle/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,7 +12,7 @@ from sqlalchemy.dialects.oracle.base import \
VARCHAR, NVARCHAR, CHAR, DATE, DATETIME, NUMBER,\
BLOB, BFILE, CLOB, NCLOB, TIMESTAMP, RAW,\
FLOAT, DOUBLE_PRECISION, LONG, dialect, INTERVAL,\
VARCHAR2, NVARCHAR2
VARCHAR2, NVARCHAR2, ROWID
__all__ = (

View File

@@ -1,5 +1,5 @@
# oracle/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -14,7 +14,7 @@ for that driver.
Connect Arguments
-----------------
The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
The dialect supports several :func:`~sqlalchemy.create_engine()` arguments which
affect the behavior of the dialect regardless of driver in use.
* *use_ansi* - Use ANSI JOIN constructs (see the section on Oracle 8). Defaults
@@ -29,32 +29,32 @@ Auto Increment Behavior
SQLAlchemy Table objects which include integer primary keys are usually assumed to have
"autoincrementing" behavior, meaning they can generate their own primary key values upon
INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
INSERT. Since Oracle has no "autoincrement" feature, SQLAlchemy relies upon sequences
to produce these values. With the Oracle dialect, *a sequence must always be explicitly
specified to enable autoincrement*. This is divergent with the majority of documentation
specified to enable autoincrement*. This is divergent with the majority of documentation
examples which assume the usage of an autoincrement-capable database. To specify sequences,
use the sqlalchemy.schema.Sequence object which is passed to a Column construct::
t = Table('mytable', metadata,
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
Column(...), ...
)
This step is also required when using table reflection, i.e. autoload=True::
t = Table('mytable', metadata,
t = Table('mytable', metadata,
Column('id', Integer, Sequence('id_seq'), primary_key=True),
autoload=True
)
)
Identifier Casing
-----------------
In Oracle, the data dictionary represents all case insensitive identifier names
In Oracle, the data dictionary represents all case insensitive identifier names
using UPPERCASE text. SQLAlchemy on the other hand considers an all-lower case identifier
name to be case insensitive. The Oracle dialect converts all case insensitive identifiers
to and from those two formats during schema level communication, such as reflection of
tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
tables and indexes. Using an UPPERCASE name on the SQLAlchemy side indicates a
case sensitive identifier, and SQLAlchemy will quote the name - this will cause mismatches
against data dictionary data received from Oracle, so unless identifier names have been
truly created as case sensitive (i.e. using quoted names), all lowercase names should be
@@ -63,23 +63,25 @@ used on the SQLAlchemy side.
Unicode
-------
SQLAlchemy 0.6 uses the "native unicode" mode provided as of cx_oracle 5. cx_oracle 5.0.2
or greater is recommended for support of NCLOB. If not using cx_oracle 5, the NLS_LANG
environment variable needs to be set in order for the oracle client library to use
proper encoding, such as "AMERICAN_AMERICA.UTF8".
.. versionchanged:: 0.6
SQLAlchemy uses the "native unicode" mode provided as of cx_oracle 5.
cx_oracle 5.0.2 or greater is recommended for support of NCLOB.
If not using cx_oracle 5, the NLS_LANG environment variable needs
to be set in order for the oracle client library to use proper encoding,
such as "AMERICAN_AMERICA.UTF8".
Also note that Oracle supports unicode data through the NVARCHAR and NCLOB data types.
When using the SQLAlchemy Unicode and UnicodeText types, these DDL types will be used
within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still
within CREATE TABLE statements. Usage of VARCHAR2 and CLOB with unicode text still
requires NLS_LANG to be set.
LIMIT/OFFSET Support
--------------------
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
Oracle has no support for the LIMIT or OFFSET keywords. SQLAlchemy uses
a wrapped subquery approach in conjunction with ROWNUM. The exact methodology
is taken from
http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
http://www.oracle.com/technology/oramag/oracle/06-sep/o56asktom.html .
There are two options which affect its behavior:
@@ -87,13 +89,13 @@ There are two options which affect its behavior:
optimization directive, specify ``optimize_limits=True`` to :func:`.create_engine`.
* the values passed for the limit/offset are sent as bound parameters. Some users have observed
that Oracle produces a poor query plan when the values are sent as binds and not
rendered literally. To render the limit/offset values literally within the SQL
rendered literally. To render the limit/offset values literally within the SQL
statement, specify ``use_binds_for_limits=False`` to :func:`.create_engine`.
Some users have reported better performance when the entirely different approach of a
window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
that the majority of users don't observe this). To suit this case the
method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
Some users have reported better performance when the entirely different approach of a
window query is used, i.e. ROW_NUMBER() OVER (ORDER BY), to provide LIMIT/OFFSET (note
that the majority of users don't observe this). To suit this case the
method used for LIMIT/OFFSET can be replaced entirely. See the recipe at
http://www.sqlalchemy.org/trac/wiki/UsageRecipes/WindowFunctionsByDefault
which installs a select compiler that overrides the generation of limit/offset with
a window function.
@@ -101,11 +103,11 @@ a window function.
ON UPDATE CASCADE
-----------------
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
Oracle doesn't have native ON UPDATE CASCADE functionality. A trigger based solution
is available at http://asktom.oracle.com/tkyte/update_cascade/index.html .
When using the SQLAlchemy ORM, the ORM has limited ability to manually issue
cascading updates - specify ForeignKey objects using the
cascading updates - specify ForeignKey objects using the
"deferrable=True, initially='deferred'" keyword arguments,
and specify "passive_updates=False" on each relationship().
@@ -119,21 +121,21 @@ behaviors:
JOIN phrases into the WHERE clause, and in the case of LEFT OUTER JOIN
makes use of Oracle's (+) operator.
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
* the NVARCHAR2 and NCLOB datatypes are no longer generated as DDL when
the :class:`~sqlalchemy.types.Unicode` is used - VARCHAR2 and CLOB are issued
instead. This because these types don't seem to work correctly on Oracle 8
even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
even though they are available. The :class:`~sqlalchemy.types.NVARCHAR`
and :class:`~sqlalchemy.dialects.oracle.NCLOB` types will always generate NVARCHAR2 and NCLOB.
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
* the "native unicode" mode is disabled when using cx_oracle, i.e. SQLAlchemy
encodes all Python unicode objects to "string" before passing in as bind parameters.
Synonym/DBLINK Reflection
-------------------------
When using reflection with Table objects, the dialect can optionally search for tables
indicated by synonyms that reference DBLINK-ed tables by passing the flag
oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK
indicated by synonyms that reference DBLINK-ed tables by passing the flag
oracle_resolve_synonyms=True as a keyword argument to the Table construct. If DBLINK
is not in use this flag should be left off.
"""
@@ -215,8 +217,8 @@ class LONG(sqltypes.Text):
class INTERVAL(sqltypes.TypeEngine):
__visit_name__ = 'INTERVAL'
def __init__(self,
day_precision=None,
def __init__(self,
day_precision=None,
second_precision=None):
"""Construct an INTERVAL.
@@ -301,10 +303,10 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
def visit_INTERVAL(self, type_):
return "INTERVAL DAY%s TO SECOND%s" % (
type_.day_precision is not None and
type_.day_precision is not None and
"(%d)" % type_.day_precision or
"",
type_.second_precision is not None and
type_.second_precision is not None and
"(%d)" % type_.second_precision or
"",
)
@@ -338,7 +340,7 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
else:
return "%(name)s(%(precision)s, %(scale)s)" % {'name':name,'precision': precision, 'scale' : scale}
def visit_string(self, type_):
def visit_string(self, type_):
return self.visit_VARCHAR2(type_)
def visit_VARCHAR2(self, type_):
@@ -354,10 +356,10 @@ class OracleTypeCompiler(compiler.GenericTypeCompiler):
def _visit_varchar(self, type_, n, num):
if not n and self.dialect._supports_char_length:
return "VARCHAR%(two)s(%(length)s CHAR)" % {
'length' : type_.length,
'length' : type_.length,
'two':num}
else:
return "%(n)sVARCHAR%(two)s(%(length)s)" % {'length' : type_.length,
return "%(n)sVARCHAR%(two)s(%(length)s)" % {'length' : type_.length,
'two':num, 'n':n}
def visit_text(self, type_):
@@ -429,7 +431,7 @@ class OracleCompiler(compiler.SQLCompiler):
return ""
def default_from(self):
"""Called when a ``SELECT`` statement has no froms,
"""Called when a ``SELECT`` statement has no froms,
and no ``FROM`` clause is to be appended.
The Oracle compiler tacks a "FROM DUAL" to the statement.
@@ -564,7 +566,7 @@ class OracleCompiler(compiler.SQLCompiler):
if not self.dialect.use_binds_for_limits:
max_row = sql.literal_column("%d" % max_row)
limitselect.append_whereclause(
sql.literal_column("ROWNUM")<=max_row)
sql.literal_column("ROWNUM") <= max_row)
# If needed, add the ora_rn, and wrap again with offset.
if select._offset is None:
@@ -611,7 +613,7 @@ class OracleDDLCompiler(compiler.DDLCompiler):
if constraint.ondelete is not None:
text += " ON DELETE %s" % constraint.ondelete
# oracle has no ON UPDATE CASCADE -
# oracle has no ON UPDATE CASCADE -
# its only available via triggers http://asktom.oracle.com/tkyte/update_cascade/index.html
if constraint.onupdate is not None:
util.warn(
@@ -641,8 +643,8 @@ class OracleIdentifierPreparer(compiler.IdentifierPreparer):
class OracleExecutionContext(default.DefaultExecutionContext):
def fire_sequence(self, seq, type_):
return self._execute_scalar("SELECT " +
self.dialect.identifier_preparer.format_sequence(seq) +
return self._execute_scalar("SELECT " +
self.dialect.identifier_preparer.format_sequence(seq) +
".nextval FROM DUAL", type_)
class OracleDialect(default.DefaultDialect):
@@ -674,9 +676,9 @@ class OracleDialect(default.DefaultDialect):
reflection_options = ('oracle_resolve_synonyms', )
def __init__(self,
use_ansi=True,
optimize_limits=False,
def __init__(self,
use_ansi=True,
optimize_limits=False,
use_binds_for_limits=True,
**kwargs):
default.DefaultDialect.__init__(self, **kwargs)
@@ -806,8 +808,8 @@ class OracleDialect(default.DefaultDialect):
if resolve_synonyms:
actual_name, owner, dblink, synonym = self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(schema),
connection,
desired_owner=self.denormalize_name(schema),
desired_synonym=self.denormalize_name(table_name)
)
else:
@@ -874,11 +876,11 @@ class OracleDialect(default.DefaultDialect):
char_length_col = 'char_length'
else:
char_length_col = 'data_length'
c = connection.execute(sql.text(
"SELECT column_name, data_type, %(char_length_col)s, data_precision, data_scale, "
"nullable, data_default FROM ALL_TAB_COLUMNS%(dblink)s "
"WHERE table_name = :table_name AND owner = :owner "
"WHERE table_name = :table_name AND owner = :owner "
"ORDER BY column_id" % {'dblink': dblink, 'char_length_col':char_length_col}),
table_name=table_name, owner=schema)
@@ -890,7 +892,7 @@ class OracleDialect(default.DefaultDialect):
coltype = NUMBER(precision, scale)
elif coltype in ('VARCHAR2', 'NVARCHAR2', 'CHAR'):
coltype = self.ischema_names.get(coltype)(length)
elif 'WITH TIME ZONE' in coltype:
elif 'WITH TIME ZONE' in coltype:
coltype = TIMESTAMP(timezone=True)
else:
coltype = re.sub(r'\(\d+\)', '', coltype)
@@ -927,8 +929,8 @@ class OracleDialect(default.DefaultDialect):
indexes = []
q = sql.text("""
SELECT a.index_name, a.column_name, b.uniqueness
FROM ALL_IND_COLUMNS%(dblink)s a,
ALL_INDEXES%(dblink)s b
FROM ALL_IND_COLUMNS%(dblink)s a,
ALL_INDEXES%(dblink)s b
WHERE
a.index_name = b.index_name
AND a.table_owner = b.table_owner
@@ -1110,8 +1112,8 @@ class OracleDialect(default.DefaultDialect):
if resolve_synonyms:
ref_remote_name, ref_remote_owner, ref_dblink, ref_synonym = \
self._resolve_synonym(
connection,
desired_owner=self.denormalize_name(remote_owner),
connection,
desired_owner=self.denormalize_name(remote_owner),
desired_table=self.denormalize_name(remote_table)
)
if ref_synonym:

View File

@@ -1,5 +1,5 @@
# oracle/cx_oracle.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -9,19 +9,19 @@
Driver
------
The Oracle dialect uses the cx_oracle driver, available at
http://cx-oracle.sourceforge.net/ . The dialect has several behaviors
The Oracle dialect uses the cx_oracle driver, available at
http://cx-oracle.sourceforge.net/ . The dialect has several behaviors
which are specifically tailored towards compatibility with this module.
Version 5.0 or greater is **strongly** recommended, as SQLAlchemy makes
extensive use of the cx_oracle output converters for numeric and
extensive use of the cx_oracle output converters for numeric and
string conversions.
Connecting
----------
Connecting with create_engine() uses the standard URL approach of
``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the
host, port, and dbname tokens are converted to a TNS name using the cx_oracle
Connecting with create_engine() uses the standard URL approach of
``oracle://user:pass@host:port/dbname[?key=value&key=value...]``. If dbname is present, the
host, port, and dbname tokens are converted to a TNS name using the cx_oracle
:func:`makedsn()` function. Otherwise, the host token is taken directly as a TNS name.
Additional arguments which may be specified either as query string arguments on the
@@ -53,7 +53,7 @@ handler so that all string based result values are returned as unicode as well.
Generally, the ``NLS_LANG`` environment variable determines the nature
of the encoding to be used.
Note that this behavior is disabled when Oracle 8 is detected, as it has been
Note that this behavior is disabled when Oracle 8 is detected, as it has been
observed that issues remain when passing Python unicodes to cx_oracle with Oracle 8.
LOB Objects
@@ -71,8 +71,40 @@ To disable this processing, pass ``auto_convert_lobs=False`` to :func:`create_en
Two Phase Transaction Support
-----------------------------
Two Phase transactions are implemented using XA transactions. Success has been reported
with this feature but it should be regarded as experimental.
Two Phase transactions are implemented using XA transactions, and are known
to work in a rudimental fashion with recent versions of cx_Oracle
as of SQLAlchemy 0.8.0b2, 0.7.10. However, the mechanism is not yet
considered to be robust and should still be regarded as experimental.
In particular, the cx_Oracle DBAPI as recently as 5.1.2 has a bug regarding
two phase which prevents
a particular DBAPI connection from being consistently usable in both
prepared transactions as well as traditional DBAPI usage patterns; therefore
once a particular connection is used via :meth:`.Connection.begin_prepared`,
all subsequent usages of the underlying DBAPI connection must be within
the context of prepared transactions.
The default behavior of :class:`.Engine` is to maintain a pool of DBAPI
connections. Therefore, due to the above glitch, a DBAPI connection that has
been used in a two-phase operation, and is then returned to the pool, will
not be usable in a non-two-phase context. To avoid this situation,
the application can make one of several choices:
* Disable connection pooling using :class:`.NullPool`
* Ensure that the particular :class:`.Engine` in use is only used
for two-phase operations. A :class:`.Engine` bound to an ORM
:class:`.Session` which includes ``twophase=True`` will consistently
use the two-phase transaction style.
* For ad-hoc two-phase operations without disabling pooling, the DBAPI
connection in use can be evicted from the connection pool using the
:class:`.Connection.detach` method.
.. versionchanged:: 0.8.0b2,0.7.10
Support for cx_oracle prepared transactions has been implemented
and tested.
Precision Numerics
------------------
@@ -95,13 +127,14 @@ If precision numerics aren't required, the decimal handling
can be disabled by passing the flag ``coerce_to_decimal=False``
to :func:`.create_engine`::
engine = create_engine("oracle+cx_oracle://dsn",
engine = create_engine("oracle+cx_oracle://dsn",
coerce_to_decimal=False)
The ``coerce_to_decimal`` flag is new in 0.7.6.
.. versionadded:: 0.7.6
Add the ``coerce_to_decimal`` flag.
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
Another alternative to performance is to use the
`cdecimal <http://pypi.python.org/pypi/cdecimal/>`_ library;
see :class:`.Numeric` for additional notes.
The handler attempts to use the "precision" and "scale"
@@ -128,21 +161,23 @@ environment variable. Upon first connection, the dialect runs a
test to determine the current "decimal" character, which can be
a comma "," for european locales. From that point forward the
outputtypehandler uses that character to represent a decimal
point (this behavior is new in version 0.6.6). Note that
cx_oracle 5.0.3 or greater is required when dealing with
numerics with locale settings that don't use a period "." as the
decimal character.
point. Note that cx_oracle 5.0.3 or greater is required
when dealing with numerics with locale settings that don't use
a period "." as the decimal character.
.. versionchanged:: 0.6.6
The outputtypehandler uses a comma "," character to represent
a decimal point.
.. _OCI: http://www.oracle.com/technetwork/database/features/oci/index.html
"""
from sqlalchemy.dialects.oracle.base import OracleCompiler, OracleDialect, \
RESERVED_WORDS, OracleExecutionContext
OracleExecutionContext
from sqlalchemy.dialects.oracle import base as oracle
from sqlalchemy.engine import base
from sqlalchemy import types as sqltypes, util, exc, processors
from datetime import datetime
import random
import collections
from sqlalchemy.util.compat import decimal
@@ -156,7 +191,7 @@ class _OracleNumeric(sqltypes.Numeric):
def result_processor(self, dialect, coltype):
# we apply a cx_oracle type handler to all connections
# that converts floating point strings to Decimal().
# However, in some subquery situations, Oracle doesn't
# However, in some subquery situations, Oracle doesn't
# give us enough information to determine int or Decimal.
# It could even be int/Decimal differently on each row,
# regardless of the scale given for the originating type.
@@ -186,7 +221,7 @@ class _OracleNumeric(sqltypes.Numeric):
else:
return None
else:
# cx_oracle 4 behavior, will assume
# cx_oracle 4 behavior, will assume
# floats
return super(_OracleNumeric, self).\
result_processor(dialect, coltype)
@@ -233,7 +268,7 @@ class _NativeUnicodeMixin(object):
# end Py2K
# we apply a connection output handler that returns
# unicode in all cases, so the "native_unicode" flag
# unicode in all cases, so the "native_unicode" flag
# will be set for the default String.result_processor.
class _OracleChar(_NativeUnicodeMixin, sqltypes.CHAR):
@@ -248,6 +283,13 @@ class _OracleText(_LOBMixin, sqltypes.Text):
def get_dbapi_type(self, dbapi):
return dbapi.CLOB
class _OracleLong(oracle.LONG):
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
def get_dbapi_type(self, dbapi):
return dbapi.LONG_STRING
class _OracleString(_NativeUnicodeMixin, sqltypes.String):
pass
@@ -296,13 +338,13 @@ class _OracleRowid(oracle.ROWID):
return dbapi.ROWID
class OracleCompiler_cx_oracle(OracleCompiler):
def bindparam_string(self, name):
def bindparam_string(self, name, **kw):
if self.preparer._bindparam_requires_quotes(name):
quoted_name = '"%s"' % name
self._quoted_bind_names[name] = quoted_name
return OracleCompiler.bindparam_string(self, quoted_name)
return OracleCompiler.bindparam_string(self, quoted_name, **kw)
else:
return OracleCompiler.bindparam_string(self, name)
return OracleCompiler.bindparam_string(self, name, **kw)
class OracleExecutionContext_cx_oracle(OracleExecutionContext):
@@ -312,15 +354,15 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
getattr(self.compiled, '_quoted_bind_names', None)
if quoted_bind_names:
if not self.dialect.supports_unicode_statements:
# if DBAPI doesn't accept unicode statements,
# if DBAPI doesn't accept unicode statements,
# keys in self.parameters would have been encoded
# here. so convert names in quoted_bind_names
# to encoded as well.
quoted_bind_names = \
dict(
(fromname.encode(self.dialect.encoding),
toname.encode(self.dialect.encoding))
for fromname, toname in
(fromname.encode(self.dialect.encoding),
toname.encode(self.dialect.encoding))
for fromname, toname in
quoted_bind_names.items()
)
for param in self.parameters:
@@ -329,11 +371,11 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
del param[fromname]
if self.dialect.auto_setinputsizes:
# cx_oracle really has issues when you setinputsizes
# cx_oracle really has issues when you setinputsizes
# on String, including that outparams/RETURNING
# breaks for varchars
self.set_input_sizes(quoted_bind_names,
exclude_types=self.dialect._cx_oracle_string_types
self.set_input_sizes(quoted_bind_names,
exclude_types=self.dialect._cx_oracle_exclude_setinputsizes
)
# if a single execute, check for outparams
@@ -365,7 +407,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
def get_result_proxy(self):
if hasattr(self, 'out_parameters') and self.compiled.returning:
returning_params = dict(
(k, v.getvalue())
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
return ReturningResultProxy(self, returning_params)
@@ -391,7 +433,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
impl_type = type.dialect_impl(self.dialect)
dbapi_type = impl_type.get_dbapi_type(self.dialect.dbapi)
result_processor = impl_type.\
result_processor(self.dialect,
result_processor(self.dialect,
dbapi_type)
if result_processor is not None:
out_parameters[name] = \
@@ -400,7 +442,7 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
out_parameters[name] = self.out_parameters[name].getvalue()
else:
result.out_parameters = dict(
(k, v.getvalue())
(k, v.getvalue())
for k, v in self.out_parameters.items()
)
@@ -409,13 +451,13 @@ class OracleExecutionContext_cx_oracle(OracleExecutionContext):
class OracleExecutionContext_cx_oracle_with_unicode(OracleExecutionContext_cx_oracle):
"""Support WITH_UNICODE in Python 2.xx.
WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
behavior under Python 2.x. This mode in some cases disallows
and in other cases silently passes corrupted data when
non-Python-unicode strings (a.k.a. plain old Python strings)
are passed as arguments to connect(), the statement sent to execute(),
WITH_UNICODE allows cx_Oracle's Python 3 unicode handling
behavior under Python 2.x. This mode in some cases disallows
and in other cases silently passes corrupted data when
non-Python-unicode strings (a.k.a. plain old Python strings)
are passed as arguments to connect(), the statement sent to execute(),
or any of the bind parameter keys or values sent to execute().
This optional context therefore ensures that all statements are
This optional context therefore ensures that all statements are
passed as Python unicode objects.
"""
@@ -446,7 +488,7 @@ class ReturningResultProxy(base.FullyBufferedResultProxy):
return ret
def _buffer_rows(self):
return collections.deque([tuple(self._returning_params["ret_%d" % i]
return collections.deque([tuple(self._returning_params["ret_%d" % i]
for i, c in enumerate(self._returning_params))])
class OracleDialect_cx_oracle(OracleDialect):
@@ -467,6 +509,11 @@ class OracleDialect_cx_oracle(OracleDialect):
sqltypes.String : _OracleString,
sqltypes.UnicodeText : _OracleUnicodeText,
sqltypes.CHAR : _OracleChar,
# a raw LONG is a text type, but does *not*
# get the LobMixin with cx_oracle.
oracle.LONG: _OracleLong,
sqltypes.Integer : _OracleInteger, # this is only needed for OUT parameters.
# it would be nice if we could not use it otherwise.
oracle.RAW: _OracleRaw,
@@ -478,11 +525,11 @@ class OracleDialect_cx_oracle(OracleDialect):
execute_sequence_format = list
def __init__(self,
auto_setinputsizes=True,
auto_convert_lobs=True,
threaded=True,
allow_twophase=True,
def __init__(self,
auto_setinputsizes=True,
auto_convert_lobs=True,
threaded=True,
allow_twophase=True,
coerce_to_decimal=True,
arraysize=50, **kwargs):
OracleDialect.__init__(self, **kwargs)
@@ -503,13 +550,14 @@ class OracleDialect_cx_oracle(OracleDialect):
getattr(self.dbapi, name, None) for name in names
]).difference([None])
self._cx_oracle_exclude_setinputsizes = types("STRING", "UNICODE")
self._cx_oracle_string_types = types("STRING", "UNICODE", "NCLOB", "CLOB")
self._cx_oracle_unicode_types = types("UNICODE", "NCLOB")
self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self._cx_oracle_binary_types = types("BFILE", "CLOB", "NCLOB", "BLOB")
self.supports_unicode_binds = self.cx_oracle_ver >= (5, 0)
self.supports_native_decimal = (
self.cx_oracle_ver >= (5, 0) and
self.cx_oracle_ver >= (5, 0) and
coerce_to_decimal
)
@@ -551,13 +599,13 @@ class OracleDialect_cx_oracle(OracleDialect):
# expect encoded strings or unicodes, etc.
self.dbapi_type_map = {
self.dbapi.CLOB: oracle.CLOB(),
self.dbapi.NCLOB:oracle.NCLOB(),
self.dbapi.NCLOB: oracle.NCLOB(),
self.dbapi.BLOB: oracle.BLOB(),
self.dbapi.BINARY: oracle.RAW(),
}
@classmethod
def dbapi(cls):
import cx_Oracle
cx_Oracle = __import__('cx_Oracle')
return cx_Oracle
def initialize(self, connection):
@@ -567,12 +615,12 @@ class OracleDialect_cx_oracle(OracleDialect):
self._detect_decimal_char(connection)
def _detect_decimal_char(self, connection):
"""detect if the decimal separator character is not '.', as
"""detect if the decimal separator character is not '.', as
is the case with european locale settings for NLS_LANG.
cx_oracle itself uses similar logic when it formats Python
Decimal objects to strings on the bind side (as of 5.0.3),
as Oracle sends/receives string numerics only in the
Decimal objects to strings on the bind side (as of 5.0.3),
as Oracle sends/receives string numerics only in the
current locale.
"""
@@ -583,14 +631,14 @@ class OracleDialect_cx_oracle(OracleDialect):
cx_Oracle = self.dbapi
conn = connection.connection
# override the output_type_handler that's
# on the cx_oracle connection with a plain
# override the output_type_handler that's
# on the cx_oracle connection with a plain
# one on the cursor
def output_type_handler(cursor, name, defaultType,
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
return cursor.var(
cx_Oracle.STRING,
cx_Oracle.STRING,
255, arraysize=cursor.arraysize)
cursor = conn.cursor()
@@ -620,7 +668,7 @@ class OracleDialect_cx_oracle(OracleDialect):
return
cx_Oracle = self.dbapi
def output_type_handler(cursor, name, defaultType,
def output_type_handler(cursor, name, defaultType,
size, precision, scale):
# convert all NUMBER with precision + positive scale to Decimal
# this almost allows "native decimal" mode.
@@ -628,22 +676,22 @@ class OracleDialect_cx_oracle(OracleDialect):
defaultType == cx_Oracle.NUMBER and \
precision and scale > 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._to_decimal,
cx_Oracle.STRING,
255,
outconverter=self._to_decimal,
arraysize=cursor.arraysize)
# if NUMBER with zero precision and 0 or neg scale, this appears
# to indicate "ambiguous". Use a slower converter that will
# make a decision based on each value received - the type
# to indicate "ambiguous". Use a slower converter that will
# make a decision based on each value received - the type
# may change from row to row (!). This kills
# off "native decimal" mode, handlers still needed.
elif self.supports_native_decimal and \
defaultType == cx_Oracle.NUMBER \
and not precision and scale <= 0:
return cursor.var(
cx_Oracle.STRING,
255,
outconverter=self._detect_decimal,
cx_Oracle.STRING,
255,
outconverter=self._detect_decimal,
arraysize=cursor.arraysize)
# allow all strings to come back natively as Unicode
elif defaultType in (cx_Oracle.STRING, cx_Oracle.FIXED_CHAR):
@@ -707,7 +755,7 @@ class OracleDialect_cx_oracle(OracleDialect):
def _get_server_version_info(self, connection):
return tuple(
int(x)
int(x)
for x in connection.connection.version.split('.')
)
@@ -739,15 +787,23 @@ class OracleDialect_cx_oracle(OracleDialect):
connection.connection.begin(*xid)
def do_prepare_twophase(self, connection, xid):
connection.connection.prepare()
result = connection.connection.prepare()
connection.info['cx_oracle_prepared'] = result
def do_rollback_twophase(self, connection, xid, is_prepared=True, recover=False):
def do_rollback_twophase(self, connection, xid, is_prepared=True,
recover=False):
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True, recover=False):
self.do_commit(connection.connection)
def do_commit_twophase(self, connection, xid, is_prepared=True,
recover=False):
if not is_prepared:
self.do_commit(connection.connection)
else:
oci_prepared = connection.info['cx_oracle_prepared']
if oci_prepared:
self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
pass
connection.info.pop('cx_oracle_prepared', None)
dialect = OracleDialect_cx_oracle

View File

@@ -1,5 +1,5 @@
# oracle/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# dialects/postgres.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# postgresql/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -14,7 +14,7 @@ from sqlalchemy.dialects.postgresql.base import \
DATE, BYTEA, BOOLEAN, INTERVAL, ARRAY, ENUM, dialect
__all__ = (
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET',
'INTEGER', 'BIGINT', 'SMALLINT', 'VARCHAR', 'CHAR', 'TEXT', 'NUMERIC', 'FLOAT', 'REAL', 'INET',
'CIDR', 'UUID', 'BIT', 'MACADDR', 'DOUBLE_PRECISION', 'TIMESTAMP', 'TIME',
'DATE', 'BYTEA', 'BOOLEAN', 'INTERVAL', 'ARRAY', 'ENUM', 'dialect'
)

View File

@@ -1,5 +1,5 @@
# postgresql/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -21,7 +21,7 @@ default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
@@ -51,7 +51,7 @@ parameter are ``READ COMMITTED``, ``READ UNCOMMITTED``, ``REPEATABLE READ``,
and ``SERIALIZABLE``::
engine = create_engine(
"postgresql+pg8000://scott:tiger@localhost/test",
"postgresql+pg8000://scott:tiger@localhost/test",
isolation_level="READ UNCOMMITTED"
)
@@ -75,18 +75,19 @@ the current ``search_path``, the "schema" attribute of the resulting
remote table matches that of the referencing table, and the "schema" argument
was explicitly stated on the referencing table.
The best practice here is to not use the ``schema`` argument
The best practice here is to not use the ``schema`` argument
on :class:`.Table` for any schemas that are present in ``search_path``.
``search_path`` defaults to "public", but care should be taken
to inspect the actual value using::
SHOW search_path;
Prior to version 0.7.3, cross-schema foreign keys when the schemas
were also in the ``search_path`` could make an incorrect assumption
if the schemas were explicitly stated on each :class:`.Table`.
.. versionchanged:: 0.7.3
Prior to this version, cross-schema foreign keys when the schemas
were also in the ``search_path`` could make an incorrect assumption
if the schemas were explicitly stated on each :class:`.Table`.
Background on PG's ``search_path`` is at:
Background on PG's ``search_path`` is at:
http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH
INSERT/UPDATE...RETURNING
@@ -125,7 +126,7 @@ to the PostgreSQL dialect.
Partial Indexes
^^^^^^^^^^^^^^^^
Partial indexes add criterion to the index definition so that the index is
Partial indexes add criterion to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
@@ -137,13 +138,16 @@ Operator Classes
PostgreSQL allows the specification of an *operator class* for each column of
an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the ``postgresql_ops``
keyword argument (new as of SQLAlchemy 0.7.2)::
keyword argument::
Index('my_index', my_table.c.id, my_table.c.data,
Index('my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
})
.. versionadded:: 0.7.2
``postgresql_ops`` keyword argument to :class:`.Index` construct.
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`.Column`, i.e. the name used to access it from the ``.c`` collection
@@ -345,24 +349,27 @@ class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
the fly
:param mutable=False: Specify whether lists passed to this
class should be considered mutable - this enables
"mutable types" mode in the ORM. Be sure to read the
notes for :class:`.MutableType` regarding ORM
performance implications (default changed from ``True`` in
0.7.0).
class should be considered mutable - this enables
"mutable types" mode in the ORM. Be sure to read the
notes for :class:`.MutableType` regarding ORM
performance implications.
.. note::
This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
.. versionchanged:: 0.7.0
Default changed from ``True``\ .
.. versionchanged:: 0.7
This functionality is now superseded by the
``sqlalchemy.ext.mutable`` extension described in
:ref:`mutable_toplevel`.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. DBAPIs such
as psycopg2 return lists by default. When tuples are
returned, the results are hashable. This flag can only
be set to ``True`` when ``mutable`` is set to
``False``. (new in 0.6.5)
``False``.
.. versionadded:: 0.6.5
"""
if isinstance(item_type, ARRAY):
@@ -444,37 +451,37 @@ PGArray = ARRAY
class ENUM(sqltypes.Enum):
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
support for PG's ``CREATE TYPE``.
:class:`~.postgresql.ENUM` is used automatically when
:class:`~.postgresql.ENUM` is used automatically when
using the :class:`.types.Enum` type on PG assuming
the ``native_enum`` is left as ``True``. However, the
the ``native_enum`` is left as ``True``. However, the
:class:`~.postgresql.ENUM` class can also be instantiated
directly in order to access some additional Postgresql-specific
options, namely finer control over whether or not
options, namely finer control over whether or not
``CREATE TYPE`` should be emitted.
Note that both :class:`.types.Enum` as well as
Note that both :class:`.types.Enum` as well as
:class:`~.postgresql.ENUM` feature create/drop
methods; the base :class:`.types.Enum` type ultimately
delegates to the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods present here.
"""
def __init__(self, *enums, **kw):
"""Construct an :class:`~.postgresql.ENUM`.
Arguments are the same as that of
:class:`.types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
@@ -485,31 +492,32 @@ class ENUM(sqltypes.Enum):
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
(new in 0.7.4)
.. versionadded:: 0.7.4
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
"""Emit ``CREATE TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql CREATE TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
@@ -519,19 +527,19 @@ class ENUM(sqltypes.Enum):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
"""Emit ``DROP TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql DROP TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
@@ -543,7 +551,7 @@ class ENUM(sqltypes.Enum):
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
@@ -620,14 +628,14 @@ class PGCompiler(compiler.SQLCompiler):
def visit_match_op(self, binary, **kw):
return "%s @@ to_tsquery(%s)" % (
self.process(binary.left),
self.process(binary.left),
self.process(binary.right))
def visit_ilike_op(self, binary, **kw):
escape = binary.modifiers.get("escape", None)
return '%s ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
@@ -635,7 +643,7 @@ class PGCompiler(compiler.SQLCompiler):
escape = binary.modifiers.get("escape", None)
return '%s NOT ILIKE %s' % \
(self.process(binary.left), self.process(binary.right)) \
+ (escape and
+ (escape and
(' ESCAPE ' + self.render_literal_value(escape, None))
or '')
@@ -675,6 +683,10 @@ class PGCompiler(compiler.SQLCompiler):
def for_update_clause(self, select):
if select.for_update == 'nowait':
return " FOR UPDATE NOWAIT"
elif select.for_update == 'read':
return " FOR SHARE"
elif select.for_update == 'read_nowait':
return " FOR SHARE NOWAIT"
else:
return super(PGCompiler, self).for_update_clause(select)
@@ -682,9 +694,9 @@ class PGCompiler(compiler.SQLCompiler):
columns = [
self.process(
self.label_select_column(None, c, asfrom=False),
within_columns_clause=True,
result_map=self.result_map)
self.label_select_column(None, c, asfrom=False),
within_columns_clause=True,
result_map=self.result_map)
for c in expression._select_iterables(returning_cols)
]
@@ -698,8 +710,8 @@ class PGCompiler(compiler.SQLCompiler):
affinity = None
casts = {
sqltypes.Date:'date',
sqltypes.DateTime:'timestamp',
sqltypes.Date:'date',
sqltypes.DateTime:'timestamp',
sqltypes.Interval:'interval', sqltypes.Time:'time'
}
cast = casts.get(affinity, None)
@@ -718,7 +730,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
column is column.table._autoincrement_column and \
not isinstance(impl_type, sqltypes.SmallInteger) and \
(
column.default is None or
column.default is None or
(
isinstance(column.default, schema.Sequence) and
column.default.optional
@@ -773,7 +785,7 @@ class PGDDLCompiler(compiler.DDLCompiler):
text += "(%s)" \
% (
', '.join([
preparer.format_column(c) +
preparer.format_column(c) +
(c.key in ops and (' ' + ops[c.key]) or '')
for c in index.columns])
)
@@ -831,14 +843,14 @@ class PGTypeCompiler(compiler.GenericTypeCompiler):
def visit_TIMESTAMP(self, type_):
return "TIMESTAMP%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
def visit_TIME(self, type_):
return "TIME%s %s" % (
getattr(type_, 'precision', None) and "(%d)" %
getattr(type_, 'precision', None) and "(%d)" %
type_.precision or "",
(type_.timezone and "WITH" or "WITHOUT") + " TIME ZONE"
)
@@ -920,21 +932,21 @@ class PGExecutionContext(default.DefaultExecutionContext):
return self._execute_scalar("select %s" %
column.server_default.arg, column.type)
elif (column.default is None or
elif (column.default is None or
(column.default.is_sequence and
column.default.optional)):
# execute the sequence associated with a SERIAL primary
# execute the sequence associated with a SERIAL primary
# key column. for non-primary-key SERIAL, the ID just
# generates server side.
try:
seq_name = column._postgresql_seq_name
except AttributeError:
tab = column.table.name
col = column.name
tab = tab[0:29 + max(0, (29 - len(col)))]
col = col[0:29 + max(0, (29 - len(tab)))]
tab = column.table.name
col = column.name
tab = tab[0:29 + max(0, (29 - len(col)))]
col = col[0:29 + max(0, (29 - len(tab)))]
column._postgresql_seq_name = seq_name = "%s_%s_seq" % (tab, col)
sch = column.table.schema
@@ -1004,7 +1016,7 @@ class PGDialect(default.DefaultDialect):
else:
return None
_isolation_lookup = set(['SERIALIZABLE',
_isolation_lookup = set(['SERIALIZABLE',
'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])
def set_isolation_level(self, connection, level):
@@ -1012,9 +1024,9 @@ class PGDialect(default.DefaultDialect):
if level not in self._isolation_lookup:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
)
cursor = connection.cursor()
cursor.execute(
"SET SESSION CHARACTERISTICS AS TRANSACTION "
@@ -1035,13 +1047,13 @@ class PGDialect(default.DefaultDialect):
def do_prepare_twophase(self, connection, xid):
connection.execute("PREPARE TRANSACTION '%s'" % xid)
def do_rollback_twophase(self, connection, xid,
def do_rollback_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
#FIXME: ugly hack to get out of transaction
# context when commiting recoverable transactions
# Must find out a way how to make the dbapi not
#FIXME: ugly hack to get out of transaction
# context when committing recoverable transactions
# Must find out a way how to make the dbapi not
# open a transaction.
connection.execute("ROLLBACK")
connection.execute("ROLLBACK PREPARED '%s'" % xid)
@@ -1050,7 +1062,7 @@ class PGDialect(default.DefaultDialect):
else:
self.do_rollback(connection.connection)
def do_commit_twophase(self, connection, xid,
def do_commit_twophase(self, connection, xid,
is_prepared=True, recover=False):
if is_prepared:
if recover:
@@ -1102,10 +1114,10 @@ class PGDialect(default.DefaultDialect):
"n.oid=c.relnamespace where n.nspname=:schema and "
"relname=:name",
bindparams=[
sql.bindparam('name',
sql.bindparam('name',
unicode(table_name), type_=sqltypes.Unicode),
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)]
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)]
)
)
return bool(cursor.first())
@@ -1121,7 +1133,7 @@ class PGDialect(default.DefaultDialect):
bindparams=[
sql.bindparam('name', unicode(sequence_name),
type_=sqltypes.Unicode)
]
]
)
)
else:
@@ -1133,7 +1145,7 @@ class PGDialect(default.DefaultDialect):
bindparams=[
sql.bindparam('name', unicode(sequence_name),
type_=sqltypes.Unicode),
sql.bindparam('schema',
sql.bindparam('schema',
unicode(schema), type_=sqltypes.Unicode)
]
)
@@ -1261,13 +1273,13 @@ class PGDialect(default.DefaultDialect):
SELECT relname
FROM pg_class c
WHERE relkind = 'v'
AND '%(schema)s' = (select nspname from pg_namespace n
AND '%(schema)s' = (select nspname from pg_namespace n
where n.oid = c.relnamespace)
""" % dict(schema=current_schema)
# Py3K
#view_names = [row[0] for row in connection.execute(s)]
# Py2K
view_names = [row[0].decode(self.encoding)
view_names = [row[0].decode(self.encoding)
for row in connection.execute(s)]
# end Py2K
return view_names
@@ -1301,10 +1313,10 @@ class PGDialect(default.DefaultDialect):
SQL_COLS = """
SELECT a.attname,
pg_catalog.format_type(a.atttypid, a.atttypmod),
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid)
for 128)
(SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid)
for 128)
FROM pg_catalog.pg_attrdef d
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
AND a.atthasdef)
AS DEFAULT,
a.attnotnull, a.attnum, a.attrelid as table_oid
@@ -1313,8 +1325,8 @@ class PGDialect(default.DefaultDialect):
AND a.attnum > 0 AND NOT a.attisdropped
ORDER BY a.attnum
"""
s = sql.text(SQL_COLS,
bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
s = sql.text(SQL_COLS,
bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
typemap={'attname':sqltypes.Unicode, 'default':sqltypes.Unicode}
)
c = connection.execute(s, table_oid=table_oid)
@@ -1325,7 +1337,7 @@ class PGDialect(default.DefaultDialect):
# format columns
columns = []
for name, format_type, default, notnull, attnum, table_oid in rows:
## strip (5) from character varying(5), timestamp(5)
## strip (5) from character varying(5), timestamp(5)
# with time zone, etc
attype = re.sub(r'\([\d,]+\)', '', format_type)
@@ -1350,13 +1362,13 @@ class PGDialect(default.DefaultDialect):
args = (53, )
elif attype == 'integer':
args = ()
elif attype in ('timestamp with time zone',
elif attype in ('timestamp with time zone',
'time with time zone'):
kwargs['timezone'] = True
if charlen:
kwargs['precision'] = int(charlen)
args = ()
elif attype in ('timestamp without time zone',
elif attype in ('timestamp without time zone',
'time without time zone', 'time'):
kwargs['timezone'] = False
if charlen:
@@ -1397,7 +1409,7 @@ class PGDialect(default.DefaultDialect):
# A table can't override whether the domain is nullable.
nullable = domain['nullable']
if domain['default'] and not default:
# It can, however, override the default
# It can, however, override the default
# value, but can't set it to null.
default = domain['default']
continue
@@ -1423,7 +1435,7 @@ class PGDialect(default.DefaultDialect):
sch = schema
if '.' not in match.group(2) and sch is not None:
# unconditionally quote the schema name. this could
# later be enhanced to obey quoting rules /
# later be enhanced to obey quoting rules /
# "quote schema"
default = match.group(1) + \
('"%s"' % sch) + '.' + \
@@ -1439,27 +1451,40 @@ class PGDialect(default.DefaultDialect):
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
PK_SQL = """
SELECT a.attname
FROM
if self.server_version_info < (8, 4):
# unnest() and generate_subscripts() both introduced in
# version 8.4
PK_SQL = """
SELECT a.attname
FROM
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_attribute a
join pg_attribute a
on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
WHERE
t.oid = :table_oid and
ix.indisprimary = 't'
ORDER BY
a.attnum
"""
t = sql.text(PK_SQL, typemap={'attname':sqltypes.Unicode})
WHERE
t.oid = :table_oid and ix.indisprimary = 't'
ORDER BY a.attnum
"""
else:
PK_SQL = """
SELECT a.attname
FROM pg_attribute a JOIN (
SELECT unnest(ix.indkey) attnum,
generate_subscripts(ix.indkey, 1) ord
FROM pg_index ix
WHERE ix.indrelid = :table_oid AND ix.indisprimary
) k ON a.attnum=k.attnum
WHERE a.attrelid = :table_oid
ORDER BY k.ord
"""
t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
c = connection.execute(t, table_oid=table_oid)
primary_keys = [r[0] for r in c.fetchall()]
return primary_keys
@reflection.cache
def get_pk_constraint(self, connection, table_name, schema=None, **kw):
cols = self.get_primary_keys(connection, table_name,
cols = self.get_primary_keys(connection, table_name,
schema=schema, **kw)
table_oid = self.get_table_oid(connection, table_name, schema,
@@ -1486,14 +1511,14 @@ class PGDialect(default.DefaultDialect):
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT r.conname,
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
@@ -1510,7 +1535,7 @@ class PGDialect(default.DefaultDialect):
'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns = m
constrained_columns = [preparer._unquote_identifier(x)
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s*', constrained_columns)]
if referred_schema:
@@ -1525,7 +1550,7 @@ class PGDialect(default.DefaultDialect):
# and an explicit schema was given for the referencing table.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
referred_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s', referred_columns)]
fkey_d = {
'name' : conname,
@@ -1548,11 +1573,11 @@ class PGDialect(default.DefaultDialect):
ix.indisunique, ix.indexprs, ix.indpred,
a.attname
FROM
pg_class t
pg_class t
join pg_index ix on t.oid = ix.indrelid
join pg_class i on i.oid=ix.indexrelid
left outer join
pg_attribute a
left outer join
pg_attribute a
on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
WHERE
t.relkind = 'r'
@@ -1604,13 +1629,12 @@ class PGDialect(default.DefaultDialect):
SQL_ENUMS = """
SELECT t.typname as "name",
-- no enum defaults in 8.4 at least
-- t.typdefault as "default",
-- t.typdefault as "default",
pg_catalog.pg_type_is_visible(t.oid) as "visible",
n.nspname as "schema",
e.enumlabel as "label"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
WHERE t.typtype = 'e'
ORDER BY "name", e.oid -- e.oid gives us label order
@@ -1625,8 +1649,8 @@ class PGDialect(default.DefaultDialect):
for enum in c.fetchall():
if enum['visible']:
# 'visible' just means whether or not the enum is in a
# schema that's on the search path -- or not overriden by
# a schema with higher presedence. If it's not visible,
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = enum['name']
else:
@@ -1652,7 +1676,6 @@ class PGDialect(default.DefaultDialect):
n.nspname as "schema"
FROM pg_catalog.pg_type t
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
WHERE t.typtype = 'd'
"""
@@ -1665,16 +1688,16 @@ class PGDialect(default.DefaultDialect):
attype = re.search('([^\(]+)', domain['attype']).group(1)
if domain['visible']:
# 'visible' just means whether or not the domain is in a
# schema that's on the search path -- or not overriden by
# a schema with higher presedence. If it's not visible,
# schema that's on the search path -- or not overridden by
# a schema with higher precedence. If it's not visible,
# it will be prefixed with the schema-name when it's used.
name = domain['name']
else:
name = "%s.%s" % (domain['schema'], domain['name'])
domains[name] = {
'attype':attype,
'nullable': domain['nullable'],
'attype':attype,
'nullable': domain['nullable'],
'default': domain['default']
}

View File

@@ -1,5 +1,5 @@
# postgresql/pg8000.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# postgresql/psycopg2.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -10,7 +10,7 @@ Driver
------
The psycopg2 driver is available at http://pypi.python.org/pypi/psycopg2/ .
The dialect has several behaviors which are specifically tailored towards compatibility
The dialect has several behaviors which are specifically tailored towards compatibility
with this module.
Note that psycopg1 is **not** supported.
@@ -48,7 +48,7 @@ which specifies Unix-domain communication rather than TCP/IP communication::
create_engine("postgresql+psycopg2://user:password@/dbname")
By default, the socket file used is to connect to a Unix-domain socket
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
in ``/tmp``, or whatever socket directory was specified when PostgreSQL
was built. This value can be overridden by passing a pathname to psycopg2,
using ``host`` as an additional keyword argument::
@@ -61,11 +61,11 @@ See also:
Per-Statement/Connection Execution Options
-------------------------------------------
The following DBAPI-specific options are respected when used with
The following DBAPI-specific options are respected when used with
:meth:`.Connection.execution_options`, :meth:`.Executable.execution_options`,
:meth:`.Query.execution_options`, in addition to those not specific to DBAPIs:
* isolation_level - Set the transaction isolation level for the lifespan of a
* isolation_level - Set the transaction isolation level for the lifespan of a
:class:`.Connection` (can only be set on a connection, not a statement or query).
This includes the options ``SERIALIZABLE``, ``READ COMMITTED``,
``READ UNCOMMITTED`` and ``REPEATABLE READ``.
@@ -79,8 +79,8 @@ By default, the psycopg2 driver uses the ``psycopg2.extensions.UNICODE``
extension, such that the DBAPI receives and returns all strings as Python
Unicode objects directly - SQLAlchemy passes these values through without
change. Psycopg2 here will encode/decode string values based on the
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
current "client encoding" setting; by default this is the value in
the ``postgresql.conf`` file, which often defaults to ``SQL_ASCII``.
Typically, this can be changed to ``utf-8``, as a more useful default::
#client_encoding = sql_ascii # actually, defaults to database
@@ -90,26 +90,27 @@ Typically, this can be changed to ``utf-8``, as a more useful default::
A second way to affect the client encoding is to set it within Psycopg2
locally. SQLAlchemy will call psycopg2's ``set_client_encoding()``
method (see: http://initd.org/psycopg/docs/connection.html#connection.set_client_encoding)
on all new connections based on the value passed to
on all new connections based on the value passed to
:func:`.create_engine` using the ``client_encoding`` parameter::
engine = create_engine("postgresql://user:pass@host/dbname", client_encoding='utf8')
This overrides the encoding specified in the Postgresql client configuration.
The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine` is new as of
SQLAlchemy 0.7.3.
.. versionadded:: 0.7.3
The psycopg2-specific ``client_encoding`` parameter to :func:`.create_engine`.
SQLAlchemy can also be instructed to skip the usage of the psycopg2
``UNICODE`` extension and to instead utilize it's own unicode encode/decode
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False``
services, which are normally reserved only for those DBAPIs that don't
fully support unicode directly. Passing ``use_native_unicode=False``
to :func:`.create_engine` will disable usage of ``psycopg2.extensions.UNICODE``.
SQLAlchemy will instead encode data itself into Python bytestrings on the way
SQLAlchemy will instead encode data itself into Python bytestrings on the way
in and coerce from bytes on the way back,
using the value of the :func:`.create_engine` ``encoding`` parameter, which
using the value of the :func:`.create_engine` ``encoding`` parameter, which
defaults to ``utf-8``.
SQLAlchemy's own unicode encode/decode functionality is steadily becoming
obsolete as more DBAPIs support unicode fully along with the approach of
obsolete as more DBAPIs support unicode fully along with the approach of
Python 3; in modern usage psycopg2 should be relied upon to handle unicode.
Transactions
@@ -131,7 +132,7 @@ at the API level what level should be used.
NOTICE logging
---------------
The psycopg2 dialect will log Postgresql NOTICE messages via the
The psycopg2 dialect will log Postgresql NOTICE messages via the
``sqlalchemy.dialects.postgresql`` logger::
import logging
@@ -219,8 +220,8 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
(self.compiled and isinstance(self.compiled.statement, expression.Selectable) \
or \
(
(not self.compiled or
isinstance(self.compiled.statement, expression._TextClause))
(not self.compiled or
isinstance(self.compiled.statement, expression._TextClause))
and self.statement and SERVER_SIDE_CURSOR_RE.match(self.statement))
)
)
@@ -248,7 +249,7 @@ class PGExecutionContext_psycopg2(PGExecutionContext):
def _log_notices(self, cursor):
for notice in cursor.connection.notices:
# NOTICE messages have a
# NOTICE messages have a
# newline character at the end
logger.info(notice.rstrip())
@@ -290,7 +291,7 @@ class PGDialect_psycopg2(PGDialect):
}
)
def __init__(self, server_side_cursors=False, use_native_unicode=True,
def __init__(self, server_side_cursors=False, use_native_unicode=True,
client_encoding=None, **kwargs):
PGDialect.__init__(self, **kwargs)
self.server_side_cursors = server_side_cursors
@@ -298,12 +299,12 @@ class PGDialect_psycopg2(PGDialect):
self.supports_unicode_binds = use_native_unicode
self.client_encoding = client_encoding
if self.dbapi and hasattr(self.dbapi, '__version__'):
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
m = re.match(r'(\d+)\.(\d+)(?:\.(\d+))?',
self.dbapi.__version__)
if m:
self.psycopg2_version = tuple(
int(x)
for x in m.group(1, 2, 3)
int(x)
for x in m.group(1, 2, 3)
if x is not None)
@classmethod
@@ -315,8 +316,8 @@ class PGDialect_psycopg2(PGDialect):
def _isolation_lookup(self):
extensions = __import__('psycopg2.extensions').extensions
return {
'READ COMMITTED':extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED':extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'READ COMMITTED':extensions.ISOLATION_LEVEL_READ_COMMITTED,
'READ UNCOMMITTED':extensions.ISOLATION_LEVEL_READ_UNCOMMITTED,
'REPEATABLE READ':extensions.ISOLATION_LEVEL_REPEATABLE_READ,
'SERIALIZABLE':extensions.ISOLATION_LEVEL_SERIALIZABLE
}
@@ -327,9 +328,9 @@ class PGDialect_psycopg2(PGDialect):
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
)
connection.set_isolation_level(level)
@@ -369,9 +370,10 @@ class PGDialect_psycopg2(PGDialect):
def is_disconnect(self, e, connection, cursor):
if isinstance(e, self.dbapi.OperationalError):
# these error messages from libpq: interfaces/libpq/fe-misc.c.
# TODO: these are sent through gettext in libpq and we can't
# check within other locales - consider using connection.closed
return 'closed the connection' in str(e) or \
# TODO: these are sent through gettext in libpq and we can't
# check within other locales - consider using connection.closed
return 'terminating connection' in str(e) or \
'closed the connection' in str(e) or \
'connection not open' in str(e) or \
'could not receive data from server' in str(e)
elif isinstance(e, self.dbapi.InterfaceError):
@@ -379,7 +381,7 @@ class PGDialect_psycopg2(PGDialect):
return 'connection already closed' in str(e) or \
'cursor already closed' in str(e)
elif isinstance(e, self.dbapi.ProgrammingError):
# not sure where this path is originally from, it may
# not sure where this path is originally from, it may
# be obsolete. It really says "losed", not "closed".
return "losed the connection unexpectedly" in str(e)
else:

View File

@@ -1,5 +1,5 @@
# postgresql/pypostgresql.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -40,7 +40,7 @@ class PGDialect_pypostgresql(PGDialect):
default_paramstyle = 'pyformat'
# requires trunk version to support sane rowcounts
# TODO: use dbapi version information to set this flag appropariately
# TODO: use dbapi version information to set this flag appropriately
supports_sane_rowcount = True
supports_sane_multi_rowcount = False

View File

@@ -1,5 +1,5 @@
# postgresql/zxjdbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# sqlite/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# sqlite/base.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,7 +12,7 @@ section regarding that driver.
Date and Time Types
-------------------
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
SQLite does not have built-in DATE, TIME, or DATETIME types, and pysqlite does not provide
out of the box functionality for translating values between Python `datetime` objects
and a SQLite-supported format. SQLAlchemy's own :class:`~sqlalchemy.types.DateTime`
and related types provide date formatting and parsing functionality when SQlite is used.
@@ -36,23 +36,91 @@ Two things to note:
This is regardless of the AUTOINCREMENT keyword being present or not.
To specifically render the AUTOINCREMENT keyword on the primary key
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
column when rendering DDL, add the flag ``sqlite_autoincrement=True``
to the Table construct::
Table('sometable', metadata,
Column('id', Integer, primary_key=True),
Column('id', Integer, primary_key=True),
sqlite_autoincrement=True)
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
connection. Valid values for this parameter are ``SERIALIZABLE`` and
:func:`.create_engine` accepts an ``isolation_level`` parameter which results in
the command ``PRAGMA read_uncommitted <level>`` being invoked for every new
connection. Valid values for this parameter are ``SERIALIZABLE`` and
``READ UNCOMMITTED`` corresponding to a value of 0 and 1, respectively.
See the section :ref:`pysqlite_serializable` for an important workaround
when using serializable isolation with Pysqlite.
Database Locking Behavior / Concurrency
---------------------------------------
Note that SQLite is not designed for a high level of concurrency. The database
itself, being a file, is locked completely during write operations and within
transactions, meaning exactly one connection has exclusive access to the database
during this period - all other connections will be blocked during this time.
The Python DBAPI specification also calls for a connection model that is always
in a transaction; there is no BEGIN method, only commit and rollback. This implies
that a SQLite DBAPI driver would technically allow only serialized access to a
particular database file at all times. The pysqlite driver attempts to ameliorate this by
deferring the actual BEGIN statement until the first DML (INSERT, UPDATE, or
DELETE) is received within a transaction. While this breaks serializable isolation,
it at least delays the exclusive locking inherent in SQLite's design.
SQLAlchemy's default mode of usage with the ORM is known
as "autocommit=False", which means the moment the :class:`.Session` begins to be
used, a transaction is begun. As the :class:`.Session` is used, the autoflush
feature, also on by default, will flush out pending changes to the database
before each query. The effect of this is that a :class:`.Session` used in its
default mode will often emit DML early on, long before the transaction is actually
committed. This again will have the effect of serializing access to the SQLite
database. If highly concurrent reads are desired against the SQLite database,
it is advised that the autoflush feature be disabled, and potentially even
that autocommit be re-enabled, which has the effect of each SQL statement and
flush committing changes immediately.
For more information on SQLite's lack of concurrency by design, please
see `Situations Where Another RDBMS May Work Better - High Concurrency <http://www.sqlite.org/whentouse.html>`_
near the bottom of the page.
.. _sqlite_foreign_keys:
Foreign Key Support
-------------------
SQLite supports FOREIGN KEY syntax when emitting CREATE statements for tables,
however by default these constraints have no effect on the operation
of the table.
Constraint checking on SQLite has three prerequisites:
* At least version 3.6.19 of SQLite must be in use
* The SQLite libary must be compiled *without* the SQLITE_OMIT_FOREIGN_KEY
or SQLITE_OMIT_TRIGGER symbols enabled.
* The ``PRAGMA foreign_keys = ON`` statement must be emitted on all connections
before use.
SQLAlchemy allows for the ``PRAGMA`` statement to be emitted automatically
for new connections through the usage of events::
from sqlalchemy.engine import Engine
from sqlalchemy import event
@event.listens_for(Engine, "connect")
def set_sqlite_pragma(dbapi_connection, connection_record):
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
.. seealso::
`SQLite Foreign Key Support <http://www.sqlite.org/foreignkeys.html>`_ -
on the SQLite web site.
:ref:`event_toplevel` - SQLAlchemy event API.
"""
import datetime, re
@@ -80,36 +148,36 @@ class _DateTimeMixin(object):
class DATETIME(_DateTimeMixin, sqltypes.DateTime):
"""Represent a Python datetime object in SQLite using a string.
The default string storage format is::
"%04d-%02d-%02d %02d:%02d:%02d.%06d" % (value.year,
"%04d-%02d-%02d %02d:%02d:%02d.%06d" % (value.year,
value.month, value.day,
value.hour, value.minute,
value.hour, value.minute,
value.second, value.microsecond)
e.g.::
2011-03-15 12:05:57.10558
The storage format can be customized to some degree using the
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATETIME
dt = DATETIME(
storage_format="%04d/%02d/%02d %02d-%02d-%02d-%06d",
regexp=re.compile("(\d+)/(\d+)/(\d+) (\d+)-(\d+)-(\d+)(?:-(\d+))?")
)
:param storage_format: format string which will be appled to the
:param storage_format: format string which will be applied to the
tuple ``(value.year, value.month, value.day, value.hour,
value.minute, value.second, value.microsecond)``, given a
Python datetime.datetime() object.
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is appled to
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is applied to
the Python datetime() constructor via ``*map(int,
match_obj.groups(0))``.
"""
@@ -146,16 +214,16 @@ class DATE(_DateTimeMixin, sqltypes.Date):
"""Represent a Python date object in SQLite using a string.
The default string storage format is::
"%04d-%02d-%02d" % (value.year, value.month, value.day)
e.g.::
2011-03-15
The storage format can be customized to some degree using the
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import DATE
@@ -163,16 +231,16 @@ class DATE(_DateTimeMixin, sqltypes.Date):
storage_format="%02d/%02d/%02d",
regexp=re.compile("(\d+)/(\d+)/(\d+)")
)
:param storage_format: format string which will be appled to the
:param storage_format: format string which will be applied to the
tuple ``(value.year, value.month, value.day)``,
given a Python datetime.date() object.
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is appled to
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is applied to
the Python date() constructor via ``*map(int,
match_obj.groups(0))``.
"""
_storage_format = "%04d-%02d-%02d"
@@ -199,20 +267,20 @@ class DATE(_DateTimeMixin, sqltypes.Date):
class TIME(_DateTimeMixin, sqltypes.Time):
"""Represent a Python time object in SQLite using a string.
The default string storage format is::
"%02d:%02d:%02d.%06d" % (value.hour, value.minute,
"%02d:%02d:%02d.%06d" % (value.hour, value.minute,
value.second,
value.microsecond)
e.g.::
12:05:57.10558
The storage format can be customized to some degree using the
The storage format can be customized to some degree using the
``storage_format`` and ``regexp`` parameters, such as::
import re
from sqlalchemy.dialects.sqlite import TIME
@@ -220,13 +288,13 @@ class TIME(_DateTimeMixin, sqltypes.Time):
storage_format="%02d-%02d-%02d-%06d",
regexp=re.compile("(\d+)-(\d+)-(\d+)-(?:-(\d+))?")
)
:param storage_format: format string which will be appled
:param storage_format: format string which will be applied
to the tuple ``(value.hour, value.minute, value.second,
value.microsecond)``, given a Python datetime.time() object.
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is appled to
:param regexp: regular expression which will be applied to
incoming result rows. The resulting match object is applied to
the Python time() constructor via ``*map(int,
match_obj.groups(0))``.
@@ -302,6 +370,9 @@ class SQLiteCompiler(compiler.SQLCompiler):
def visit_now_func(self, fn, **kw):
    """Render the generic now() function as SQLite's CURRENT_TIMESTAMP."""
    return "CURRENT_TIMESTAMP"
def visit_localtimestamp_func(self, func, **kw):
    """Render localtimestamp using SQLite's DATETIME function with the
    "localtime" modifier."""
    return 'DATETIME(CURRENT_TIMESTAMP, "localtime")'
def visit_true(self, expr, **kw):
    """SQLite has no boolean literal; render a true constant as integer 1."""
    return '1'
@@ -373,7 +444,7 @@ class SQLiteDDLCompiler(compiler.DDLCompiler):
issubclass(c.type._type_affinity, sqltypes.Integer) and \
not c.foreign_keys:
return None
return super(SQLiteDDLCompiler, self).\
visit_primary_key_constraint(constraint)
@@ -441,6 +512,22 @@ class SQLiteIdentifierPreparer(compiler.IdentifierPreparer):
result = self.quote_schema(index.table.schema, index.table.quote_schema) + "." + result
return result
class SQLiteExecutionContext(default.DefaultExecutionContext):
    """Execution context for the SQLite dialect.

    Handles SQLite's habit of reporting result columns from a UNION
    as ``"tablename.colname"`` in ``cursor.description``.
    """

    @util.memoized_property
    def _preserve_raw_colnames(self):
        # per-execution option: when True, dotted column names from
        # cursor.description are passed through unmodified
        return self.execution_options.get("sqlite_raw_colnames", False)

    def _translate_colname(self, colname):
        # adjust for dotted column names.  SQLite in the case of UNION
        # may store col names as "tablename.colname" in
        # cursor.description; return (translated name, raw name).
        if not self._preserve_raw_colnames and "." in colname:
            return colname.split(".")[1], colname
        else:
            return colname, None
class SQLiteDialect(default.DefaultDialect):
name = 'sqlite'
supports_alter = False
@@ -451,6 +538,7 @@ class SQLiteDialect(default.DefaultDialect):
supports_cast = True
default_paramstyle = 'qmark'
execution_ctx_cls = SQLiteExecutionContext
statement_compiler = SQLiteCompiler
ddl_compiler = SQLiteDDLCompiler
type_compiler = SQLiteTypeCompiler
@@ -462,13 +550,15 @@ class SQLiteDialect(default.DefaultDialect):
supports_cast = True
supports_default_values = True
_broken_fk_pragma_quotes = False
def __init__(self, isolation_level=None, native_datetime=False, **kwargs):
    """Construct the SQLite dialect.

    :param isolation_level: optional isolation level string, applied
      to each new connection.
    :param native_datetime: when True, indicates the DBAPI driver itself
      performs date/timestamp conversions.
    """
    default.DefaultDialect.__init__(self, **kwargs)
    self.isolation_level = isolation_level

    # this flag used by pysqlite dialect, and perhaps others in the
    # future, to indicate the driver is handling date/timestamp
    # conversions (and perhaps datetime/time as well on some
    # hypothetical driver ?)
    self.native_datetime = native_datetime
@@ -478,6 +568,12 @@ class SQLiteDialect(default.DefaultDialect):
self.supports_cast = \
self.dbapi.sqlite_version_info >= (3, 2, 3)
# see http://www.sqlalchemy.org/trac/ticket/2568
# as well as http://www.sqlite.org/src/info/600482d161
self._broken_fk_pragma_quotes = \
self.dbapi.sqlite_version_info < (3, 6, 14)
_isolation_lookup = {
'READ UNCOMMITTED':1,
'SERIALIZABLE':0
@@ -488,9 +584,9 @@ class SQLiteDialect(default.DefaultDialect):
except KeyError:
raise exc.ArgumentError(
"Invalid value '%s' for isolation_level. "
"Valid isolation levels for %s are %s" %
"Valid isolation levels for %s are %s" %
(level, self.name, ", ".join(self._isolation_lookup))
)
)
cursor = connection.cursor()
cursor.execute("PRAGMA read_uncommitted = %d" % isolation_level)
cursor.close()
@@ -501,11 +597,11 @@ class SQLiteDialect(default.DefaultDialect):
res = cursor.fetchone()
if res:
value = res[0]
else:
else:
# http://www.sqlite.org/changes.html#version_3_3_3
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# "Optional READ UNCOMMITTED isolation (instead of the
# default isolation level of SERIALIZABLE) and
# table level locking when database connections
# share a common cache.""
# pre-SQLite 3.3.0 default to 0
value = 0
@@ -525,16 +621,6 @@ class SQLiteDialect(default.DefaultDialect):
else:
return None
def _translate_colname(self, colname):
# adjust for dotted column names. SQLite
# in the case of UNION may store col names as
# "tablename.colname"
# in cursor.description
if "." in colname:
return colname.split(".")[1], colname
else:
return colname, None
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
@@ -631,45 +717,52 @@ class SQLiteDialect(default.DefaultDialect):
pragma = "PRAGMA "
qtable = quote(table_name)
c = _pragma_cursor(
connection.execute("%stable_info(%s)" %
connection.execute("%stable_info(%s)" %
(pragma, qtable)))
found_table = False
columns = []
while True:
row = c.fetchone()
if row is None:
break
(name, type_, nullable, default, has_default, primary_key) = \
(row[1], row[2].upper(), not row[3],
row[4], row[4] is not None, row[5])
name = re.sub(r'^\"|\"$', '', name)
match = re.match(r'(\w+)(\(.*?\))?', type_)
if match:
coltype = match.group(1)
args = match.group(2)
else:
coltype = "VARCHAR"
args = ''
try:
coltype = self.ischema_names[coltype]
if args is not None:
args = re.findall(r'(\d+)', args)
coltype = coltype(*[int(a) for a in args])
except KeyError:
util.warn("Did not recognize type '%s' of column '%s'" %
(coltype, name))
coltype = sqltypes.NullType()
columns.append({
'name' : name,
'type' : coltype,
'nullable' : nullable,
'default' : default,
'autoincrement':default is None,
'primary_key': primary_key
})
rows = c.fetchall()
columns = []
for row in rows:
(name, type_, nullable, default, primary_key) = \
(row[1], row[2].upper(), not row[3],
row[4], row[5])
columns.append(self._get_column_info(name, type_, nullable,
default, primary_key))
return columns
def _get_column_info(self, name, type_, nullable,
                     default, primary_key):
    """Build the reflection dictionary for a single column.

    ``type_`` is the raw upper-cased type string from PRAGMA table_info;
    it is parsed into a type class looked up in ``self.ischema_names``,
    applying any numeric arguments (e.g. ``VARCHAR(30)``).
    """
    # split "TYPENAME(args)" into the bare type name and the
    # parenthesized argument list (may be absent)
    match = re.match(r'(\w+)(\(.*?\))?', type_)
    if match:
        coltype = match.group(1)
        args = match.group(2)
    else:
        coltype = "VARCHAR"
        args = ''
    try:
        coltype = self.ischema_names[coltype]
        if args is not None:
            # apply numeric arguments such as length/precision
            args = re.findall(r'(\d+)', args)
            coltype = coltype(*[int(a) for a in args])
    except KeyError:
        util.warn("Did not recognize type '%s' of column '%s'" %
                  (coltype, name))
        coltype = sqltypes.NullType()
    if default is not None:
        # normalize server defaults to unicode text (Python 2 codebase)
        default = unicode(default)
    return {
        'name': name,
        'type': coltype,
        'nullable': nullable,
        'default': default,
        # NOTE(review): absence of a server default is used as the
        # autoincrement heuristic here — presumably refined by
        # primary-key handling elsewhere; confirm against callers
        'autoincrement': default is None,
        'primary_key': primary_key
    }
@reflection.cache
def get_primary_keys(self, connection, table_name, schema=None, **kw):
cols = self.get_columns(connection, table_name, schema, **kw)
@@ -687,7 +780,8 @@ class SQLiteDialect(default.DefaultDialect):
else:
pragma = "PRAGMA "
qtable = quote(table_name)
c = _pragma_cursor(connection.execute("%sforeign_key_list(%s)" % (pragma, qtable)))
statement = "%sforeign_key_list(%s)" % (pragma, qtable)
c = _pragma_cursor(connection.execute(statement))
fkeys = []
fks = {}
while True:
@@ -695,34 +789,38 @@ class SQLiteDialect(default.DefaultDialect):
if row is None:
break
(numerical_id, rtbl, lcol, rcol) = (row[0], row[2], row[3], row[4])
# sqlite won't return rcol if the table
# was created with REFERENCES <tablename>, no col
if rcol is None:
rcol = lcol
rtbl = re.sub(r'^\"|\"$', '', rtbl)
lcol = re.sub(r'^\"|\"$', '', lcol)
rcol = re.sub(r'^\"|\"$', '', rcol)
try:
fk = fks[numerical_id]
except KeyError:
fk = {
'name' : None,
'constrained_columns' : [],
'referred_schema' : None,
'referred_table' : rtbl,
'referred_columns' : []
}
fkeys.append(fk)
fks[numerical_id] = fk
# look up the table based on the given table's engine, not 'self',
# since it could be a ProxyEngine
if lcol not in fk['constrained_columns']:
fk['constrained_columns'].append(lcol)
if rcol not in fk['referred_columns']:
fk['referred_columns'].append(rcol)
self._parse_fk(fks, fkeys, numerical_id, rtbl, lcol, rcol)
return fkeys
def _parse_fk(self, fks, fkeys, numerical_id, rtbl, lcol, rcol):
    """Accumulate one PRAGMA foreign_key_list row into the fk collections.

    ``fks`` maps the pragma's numeric constraint id to a constraint
    dict; ``fkeys`` is the ordered result list.  Returns the constraint
    dict for this row.
    """
    # sqlite won't return rcol if the table
    # was created with REFERENCES <tablename>, no col
    if rcol is None:
        rcol = lcol
    if self._broken_fk_pragma_quotes:
        # older SQLite versions leave quote characters around the
        # referred table name; strip them
        rtbl = re.sub(r'^[\"\[`\']|[\"\]`\']$', '', rtbl)
    try:
        fk = fks[numerical_id]
    except KeyError:
        # first row for this constraint id: create its record
        fk = {
            'name': None,
            'constrained_columns': [],
            'referred_schema': None,
            'referred_table': rtbl,
            'referred_columns': []
        }
        fkeys.append(fk)
        fks[numerical_id] = fk
    if lcol not in fk['constrained_columns']:
        fk['constrained_columns'].append(lcol)
    if rcol not in fk['referred_columns']:
        fk['referred_columns'].append(rcol)
    return fk
@reflection.cache
def get_indexes(self, connection, table_name, schema=None, **kw):
quote = self.identifier_preparer.quote_identifier
@@ -757,9 +855,10 @@ class SQLiteDialect(default.DefaultDialect):
def _pragma_cursor(cursor):
"""work around SQLite issue whereby cursor.description
"""work around SQLite issue whereby cursor.description
is blank when PRAGMA returns no rows."""
if cursor.closed:
cursor.fetchone = lambda: None
cursor.fetchall = lambda: []
return cursor

View File

@@ -1,5 +1,5 @@
# sqlite/pysqlite.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,15 +12,15 @@ module included with the Python distribution.
Driver
------
When using Python 2.5 and above, the built in ``sqlite3`` driver is
When using Python 2.5 and above, the built in ``sqlite3`` driver is
already installed and no additional installation is needed. Otherwise,
the ``pysqlite2`` driver needs to be present. This is the same driver as
``sqlite3``, just with a different name.
The ``pysqlite2`` driver will be loaded first, and if not found, ``sqlite3``
is loaded. This allows an explicitly installed pysqlite driver to take
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
precedence over the built in one. As with all dialects, a specific
DBAPI module may be provided to :func:`~sqlalchemy.create_engine()` to control
this explicitly::
from sqlite3 import dbapi2 as sqlite
@@ -64,25 +64,25 @@ The sqlite ``:memory:`` identifier is the default if no filepath is present. Sp
Compatibility with sqlite3 "native" date and datetime types
-----------------------------------------------------------
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
The pysqlite driver includes the sqlite3.PARSE_DECLTYPES and
sqlite3.PARSE_COLNAMES options, which have the effect of any column
or expression explicitly cast as "date" or "timestamp" will be converted
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
to a Python date or datetime object. The date and datetime types provided
with the pysqlite dialect are not currently compatible with these options,
since they render the ISO date/datetime including microseconds, which
pysqlite's driver does not. Additionally, SQLAlchemy does not at
this time automatically render the "cast" syntax required for the
this time automatically render the "cast" syntax required for the
freestanding functions "current_timestamp" and "current_date" to return
datetime/date types natively. Unfortunately, pysqlite
datetime/date types natively. Unfortunately, pysqlite
does not provide the standard DBAPI types in ``cursor.description``,
leaving SQLAlchemy with no way to detect these types on the fly
leaving SQLAlchemy with no way to detect these types on the fly
without expensive per-row type checks.
Keeping in mind that pysqlite's parsing option is not recommended,
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
nor should be necessary, for use with SQLAlchemy, usage of PARSE_DECLTYPES
can be forced if one configures "native_datetime=True" on create_engine()::
engine = create_engine('sqlite://',
engine = create_engine('sqlite://',
connect_args={'detect_types': sqlite3.PARSE_DECLTYPES|sqlite3.PARSE_COLNAMES},
native_datetime=True
)
@@ -97,37 +97,40 @@ Threading/Pooling Behavior
---------------------------
Pysqlite's default behavior is to prohibit the usage of a single connection
in more than one thread. This is controlled by the ``check_same_thread``
Pysqlite flag. This default is intended to work with older versions
of SQLite that did not support multithreaded operation under
in more than one thread. This is originally intended to work with older versions
of SQLite that did not support multithreaded operation under
various circumstances. In particular, older SQLite versions
did not allow a ``:memory:`` database to be used in multiple threads
under any circumstances.
Pysqlite does include a now-undocumented flag known as
``check_same_thread`` which will disable this check, however note that pysqlite
connections are still not safe to use concurrently in multiple threads.
In particular, any statement execution calls would need to be externally
mutexed, as Pysqlite does not provide for thread-safe propagation of error
messages among other things. So while even ``:memory:`` databases can be
shared among threads in modern SQLite, Pysqlite doesn't provide enough
thread-safety to make this usage worth it.
SQLAlchemy sets up pooling to work with Pysqlite's default behavior:
* When a ``:memory:`` SQLite database is specified, the dialect by default will use
:class:`.SingletonThreadPool`. This pool maintains a single connection per
thread, so that all access to the engine within the current thread use the
same ``:memory:`` database - other threads would access a different
same ``:memory:`` database - other threads would access a different
``:memory:`` database.
* When a file-based database is specified, the dialect will use :class:`.NullPool`
* When a file-based database is specified, the dialect will use :class:`.NullPool`
as the source of connections. This pool closes and discards connections
which are returned to the pool immediately. SQLite file-based connections
have extremely low overhead, so pooling is not necessary. The scheme also
prevents a connection from being used again in a different thread and works
best with SQLite's coarse-grained file locking.
.. note::
The default selection of :class:`.NullPool` for SQLite file-based databases
is new in SQLAlchemy 0.7. Previous versions
select :class:`.SingletonThreadPool` by
default for all SQLite databases.
.. versionchanged:: 0.7
Default selection of :class:`.NullPool` for SQLite file-based databases.
Previous versions select :class:`.SingletonThreadPool` by
default for all SQLite databases.
Modern versions of SQLite no longer have the threading restrictions, and assuming
the sqlite3/pysqlite library was built with SQLite's default threading mode
of "Serialized", even ``:memory:`` databases can be shared among threads.
Using a Memory Database in Multiple Threads
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -143,7 +146,7 @@ can be passed to Pysqlite as ``False``::
connect_args={'check_same_thread':False},
poolclass=StaticPool)
Note that using a ``:memory:`` database in multiple threads requires a recent
Note that using a ``:memory:`` database in multiple threads requires a recent
version of SQLite.
Using Temporary Tables with SQLite
@@ -177,8 +180,8 @@ Unicode
The pysqlite driver only returns Python ``unicode`` objects in result sets, never
plain strings, and accommodates ``unicode`` objects within bound parameter
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
values in all cases. Regardless of the SQLAlchemy string type in use,
string-based result values will be Python ``unicode`` in Python 2.
The :class:`.Unicode` type should still be used to indicate those columns that
require unicode, however, so that non-``unicode`` values passed inadvertently
will emit a warning. Pysqlite will emit an error if a non-``unicode`` string
@@ -193,7 +196,7 @@ The pysqlite DBAPI driver has a long-standing bug in which transactional
state is not begun until the first DML statement, that is INSERT, UPDATE
or DELETE, is emitted. A SELECT statement will not cause transactional
state to begin. While this mode of usage is fine for typical situations
and has the advantage that the SQLite database file is not prematurely
and has the advantage that the SQLite database file is not prematurely
locked, it breaks serializable transaction isolation, which requires
that the database file be locked upon any SQL being emitted.

View File

@@ -1,5 +1,5 @@
# sybase/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,8 +1,8 @@
# sybase/base.py
# Copyright (C) 2010-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
# get_select_precolumns(), limit_clause() implementation
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# copyright (C) 2007 Fisch Asset Management
# AG http://www.fam.ch, with coding by Alexander Houben
# alexander.houben@thor-solutions.ch
#
# This module is part of SQLAlchemy and is released under
@@ -10,8 +10,12 @@
"""Support for Sybase Adaptive Server Enterprise (ASE).
Note that this dialect is no longer specific to Sybase iAnywhere.
ASE is the primary support platform.
.. note::
The Sybase dialect functions on current SQLAlchemy versions
but is not regularly tested, and may have many issues and
caveats not currently handled. In particular, the table
and database reflection features are not implemented.
"""
@@ -126,7 +130,7 @@ class UNIQUEIDENTIFIER(sqltypes.TypeEngine):
class IMAGE(sqltypes.LargeBinary):
__visit_name__ = 'IMAGE'
class SybaseTypeCompiler(compiler.GenericTypeCompiler):
def visit_large_binary(self, type_):
@@ -220,12 +224,12 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
self._enable_identity_insert = False
if self._enable_identity_insert:
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.cursor.execute("SET IDENTITY_INSERT %s ON" %
self.dialect.identifier_preparer.format_table(tbl))
if self.isddl:
# TODO: to enhance this, we can detect "ddl in tran" on the
# database settings. this error message should be improved to
# database settings. this error message should be improved to
# include a note about that.
if not self.should_autocommit:
raise exc.InvalidRequestError(
@@ -236,7 +240,7 @@ class SybaseExecutionContext(default.DefaultExecutionContext):
"AUTOCOMMIT (Assuming no Sybase 'ddl in tran')")
self.set_ddl_autocommit(
self.root_connection.connection.connection,
self.root_connection.connection.connection,
True)
@@ -300,7 +304,7 @@ class SybaseSQLCompiler(compiler.SQLCompiler):
field, self.process(extract.expr, **kw))
def for_update_clause(self, select):
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# "FOR UPDATE" is only allowed on "DECLARE CURSOR"
# which SQLAlchemy doesn't use
return ''

View File

@@ -1,5 +1,5 @@
# sybase/mxodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# sybase/pyodbc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -17,7 +17,7 @@ Connect strings are of the form::
Unicode Support
---------------
The pyodbc driver currently supports usage of these Sybase types with
The pyodbc driver currently supports usage of these Sybase types with
Unicode or multibyte strings::
CHAR
@@ -43,7 +43,7 @@ from sqlalchemy.util.compat import decimal
class _SybNumeric_pyodbc(sqltypes.Numeric):
"""Turns Decimals with adjusted() < -6 into floats.
It's not yet known how to get decimals with many
It's not yet known how to get decimals with many
significant digits or very large adjusted() into Sybase
via pyodbc.

View File

@@ -38,7 +38,7 @@ class SybaseExecutionContext_pysybase(SybaseExecutionContext):
def set_ddl_autocommit(self, dbapi_connection, value):
    """Enable autocommit for DDL by committing on the raw connection.

    Disabling (``value`` false) is a no-op for this driver.
    """
    if not value:
        return
    # call commit() on the Sybase connection directly, to avoid any
    # side effects of calling a Connection transactional method
    # inside of pre_exec()
    dbapi_connection.commit()
@@ -52,7 +52,7 @@ class SybaseExecutionContext_pysybase(SybaseExecutionContext):
class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
def bindparam_string(self, name):
def bindparam_string(self, name, **kw):
    # Sybase (python-sybase) uses "@name" host-variable syntax for bind
    # parameters; extra keyword args from the base compiler are ignored.
    return "@" + name
class SybaseDialect_pysybase(SybaseDialect):
@@ -82,10 +82,10 @@ class SybaseDialect_pysybase(SybaseDialect):
cursor.execute(statement, param)
def _get_server_version_info(self, connection):
vers = connection.scalar("select @@version_number")
# i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
# (12, 5, 0, 0)
return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)
vers = connection.scalar("select @@version_number")
# i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
# (12, 5, 0, 0)
return (vers / 1000, vers % 1000 / 100, vers % 100 / 10, vers % 10)
def is_disconnect(self, e, connection, cursor):
if isinstance(e, (self.dbapi.OperationalError,

View File

@@ -1,5 +1,5 @@
# engine/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -101,8 +101,8 @@ default_strategy = 'plain'
def create_engine(*args, **kwargs):
"""Create a new :class:`.Engine` instance.
The standard calling form is to send the URL as the
first positional argument, usually a string
The standard calling form is to send the URL as the
first positional argument, usually a string
that indicates database dialect and connection arguments.
Additional keyword arguments may then follow it which
establish various options on the resulting :class:`.Engine`
@@ -111,14 +111,14 @@ def create_engine(*args, **kwargs):
The string form of the URL is
``dialect+driver://user:password@host/dbname[?key=value..]``, where
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
``dialect`` is a database name such as ``mysql``, ``oracle``,
``postgresql``, etc., and ``driver`` the name of a DBAPI, such as
``psycopg2``, ``pyodbc``, ``cx_oracle``, etc. Alternatively,
the URL can be an instance of :class:`~sqlalchemy.engine.url.URL`.
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be
specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
``**kwargs`` takes a wide variety of options which are routed
towards their appropriate components. Arguments may be
specific to the :class:`.Engine`, the underlying :class:`.Dialect`, as well as the
:class:`.Pool`. Specific dialects also accept keyword arguments that
are unique to that dialect. Here, we describe the parameters
that are common to most :func:`.create_engine()` usage.
@@ -136,11 +136,11 @@ def create_engine(*args, **kwargs):
:ref:`engines_toplevel`
:ref:`connections_toplevel`
:param assert_unicode: Deprecated. This flag
sets an engine-wide default value for
the ``assert_unicode`` flag on the
:class:`.String` type - see that
the ``assert_unicode`` flag on the
:class:`.String` type - see that
type for further details.
:param connect_args: a dictionary of options which will be
@@ -151,16 +151,16 @@ def create_engine(*args, **kwargs):
:param convert_unicode=False: if set to True, sets
the default behavior of ``convert_unicode`` on the
:class:`.String` type to ``True``, regardless
of a setting of ``False`` on an individual
of a setting of ``False`` on an individual
:class:`.String` type, thus causing all :class:`.String`
-based columns
to accommodate Python ``unicode`` objects. This flag
is useful as an engine-wide setting when using a
is useful as an engine-wide setting when using a
DBAPI that does not natively support Python
``unicode`` objects and raises an error when
one is received (such as pyodbc with FreeTDS).
See :class:`.String` for further details on
See :class:`.String` for further details on
what this flag indicates.
:param creator: a callable which returns a DBAPI connection.
@@ -184,43 +184,43 @@ def create_engine(*args, **kwargs):
:ref:`dbengine_logging` for information on how to configure logging
directly.
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
:param encoding: Defaults to ``utf-8``. This is the string
encoding used by SQLAlchemy for string encode/decode
operations which occur within SQLAlchemy, **outside of
the DBAPI.** Most modern DBAPIs feature some degree of
direct support for Python ``unicode`` objects,
what you see in Python 2 as a string of the form
``u'some string'``. For those scenarios where the
``u'some string'``. For those scenarios where the
DBAPI is detected as not supporting a Python ``unicode``
object, this encoding is used to determine the
object, this encoding is used to determine the
source/destination encoding. It is **not used**
for those cases where the DBAPI handles unicode
directly.
To properly configure a system to accommodate Python
``unicode`` objects, the DBAPI should be
``unicode`` objects, the DBAPI should be
configured to handle unicode to the greatest
degree as is appropriate - see
the notes on unicode pertaining to the specific
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
target database in use at :ref:`dialect_toplevel`.
Areas where string encoding may need to be accommodated
outside of the DBAPI include zero or more of:
* the values passed to bound parameters, corresponding to
the :class:`.Unicode` type or the :class:`.String` type
when ``convert_unicode`` is ``True``;
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
* the values returned in result set columns corresponding
to the :class:`.Unicode` type or the :class:`.String`
type when ``convert_unicode`` is ``True``;
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
* the string SQL statement passed to the DBAPI's
``cursor.execute()`` method;
* the string names of the keys in the bound parameter
dictionary passed to the DBAPI's ``cursor.execute()``
as well as ``cursor.setinputsizes()`` methods;
* the string column names retrieved from the DBAPI's
* the string column names retrieved from the DBAPI's
``cursor.description`` attribute.
When using Python 3, the DBAPI is required to support
*all* of the above values as Python ``unicode`` objects,
which in Python 3 are just known as ``str``. In Python 2,
@@ -236,9 +236,9 @@ def create_engine(*args, **kwargs):
:param implicit_returning=True: When ``True``, a RETURNING-
compatible construct, if available, will be used to
fetch newly generated primary key values when a single row
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
INSERT statement is emitted with no existing returning()
clause. This applies to those backends which support RETURNING
or a compatible construct, including Postgresql, Firebird, Oracle,
Microsoft SQL Server. Set this to ``False`` to disable
the automatic usage of RETURNING.
@@ -248,13 +248,13 @@ def create_engine(*args, **kwargs):
"_(counter)". If ``None``, the value of
``dialect.max_identifier_length`` is used instead.
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
:param listeners: A list of one or more
:class:`~sqlalchemy.interfaces.PoolListener` objects which will
receive connection pool events.
:param logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
"sqlalchemy.engine" logger. Defaults to a hexstring of the
object's id.
:param max_overflow=10: the number of connections to allow in
@@ -286,8 +286,8 @@ def create_engine(*args, **kwargs):
of pool to be used.
:param pool_logging_name: String identifier which will be used within
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
the "name" field of logging records generated within the
"sqlalchemy.pool" logger. Defaults to a hexstring of the object's
id.
:param pool_size=5: the number of connections to keep open
@@ -307,18 +307,29 @@ def create_engine(*args, **kwargs):
server configuration as well).
:param pool_reset_on_return='rollback': set the "reset on return"
behavior of the pool, which is whether ``rollback()``,
behavior of the pool, which is whether ``rollback()``,
``commit()``, or nothing is called upon connections
being returned to the pool. See the docstring for
``reset_on_return`` at :class:`.Pool`. (new as of 0.7.6)
``reset_on_return`` at :class:`.Pool`.
.. versionadded:: 0.7.6
:param pool_timeout=30: number of seconds to wait before giving
up on getting a connection from the pool. This is only used
with :class:`~sqlalchemy.pool.QueuePool`.
:param strategy='plain': selects alternate engine implementations.
Currently available is the ``threadlocal``
strategy, which is described in :ref:`threadlocal_strategy`.
Currently available are:
* the ``threadlocal`` strategy, which is described in
:ref:`threadlocal_strategy`;
* the ``mock`` strategy, which dispatches all statement
execution to a function passed as the argument ``executor``.
See `example in the FAQ <http://www.sqlalchemy.org/trac/wiki/FAQ#HowcanIgettheCREATETABLEDROPTABLEoutputasastring>`_.
:param executor=None: a function taking arguments
``(sql, *multiparams, **params)``, to which the ``mock`` strategy will
dispatch all statement execution. Used only by ``strategy='mock'``.
"""

File diff suppressed because it is too large Load Diff

View File

@@ -28,7 +28,7 @@ class SchemaGenerator(DDLBase):
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or \
not self.dialect.has_table(self.connection,
not self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_create_sequence(self, sequence):
@@ -39,8 +39,8 @@ class SchemaGenerator(DDLBase):
(
not self.checkfirst or
not self.dialect.has_sequence(
self.connection,
sequence.name,
self.connection,
sequence.name,
schema=sequence.schema)
)
)
@@ -50,9 +50,9 @@ class SchemaGenerator(DDLBase):
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in sql_util.sort_tables(tables)
collection = [t for t in sql_util.sort_tables(tables)
if self._can_create_table(t)]
seq_coll = [s for s in metadata._sequences.values()
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_create_sequence(s)]
metadata.dispatch.before_create(metadata, self.connection,
@@ -95,7 +95,7 @@ class SchemaGenerator(DDLBase):
def visit_sequence(self, sequence, create_ok=False):
if not create_ok and not self._can_create_sequence(sequence):
return
return
self.connection.execute(schema.CreateSequence(sequence))
def visit_index(self, index):
@@ -116,9 +116,9 @@ class SchemaDropper(DDLBase):
tables = self.tables
else:
tables = metadata.tables.values()
collection = [t for t in reversed(sql_util.sort_tables(tables))
collection = [t for t in reversed(sql_util.sort_tables(tables))
if self._can_drop_table(t)]
seq_coll = [s for s in metadata._sequences.values()
seq_coll = [s for s in metadata._sequences.values()
if s.column is None and self._can_drop_sequence(s)]
metadata.dispatch.before_drop(metadata, self.connection,
@@ -141,7 +141,7 @@ class SchemaDropper(DDLBase):
self.dialect.validate_identifier(table.name)
if table.schema:
self.dialect.validate_identifier(table.schema)
return not self.checkfirst or self.dialect.has_table(self.connection,
return not self.checkfirst or self.dialect.has_table(self.connection,
table.name, schema=table.schema)
def _can_drop_sequence(self, sequence):
@@ -150,8 +150,8 @@ class SchemaDropper(DDLBase):
not sequence.optional) and
(not self.checkfirst or
self.dialect.has_sequence(
self.connection,
sequence.name,
self.connection,
sequence.name,
schema=sequence.schema))
)

View File

@@ -1,5 +1,5 @@
# engine/default.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -89,17 +89,13 @@ class DefaultDialect(base.Dialect):
server_version_info = None
# indicates symbol names are
# indicates symbol names are
# UPPERCASEd if they are case insensitive
# within the database.
# if this is True, the methods normalize_name()
# and denormalize_name() must be provided.
requires_name_normalize = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
reflection_options = ()
def __init__(self, convert_unicode=False, assert_unicode=False,
@@ -190,7 +186,7 @@ class DefaultDialect(base.Dialect):
self.returns_unicode_strings = self._check_unicode_returns(connection)
self.do_rollback(connection.connection)
def on_connect(self):
"""return a callable which sets up a newly created DBAPI connection.
@@ -220,7 +216,7 @@ class DefaultDialect(base.Dialect):
try:
cursor.execute(
cast_to(
expression.select(
expression.select(
[expression.cast(
expression.literal_column(
"'test %s returns'" % formatstr), type_)
@@ -264,20 +260,20 @@ class DefaultDialect(base.Dialect):
return insp.reflecttable(table, include_columns, exclude_columns)
def get_pk_constraint(self, conn, table_name, schema=None, **kw):
"""Compatiblity method, adapts the result of get_primary_keys()
"""Compatibility method, adapts the result of get_primary_keys()
for those dialects which don't implement get_pk_constraint().
"""
return {
'constrained_columns':
self.get_primary_keys(conn, table_name,
self.get_primary_keys(conn, table_name,
schema=schema, **kw)
}
def validate_identifier(self, ident):
if len(ident) > self.max_identifier_length:
raise exc.IdentifierError(
"Identifier '%s' exceeds maximum length of %d characters" %
"Identifier '%s' exceeds maximum length of %d characters" %
(ident, self.max_identifier_length)
)
@@ -341,8 +337,8 @@ class DefaultDialect(base.Dialect):
return False
def reset_isolation_level(self, dbapi_conn):
# default_isolation_level is read from the first conenction
# after the initial set of 'isolation_level', if any, so is
# default_isolation_level is read from the first connection
# after the initial set of 'isolation_level', if any, so is
# the configured default of this dialect.
self.set_isolation_level(dbapi_conn, self.default_isolation_level)
@@ -355,9 +351,15 @@ class DefaultExecutionContext(base.ExecutionContext):
result_map = None
compiled = None
statement = None
postfetch_cols = None
prefetch_cols = None
_is_implicit_returning = False
_is_explicit_returning = False
# a hook for SQLite's translation of
# result column names
_translate_colname = None
@classmethod
def _init_ddl(cls, dialect, connection, dbapi_connection, compiled_ddl):
"""Initialize execution context for a DDLElement construct."""
@@ -449,8 +451,8 @@ class DefaultExecutionContext(base.ExecutionContext):
processors = compiled._bind_processors
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# Convert the dictionary of bind parameter values
# into a dict or list to be sent to the DBAPI's
# execute() or executemany() method.
parameters = []
if dialect.positional:
@@ -513,7 +515,7 @@ class DefaultExecutionContext(base.ExecutionContext):
for d in parameters
] or [{}]
else:
self.parameters = [dialect.execute_sequence_format(p)
self.parameters = [dialect.execute_sequence_format(p)
for p in parameters]
self.executemany = len(parameters) > 1
@@ -550,10 +552,10 @@ class DefaultExecutionContext(base.ExecutionContext):
@util.memoized_property
def should_autocommit(self):
autocommit = self.execution_options.get('autocommit',
not self.compiled and
autocommit = self.execution_options.get('autocommit',
not self.compiled and
self.statement and
expression.PARSE_AUTOCOMMIT
expression.PARSE_AUTOCOMMIT
or False)
if autocommit is expression.PARSE_AUTOCOMMIT:
@@ -586,7 +588,7 @@ class DefaultExecutionContext(base.ExecutionContext):
if type_ is not None:
# apply type post processors to the result
proc = type_._cached_result_processor(
self.dialect,
self.dialect,
self.cursor.description[0][1]
)
if proc:
@@ -623,7 +625,7 @@ class DefaultExecutionContext(base.ExecutionContext):
and when no explicit id value was bound to the
statement.
The function is called once, directly after
The function is called once, directly after
post_exec() and before the transaction is committed
or ResultProxy is generated. If the post_exec()
method assigns a value to `self._lastrowid`, the
@@ -672,7 +674,7 @@ class DefaultExecutionContext(base.ExecutionContext):
self.inserted_primary_key = [
c is autoinc_col and lastrowid or v
for c, v in zip(
table.primary_key,
table.primary_key,
self.inserted_primary_key)
]
@@ -698,7 +700,7 @@ class DefaultExecutionContext(base.ExecutionContext):
style of ``setinputsizes()`` on the cursor, using DB-API types
from the bind parameter's ``TypeEngine`` objects.
This method only called by those dialects which require it,
This method only called by those dialects which require it,
currently cx_oracle.
"""
@@ -743,7 +745,7 @@ class DefaultExecutionContext(base.ExecutionContext):
elif default.is_callable:
return default.arg(self)
elif default.is_clause_element:
# TODO: expensive branching here should be
# TODO: expensive branching here should be
# pulled into _exec_scalar()
conn = self.connection
c = expression.select([default.arg]).compile(bind=conn)
@@ -809,7 +811,7 @@ class DefaultExecutionContext(base.ExecutionContext):
if self.isinsert:
self.inserted_primary_key = [
self.compiled_parameters[0].get(c.key, None)
self.compiled_parameters[0].get(c.key, None)
for c in self.compiled.\
statement.table.primary_key
]

View File

@@ -1,5 +1,5 @@
# engine/reflection.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -38,8 +38,8 @@ def cache(fn, self, con, *args, **kw):
if info_cache is None:
return fn(self, con, *args, **kw)
key = (
fn.__name__,
tuple(a for a in args if isinstance(a, basestring)),
fn.__name__,
tuple(a for a in args if isinstance(a, basestring)),
tuple((k, v) for k, v in kw.iteritems() if isinstance(v, (basestring, int, float)))
)
ret = info_cache.get(key)
@@ -72,9 +72,9 @@ class Inspector(object):
def __init__(self, bind):
"""Initialize a new :class:`.Inspector`.
:param bind: a :class:`~sqlalchemy.engine.base.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.base.Engine` or
:param bind: a :class:`~sqlalchemy.engine.base.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.base.Engine` or
:class:`~sqlalchemy.engine.base.Connection`.
For a dialect-specific instance of :class:`.Inspector`, see
@@ -101,9 +101,9 @@ class Inspector(object):
def from_engine(cls, bind):
"""Construct a new dialect-specific Inspector object from the given engine or connection.
:param bind: a :class:`~sqlalchemy.engine.base.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.base.Engine` or
:param bind: a :class:`~sqlalchemy.engine.base.Connectable`,
which is typically an instance of
:class:`~sqlalchemy.engine.base.Engine` or
:class:`~sqlalchemy.engine.base.Connection`.
This method differs from direct a direct constructor call of :class:`.Inspector`
@@ -320,7 +320,7 @@ class Inspector(object):
def reflecttable(self, table, include_columns, exclude_columns=()):
"""Given a Table object, load its internal constructs based on introspection.
This is the underlying method used by most dialects to produce
This is the underlying method used by most dialects to produce
table reflection. Direct usage is like::
from sqlalchemy import create_engine, MetaData, Table
@@ -379,7 +379,7 @@ class Inspector(object):
coltype = col_d['type']
col_kw = {
'nullable':col_d['nullable'],
'nullable': col_d['nullable'],
}
for k in ('autoincrement', 'quote', 'info', 'key'):
if k in col_d:
@@ -414,11 +414,11 @@ class Inspector(object):
# Primary keys
pk_cons = self.get_pk_constraint(table_name, schema, **tblkw)
if pk_cons:
pk_cols = [table.c[pk]
for pk in pk_cons['constrained_columns']
pk_cols = [table.c[pk]
for pk in pk_cons['constrained_columns']
if pk in table.c and pk not in exclude_columns
] + [pk for pk in table.primary_key if pk.key in exclude_columns]
primary_key_constraint = sa_schema.PrimaryKeyConstraint(name=pk_cons.get('name'),
primary_key_constraint = sa_schema.PrimaryKeyConstraint(name=pk_cons.get('name'),
*pk_cols
)
@@ -452,7 +452,7 @@ class Inspector(object):
table.append_constraint(
sa_schema.ForeignKeyConstraint(constrained_columns, refspec,
conname, link_to_name=True))
# Indexes
# Indexes
indexes = self.get_indexes(table_name, schema)
for index_d in indexes:
name = index_d['name']
@@ -465,5 +465,5 @@ class Inspector(object):
"Omitting %s KEY for (%s), key covers omitted columns." %
(flavor, ', '.join(columns)))
continue
sa_schema.Index(name, *[table.columns[c] for c in columns],
sa_schema.Index(name, *[table.columns[c] for c in columns],
**dict(unique=unique))

View File

@@ -1,5 +1,5 @@
# engine/strategies.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -24,7 +24,7 @@ strategies = {}
class EngineStrategy(object):
"""An adaptor that processes input arguements and produces an Engine.
"""An adaptor that processes input arguments and produces an Engine.
Provides a ``create`` method that receives input arguments and
produces an instance of base.Engine or a subclass.
@@ -41,7 +41,7 @@ class EngineStrategy(object):
class DefaultEngineStrategy(EngineStrategy):
"""Base class for built-in stratgies."""
"""Base class for built-in strategies."""
def create(self, name_or_url, **kwargs):
# create url.URL object
@@ -80,7 +80,7 @@ class DefaultEngineStrategy(EngineStrategy):
return dialect.connect(*cargs, **cparams)
except Exception, e:
# Py3K
#raise exc.DBAPIError.instance(None, None,
#raise exc.DBAPIError.instance(None, None,
# e, dialect.dbapi.Error,
# connection_invalidated=
# dialect.is_disconnect(e, None, None)
@@ -180,7 +180,7 @@ PlainEngineStrategy()
class ThreadLocalEngineStrategy(DefaultEngineStrategy):
"""Strategy for configuring an Engine with thredlocal behavior."""
"""Strategy for configuring an Engine with threadlocal behavior."""
name = 'threadlocal'
engine_cls = threadlocal.TLEngine
@@ -245,8 +245,8 @@ class MockEngineStrategy(EngineStrategy):
from sqlalchemy.engine import ddl
ddl.SchemaDropper(self.dialect, self, **kwargs).traverse_single(entity)
def _run_visitor(self, visitorcallable, element,
connection=None,
def _run_visitor(self, visitorcallable, element,
connection=None,
**kwargs):
kwargs['checkfirst'] = False
visitorcallable(self.dialect, self,

View File

@@ -1,5 +1,5 @@
# engine/threadlocal.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -7,7 +7,7 @@
"""Provides a thread-local transactional wrapper around the root Engine class.
The ``threadlocal`` module is invoked when using the ``strategy="threadlocal"`` flag
with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is
with :func:`~sqlalchemy.engine.create_engine`. This module is semi-private and is
invoked automatically when the threadlocal engine strategy is used.
"""

View File

@@ -1,5 +1,5 @@
# engine/url.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -110,7 +110,7 @@ class URL(object):
module = self._load_entry_point()
if module is None:
raise exc.ArgumentError(
"Could not determine dialect for '%s'." %
"Could not determine dialect for '%s'." %
self.drivername)
return module.dialect

View File

@@ -1,5 +1,5 @@
# sqlalchemy/event.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -7,6 +7,7 @@
"""Base event API."""
from sqlalchemy import util, exc
import weakref
CANCEL = util.symbol('CANCEL')
NO_RETVAL = util.symbol('NO_RETVAL')
@@ -25,8 +26,8 @@ def listen(target, identifier, fn, *args, **kw):
list(const.columns)[0].name
)
event.listen(
UniqueConstraint,
"after_parent_attach",
UniqueConstraint,
"after_parent_attach",
unique_constraint_name)
"""
@@ -37,7 +38,7 @@ def listen(target, identifier, fn, *args, **kw):
tgt.dispatch._listen(tgt, identifier, fn, *args, **kw)
return
raise exc.InvalidRequestError("No such event '%s' for target '%s'" %
(identifier,target))
(identifier, target))
def listens_for(target, identifier, *args, **kw):
"""Decorate a function as a listener for the given target + identifier.
@@ -69,7 +70,7 @@ def remove(target, identifier, fn):
"""
for evt_cls in _registrars[identifier]:
for tgt in evt_cls._accept_with(target):
tgt.dispatch._remove(identifier, tgt, fn, *args, **kw)
tgt.dispatch._remove(identifier, tgt, fn)
return
_registrars = util.defaultdict(list)
@@ -90,12 +91,12 @@ class _UnpickleDispatch(object):
raise AttributeError("No class with a 'dispatch' member present.")
class _Dispatch(object):
"""Mirror the event listening definitions of an Events class with
"""Mirror the event listening definitions of an Events class with
listener collections.
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
Classes which define a "dispatch" member will return a
non-instantiated :class:`._Dispatch` subclass when the member
is accessed at the class level. When the "dispatch" member is
accessed at the instance level of its owner, an instance
of the :class:`._Dispatch` class is returned.
@@ -103,7 +104,7 @@ class _Dispatch(object):
class defined, by the :func:`._create_dispatcher_class` function.
The original :class:`.Events` classes remain untouched.
This decouples the construction of :class:`.Events` subclasses from
the implementation used by the event internals, and allows
the implementation used by the event internals, and allows
inspecting tools like Sphinx to work in an unsurprising
way against the public API.
@@ -120,13 +121,14 @@ class _Dispatch(object):
object."""
for ls in _event_descriptors(other):
getattr(self, ls.name)._update(ls, only_propagate=only_propagate)
getattr(self, ls.name).\
for_modify(self)._update(ls, only_propagate=only_propagate)
def _event_descriptors(target):
return [getattr(target, k) for k in dir(target) if _is_event_name(k)]
class _EventMeta(type):
"""Intercept new Event subclasses and create
"""Intercept new Event subclasses and create
associated _Dispatch classes."""
def __init__(cls, classname, bases, dict_):
@@ -134,14 +136,14 @@ class _EventMeta(type):
return type.__init__(cls, classname, bases, dict_)
def _create_dispatcher_class(cls, classname, bases, dict_):
"""Create a :class:`._Dispatch` class corresponding to an
"""Create a :class:`._Dispatch` class corresponding to an
:class:`.Events` class."""
# there's all kinds of ways to do this,
# i.e. make a Dispatch class that shares the '_listen' method
# of the Event class, this is the straight monkeypatch.
dispatch_base = getattr(cls, 'dispatch', _Dispatch)
cls.dispatch = dispatch_cls = type("%sDispatch" % classname,
cls.dispatch = dispatch_cls = type("%sDispatch" % classname,
(dispatch_base, ), {})
dispatch_cls._listen = cls._listen
dispatch_cls._clear = cls._clear
@@ -180,9 +182,11 @@ class Events(object):
@classmethod
def _listen(cls, target, identifier, fn, propagate=False, insert=False):
if insert:
getattr(target.dispatch, identifier).insert(fn, target, propagate)
getattr(target.dispatch, identifier).\
for_modify(target.dispatch).insert(fn, target, propagate)
else:
getattr(target.dispatch, identifier).append(fn, target, propagate)
getattr(target.dispatch, identifier).\
for_modify(target.dispatch).append(fn, target, propagate)
@classmethod
def _remove(cls, target, identifier, fn):
@@ -200,7 +204,12 @@ class _DispatchDescriptor(object):
def __init__(self, fn):
self.__name__ = fn.__name__
self.__doc__ = fn.__doc__
self._clslevel = util.defaultdict(list)
self._clslevel = weakref.WeakKeyDictionary()
self._empty_listeners = weakref.WeakKeyDictionary()
def _contains(self, cls, evt):
return cls in self._clslevel and \
evt in self._clslevel[cls]
def insert(self, obj, target, propagate):
assert isinstance(target, type), \
@@ -212,6 +221,8 @@ class _DispatchDescriptor(object):
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._clslevel[cls] = []
self._clslevel[cls].insert(0, obj)
def append(self, obj, target, propagate):
@@ -225,15 +236,19 @@ class _DispatchDescriptor(object):
if cls is not target and cls not in self._clslevel:
self.update_subclass(cls)
else:
if cls not in self._clslevel:
self._clslevel[cls] = []
self._clslevel[cls].append(obj)
def update_subclass(self, target):
if target not in self._clslevel:
self._clslevel[target] = []
clslevel = self._clslevel[target]
for cls in target.__mro__[1:]:
if cls in self._clslevel:
clslevel.extend([
fn for fn
in self._clslevel[cls]
fn for fn
in self._clslevel[cls]
if fn not in clslevel
])
@@ -242,7 +257,8 @@ class _DispatchDescriptor(object):
while stack:
cls = stack.pop(0)
stack.extend(cls.__subclasses__())
self._clslevel[cls].remove(obj)
if cls in self._clslevel:
self._clslevel[cls].remove(obj)
def clear(self):
"""Clear all class level listeners"""
@@ -250,18 +266,91 @@ class _DispatchDescriptor(object):
for dispatcher in self._clslevel.values():
dispatcher[:] = []
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _DispatchDescriptor at the class level of
a dispatcher, this returns self.
"""
return self
def __get__(self, obj, cls):
if obj is None:
return self
obj.__dict__[self.__name__] = result = \
_ListenerCollection(self, obj._parent_cls)
elif obj._parent_cls in self._empty_listeners:
ret = self._empty_listeners[obj._parent_cls]
else:
self._empty_listeners[obj._parent_cls] = ret = \
_EmptyListener(self, obj._parent_cls)
# assigning it to __dict__ means
# memoized for fast re-access. but more memory.
obj.__dict__[self.__name__] = ret
return ret
class _EmptyListener(object):
"""Serves as a class-level interface to the events
served by a _DispatchDescriptor, when there are no
instance-level events present.
Is replaced by _ListenerCollection when instance-level
events are added.
"""
def __init__(self, parent, target_cls):
if target_cls not in parent._clslevel:
parent.update_subclass(target_cls)
self.parent = parent
self.parent_listeners = parent._clslevel[target_cls]
self.name = parent.__name__
self.propagate = frozenset()
self.listeners = ()
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _EmptyListener at the instance level of
a dispatcher, this generates a new
_ListenerCollection, applies it to the instance,
and returns it.
"""
obj.__dict__[self.name] = result = _ListenerCollection(
self.parent, obj._parent_cls)
return result
def _needs_modify(self, *args, **kw):
raise NotImplementedError("need to call for_modify()")
exec_once = insert = append = remove = clear = _needs_modify
def __call__(self, *args, **kw):
"""Execute this event."""
for fn in self.parent_listeners:
fn(*args, **kw)
def __len__(self):
return len(self.parent_listeners)
def __iter__(self):
return iter(self.parent_listeners)
def __getitem__(self, index):
return (self.parent_listeners)[index]
def __nonzero__(self):
return bool(self.parent_listeners)
class _ListenerCollection(object):
"""Instance-level attributes on instances of :class:`._Dispatch`.
Represents a collection of listeners.
As of 0.7.9, _ListenerCollection is only first
created via the _EmptyListener.for_modify() method.
"""
_exec_once = False
@@ -274,6 +363,15 @@ class _ListenerCollection(object):
self.listeners = []
self.propagate = set()
def for_modify(self, obj):
"""Return an event collection which can be modified.
For _ListenerCollection at the instance level of
a dispatcher, this returns self.
"""
return self
def exec_once(self, *args, **kw):
"""Execute this event, but only if it has not been
executed already for this collection."""
@@ -294,11 +392,9 @@ class _ListenerCollection(object):
# but this allows class-level listeners to be added
# at any point.
#
# alternatively, _DispatchDescriptor could notify
# all _ListenerCollection objects, but then we move
# to a higher memory model, i.e.weakrefs to all _ListenerCollection
# objects, the _DispatchDescriptor collection repeated
# for all instances.
# In the absense of instance-level listeners,
# we stay with the _EmptyListener object when called
# at the instance level.
def __len__(self):
return len(self.parent_listeners + self.listeners)
@@ -319,8 +415,8 @@ class _ListenerCollection(object):
existing_listeners = self.listeners
existing_listener_set = set(existing_listeners)
self.propagate.update(other.propagate)
existing_listeners.extend([l for l
in other.listeners
existing_listeners.extend([l for l
in other.listeners
if l not in existing_listener_set
and not only_propagate or l in self.propagate
])
@@ -347,7 +443,7 @@ class _ListenerCollection(object):
self.propagate.clear()
class dispatcher(object):
"""Descriptor used by target classes to
"""Descriptor used by target classes to
deliver the _Dispatch class at the class level
and produce new _Dispatch instances for target
instances.

View File

@@ -1,5 +1,5 @@
# sqlalchemy/events.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -17,11 +17,11 @@ class DDLEvents(event.Events):
that is, :class:`.SchemaItem` and :class:`.SchemaEvent`
subclasses, including :class:`.MetaData`, :class:`.Table`,
:class:`.Column`.
:class:`.MetaData` and :class:`.Table` support events
specifically regarding when CREATE and DROP
DDL is emitted to the database.
DDL is emitted to the database.
Attachment events are also provided to customize
behavior whenever a child schema element is associated
with a parent, such as, when a :class:`.Column` is associated
@@ -37,14 +37,14 @@ class DDLEvents(event.Events):
some_table = Table('some_table', m, Column('data', Integer))
def after_create(target, connection, **kw):
connection.execute("ALTER TABLE %s SET name=foo_%s" %
connection.execute("ALTER TABLE %s SET name=foo_%s" %
(target.name, target.name))
event.listen(some_table, "after_create", after_create)
DDL events integrate closely with the
DDL events integrate closely with the
:class:`.DDL` class and the :class:`.DDLElement` hierarchy
of DDL clause constructs, which are themselves appropriate
of DDL clause constructs, which are themselves appropriate
as listener callables::
from sqlalchemy import DDL
@@ -81,7 +81,7 @@ class DDLEvents(event.Events):
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
event, the checkfirst flag, and other
elements used by internal events.
"""
@@ -97,7 +97,7 @@ class DDLEvents(event.Events):
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
event, the checkfirst flag, and other
elements used by internal events.
"""
@@ -113,7 +113,7 @@ class DDLEvents(event.Events):
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
event, the checkfirst flag, and other
elements used by internal events.
"""
@@ -129,52 +129,52 @@ class DDLEvents(event.Events):
to the event. The contents of this dictionary
may vary across releases, and include the
list of tables being generated for a metadata-level
event, the checkfirst flag, and other
event, the checkfirst flag, and other
elements used by internal events.
"""
def before_parent_attach(self, target, parent):
"""Called before a :class:`.SchemaItem` is associated with
"""Called before a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def after_parent_attach(self, target, parent):
"""Called after a :class:`.SchemaItem` is associated with
"""Called after a :class:`.SchemaItem` is associated with
a parent :class:`.SchemaItem`.
:param target: the target object
:param parent: the parent to which the target is being attached.
:func:`.event.listen` also accepts a modifier for this event:
:param propagate=False: When True, the listener function will
be established for any copies made of the target object,
i.e. those copies that are generated when
:meth:`.Table.tometadata` is used.
"""
def column_reflect(self, table, column_info):
"""Called for each unit of 'column info' retrieved when
a :class:`.Table` is being reflected.
a :class:`.Table` is being reflected.
The dictionary of column information as returned by the
dialect is passed, and can be modified. The dictionary
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
is that returned in each element of the list returned
by :meth:`.reflection.Inspector.get_columns`.
The event is called before any action is taken against
this dictionary, and the contents can be modified.
The :class:`.Column` specific arguments ``info``, ``key``,
@@ -182,45 +182,45 @@ class DDLEvents(event.Events):
will be passed to the constructor of :class:`.Column`.
Note that this event is only meaningful if either
associated with the :class:`.Table` class across the
associated with the :class:`.Table` class across the
board, e.g.::
from sqlalchemy.schema import Table
from sqlalchemy import event
def listen_for_reflect(table, column_info):
"receive a column_reflect event"
# ...
event.listen(
Table,
'column_reflect',
Table,
'column_reflect',
listen_for_reflect)
...or with a specific :class:`.Table` instance using
the ``listeners`` argument::
def listen_for_reflect(table, column_info):
"receive a column_reflect event"
# ...
t = Table(
'sometable',
'sometable',
autoload=True,
listeners=[
('column_reflect', listen_for_reflect)
])
This because the reflection process initiated by ``autoload=True``
completes within the scope of the constructor for :class:`.Table`.
"""
class SchemaEventTarget(object):
"""Base class for elements that are the targets of :class:`.DDLEvents` events.
This includes :class:`.SchemaItem` as well as :class:`.SchemaType`.
"""
dispatch = event.dispatcher(DDLEvents)
@@ -230,9 +230,9 @@ class SchemaEventTarget(object):
raise NotImplementedError()
def _set_parent_with_dispatch(self, parent):
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
self.dispatch.before_parent_attach(self, parent)
self._set_parent(parent)
self.dispatch.after_parent_attach(self, parent)
class PoolEvents(event.Events):
"""Available events for :class:`.Pool`.
@@ -350,10 +350,10 @@ class ConnectionEvents(event.Events):
Some events allow modifiers to the listen() function.
:param retval=False: Applies to the :meth:`.before_execute` and
:param retval=False: Applies to the :meth:`.before_execute` and
:meth:`.before_cursor_execute` events only. When True, the
user-defined event function must have a return value, which
is a tuple of parameters that replace the given statement
is a tuple of parameters that replace the given statement
and parameters. See those methods for a description of
specific return arguments.
@@ -372,9 +372,9 @@ class ConnectionEvents(event.Events):
fn = wrap
elif identifier == 'before_cursor_execute':
orig_fn = fn
def wrap(conn, cursor, statement,
def wrap(conn, cursor, statement,
parameters, context, executemany):
orig_fn(conn, cursor, statement,
orig_fn(conn, cursor, statement,
parameters, context, executemany)
return statement, parameters
fn = wrap
@@ -393,14 +393,44 @@ class ConnectionEvents(event.Events):
def after_execute(self, conn, clauseelement, multiparams, params, result):
"""Intercept high level execute() events."""
def before_cursor_execute(self, conn, cursor, statement,
def before_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events."""
def after_cursor_execute(self, conn, cursor, statement,
def after_cursor_execute(self, conn, cursor, statement,
parameters, context, executemany):
"""Intercept low-level cursor execute() events."""
def dbapi_error(self, conn, cursor, statement, parameters,
context, exception):
"""Intercept a raw DBAPI error.
This event is called with the DBAPI exception instance
received from the DBAPI itself, *before* SQLAlchemy wraps the
exception with it's own exception wrappers, and before any
other operations are performed on the DBAPI cursor; the
existing transaction remains in effect as well as any state
on the cursor.
The use case here is to inject low-level exception handling
into an :class:`.Engine`, typically for logging and
debugging purposes. In general, user code should **not** modify
any state or throw any exceptions here as this will
interfere with SQLAlchemy's cleanup and error handling
routines.
Subsequent to this hook, SQLAlchemy may attempt any
number of operations on the connection/cursor, including
closing the cursor, rolling back of the transaction in the
case of connectionless execution, and disposing of the entire
connection pool if a "disconnect" was detected. The
exception is then wrapped in a SQLAlchemy DBAPI exception
wrapper and re-thrown.
.. versionadded:: 0.7.7
"""
def begin(self, conn):
"""Intercept begin() events."""

View File

@@ -1,5 +1,5 @@
# sqlalchemy/exc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -28,21 +28,21 @@ class ArgumentError(SQLAlchemyError):
class CircularDependencyError(SQLAlchemyError):
"""Raised by topological sorts when a circular dependency is detected.
There are two scenarios where this error occurs:
* In a Session flush operation, if two objects are mutually dependent
on each other, they can not be inserted or deleted via INSERT or
on each other, they can not be inserted or deleted via INSERT or
DELETE statements alone; an UPDATE will be needed to post-associate
or pre-deassociate one of the foreign key constrained values.
The ``post_update`` flag described at :ref:`post_update` can resolve
The ``post_update`` flag described at :ref:`post_update` can resolve
this cycle.
* In a :meth:`.MetaData.create_all`, :meth:`.MetaData.drop_all`,
:attr:`.MetaData.sorted_tables` operation, two :class:`.ForeignKey`
or :class:`.ForeignKeyConstraint` objects mutually refer to each
other. Apply the ``use_alter=True`` flag to one or both,
see :ref:`use_alter`.
"""
def __init__(self, message, cycles, edges, msg=None):
if msg is None:
@@ -54,7 +54,7 @@ class CircularDependencyError(SQLAlchemyError):
self.edges = edges
def __reduce__(self):
return self.__class__, (None, self.cycles,
return self.__class__, (None, self.cycles,
self.edges, self.args[0])
class CompileError(SQLAlchemyError):
@@ -70,9 +70,9 @@ class DisconnectionError(SQLAlchemyError):
"""A disconnect is detected on a raw DB-API connection.
This error is raised and consumed internally by a connection pool. It can
be raised by the :meth:`.PoolEvents.checkout` event
be raised by the :meth:`.PoolEvents.checkout` event
so that the host pool forces a retry; the exception will be caught
three times in a row before the pool gives up and raises
three times in a row before the pool gives up and raises
:class:`~sqlalchemy.exc.InvalidRequestError` regarding the connection attempt.
"""
@@ -121,7 +121,7 @@ class NoReferencedColumnError(NoReferenceError):
self.column_name = cname
def __reduce__(self):
return self.__class__, (self.args[0], self.table_name,
return self.__class__, (self.args[0], self.table_name,
self.column_name)
class NoSuchTableError(InvalidRequestError):
@@ -136,20 +136,20 @@ class DontWrapMixin(object):
"""A mixin class which, when applied to a user-defined Exception class,
will not be wrapped inside of :class:`.StatementError` if the error is
emitted within the process of executing a statement.
E.g.::
from sqlalchemy.exc import DontWrapMixin
class MyCustomException(Exception, DontWrapMixin):
pass
class MySpecialType(TypeDecorator):
impl = String
def process_bind_param(self, value, dialect):
if value == 'invalid':
raise MyCustomException("invalid!")
"""
import sys
if sys.version_info < (2, 5):
@@ -161,15 +161,15 @@ UnmappedColumnError = None
class StatementError(SQLAlchemyError):
"""An error occurred during execution of a SQL statement.
:class:`StatementError` wraps the exception raised
during execution, and features :attr:`.statement`
and :attr:`.params` attributes which supply context regarding
the specifics of the statement which had an issue.
The wrapped exception object is available in
The wrapped exception object is available in
the :attr:`.orig` attribute.
"""
statement = None
@@ -188,7 +188,7 @@ class StatementError(SQLAlchemyError):
self.orig = orig
def __reduce__(self):
return self.__class__, (self.args[0], self.statement,
return self.__class__, (self.args[0], self.statement,
self.params, self.orig)
def __str__(self):
@@ -211,7 +211,7 @@ class DBAPIError(StatementError):
:class:`DBAPIError` features :attr:`~.StatementError.statement`
and :attr:`~.StatementError.params` attributes which supply context regarding
the specifics of the statement which had an issue, for the
the specifics of the statement which had an issue, for the
typical case when the error was raised within the context of
emitting a SQL statement.
@@ -221,8 +221,8 @@ class DBAPIError(StatementError):
"""
@classmethod
def instance(cls, statement, params,
orig,
def instance(cls, statement, params,
orig,
dbapi_base_err,
connection_invalidated=False):
# Don't ever wrap these, just return them directly as if
@@ -236,7 +236,7 @@ class DBAPIError(StatementError):
if not isinstance(orig, dbapi_base_err) and statement:
return StatementError(
"%s (original cause: %s)" % (
str(orig),
str(orig),
traceback.format_exception_only(orig.__class__, orig)[-1].strip()
), statement, params, orig)
@@ -247,7 +247,7 @@ class DBAPIError(StatementError):
return cls(statement, params, orig, connection_invalidated)
def __reduce__(self):
return self.__class__, (self.statement, self.params,
return self.__class__, (self.statement, self.params,
self.orig, self.connection_invalidated)
def __init__(self, statement, params, orig, connection_invalidated=False):
@@ -258,7 +258,7 @@ class DBAPIError(StatementError):
except Exception, e:
text = 'Error in str() of DB-API-generated exception: ' + str(e)
StatementError.__init__(
self,
self,
'(%s) %s' % (orig.__class__.__name__, text),
statement,
params,

View File

@@ -1,5 +1,5 @@
# ext/__init__.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

View File

@@ -1,5 +1,5 @@
# ext/associationproxy.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -24,17 +24,17 @@ from sqlalchemy.sql import not_
def association_proxy(target_collection, attr, **kw):
"""Return a Python property implementing a view of a target
attribute which references an attribute on members of the
attribute which references an attribute on members of the
target.
The returned value is an instance of :class:`.AssociationProxy`.
Implements a Python property representing a relationship as a collection of
simpler values, or a scalar value. The proxied property will mimic the collection type of
the target (list, dict or set), or, in the case of a one to one relationship,
a simple scalar value.
:param target_collection: Name of the attribute we'll proxy to.
:param target_collection: Name of the attribute we'll proxy to.
This attribute is typically mapped by
:func:`~sqlalchemy.orm.relationship` to link to a target collection, but
can also be a many-to-one or non-scalar relationship.
@@ -80,15 +80,15 @@ class AssociationProxy(object):
"""A descriptor that presents a read/write view of an object attribute."""
def __init__(self, target_collection, attr, creator=None,
getset_factory=None, proxy_factory=None,
getset_factory=None, proxy_factory=None,
proxy_bulk_set=None):
"""Construct a new :class:`.AssociationProxy`.
The :func:`.association_proxy` function is provided as the usual
entrypoint here, though :class:`.AssociationProxy` can be instantiated
and/or subclassed directly.
:param target_collection: Name of the collection we'll proxy to,
:param target_collection: Name of the collection we'll proxy to,
usually created with :func:`.relationship`.
:param attr: Attribute on the collected instances we'll proxy for. For example,
@@ -120,7 +120,7 @@ class AssociationProxy(object):
collection implementation, you may supply a factory function to
produce those collections. Only applicable to non-scalar relationships.
:param proxy_bulk_set: Optional, use with proxy_factory. See
:param proxy_bulk_set: Optional, use with proxy_factory. See
the _set() method for details.
"""
@@ -140,11 +140,11 @@ class AssociationProxy(object):
def remote_attr(self):
"""The 'remote' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
New in 0.7.3.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.local_attr`
@@ -157,10 +157,10 @@ class AssociationProxy(object):
"""The 'local' :class:`.MapperProperty` referenced by this
:class:`.AssociationProxy`.
New in 0.7.3.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.attr`
:attr:`.AssociationProxy.remote_attr`
@@ -171,20 +171,20 @@ class AssociationProxy(object):
@property
def attr(self):
"""Return a tuple of ``(local_attr, remote_attr)``.
This attribute is convenient when specifying a join
This attribute is convenient when specifying a join
using :meth:`.Query.join` across two relationships::
sess.query(Parent).join(*Parent.proxied.attr)
New in 0.7.3.
.. versionadded:: 0.7.3
See also:
:attr:`.AssociationProxy.local_attr`
:attr:`.AssociationProxy.remote_attr`
"""
return (self.local_attr, self.remote_attr)
@@ -195,10 +195,10 @@ class AssociationProxy(object):
@util.memoized_property
def target_class(self):
"""The intermediary class handled by this :class:`.AssociationProxy`.
Intercepted append/set/assignment events will result
in the generation of new instances of this class.
"""
return self._get_property().mapper.class_
@@ -333,10 +333,10 @@ class AssociationProxy(object):
def any(self, criterion=None, **kwargs):
"""Produce a proxied 'any' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
@@ -360,12 +360,12 @@ class AssociationProxy(object):
def has(self, criterion=None, **kwargs):
"""Produce a proxied 'has' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
and/or :meth:`.RelationshipProperty.Comparator.has`
and/or :meth:`.RelationshipProperty.Comparator.has`
operators of the underlying proxied attributes.
"""
return self._comparator.has(
@@ -375,7 +375,7 @@ class AssociationProxy(object):
def contains(self, obj):
"""Produce a proxied 'contains' expression using EXISTS.
This expression will be a composed product
using the :meth:`.RelationshipProperty.Comparator.any`
, :meth:`.RelationshipProperty.Comparator.has`,

View File

@@ -1,5 +1,5 @@
# ext/compiler.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -91,9 +91,9 @@ Produces::
"INSERT INTO mytable (SELECT mytable.x, mytable.y, mytable.z FROM mytable WHERE mytable.x > :x_1)"
.. note::
.. note::
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
The above ``InsertFromSelect`` construct probably wants to have "autocommit"
enabled. See :ref:`enabling_compiled_autocommit` for this step.
Cross Compiling between SQL and DDL compilers
@@ -118,12 +118,12 @@ Enabling Autocommit on a Construct
Recall from the section :ref:`autocommit` that the :class:`.Engine`, when asked to execute
a construct in the absence of a user-defined transaction, detects if the given
construct represents DML or DDL, that is, a data modification or data definition statement, which
construct represents DML or DDL, that is, a data modification or data definition statement, which
requires (or may require, in the case of DDL) that the transaction generated by the DBAPI be committed
(recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking
(recall that DBAPI always has a transaction going on regardless of what SQLAlchemy does). Checking
for this is actually accomplished
by checking for the "autocommit" execution option on the construct. When building a construct like
an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the "autocommit"
an INSERT derivation, a new DDL type, or perhaps a stored procedure that alters data, the "autocommit"
option needs to be set in order for the statement to function with "connectionless" execution
(as described in :ref:`dbengine_implicit`).
@@ -146,13 +146,13 @@ can be used, which already is a subclass of :class:`.Executable`, :class:`.Claus
class MyInsertThing(UpdateBase):
def __init__(self, ...):
...
DDL elements that subclass :class:`.DDLElement` already have the "autocommit" flag turned on.
Changing the default compilation of existing constructs
@@ -163,7 +163,7 @@ the compilation of a built in SQL construct, the @compiles decorator is invoked
the appropriate class (be sure to use the class, i.e. ``Insert`` or ``Select``, instead of the creation function such as ``insert()`` or ``select()``).
Within the new compilation function, to get at the "original" compilation routine,
use the appropriate visit_XXX method - this because compiler.process() will call upon the
use the appropriate visit_XXX method - this because compiler.process() will call upon the
overriding routine and cause an endless loop. Such as, to add "prefix" to all insert statements::
from sqlalchemy.sql.expression import Insert
@@ -205,7 +205,7 @@ A synopsis is as follows:
expression class. Any SQL expression can be derived from this base, and is
probably the best choice for longer constructs such as specialized INSERT
statements.
* :class:`~sqlalchemy.sql.expression.ColumnElement` - The root of all
"column-like" elements. Anything that you'd place in the "columns" clause of
a SELECT statement (as well as order by and group by) can derive from this -
@@ -218,7 +218,7 @@ A synopsis is as follows:
class timestamp(ColumnElement):
type = TIMESTAMP()
* :class:`~sqlalchemy.sql.expression.FunctionElement` - This is a hybrid of a
``ColumnElement`` and a "from clause" like object, and represents a SQL
function or stored procedure type of call. Since most databases support
@@ -250,7 +250,7 @@ A synopsis is as follows:
* :class:`~sqlalchemy.sql.expression.Executable` - This is a mixin which should be
used with any expression class that represents a "standalone" SQL statement that
can be passed directly to an ``execute()`` method. It is already implicit
can be passed directly to an ``execute()`` method. It is already implicit
within ``DDLElement`` and ``FunctionElement``.
Further Examples
@@ -263,15 +263,15 @@ A function that works like "CURRENT_TIMESTAMP" except applies the appropriate co
so that the time is in UTC time. Timestamps are best stored in relational databases
as UTC, without time zones. UTC so that your database doesn't think time has gone
backwards in the hour when daylight savings ends, without timezones because timezones
are like character encodings - they're best applied only at the endpoints of an
are like character encodings - they're best applied only at the endpoints of an
application (i.e. convert to UTC upon user input, re-apply desired timezone upon display).
For Postgresql and Microsoft SQL Server::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.types import DateTime
class utcnow(expression.FunctionElement):
type = DateTime()
@@ -284,7 +284,7 @@ For Postgresql and Microsoft SQL Server::
return "GETUTCDATE()"
Example usage::
from sqlalchemy import (
Table, Column, Integer, String, DateTime, MetaData
)
@@ -299,8 +299,8 @@ Example usage::
-------------------
The "GREATEST" function is given any number of arguments and returns the one that is
of the highest value - it's equivalent to Python's ``max`` function. A SQL
standard version versus a CASE based version which only accommodates two
of the highest value - it's equivalent to Python's ``max`` function. A SQL
standard version versus a CASE based version which only accommodates two
arguments::
from sqlalchemy.sql import expression
@@ -332,7 +332,7 @@ Example usage::
Session.query(Account).\\
filter(
greatest(
Account.checking_balance,
Account.checking_balance,
Account.savings_balance) > 10000
)
@@ -340,10 +340,10 @@ Example usage::
------------------
Render a "false" constant expression, rendering as "0" on platforms that don't have a "false" constant::
from sqlalchemy.sql import expression
from sqlalchemy.ext.compiler import compiles
class sql_false(expression.ColumnElement):
pass
@@ -358,14 +358,14 @@ Render a "false" constant expression, rendering as "0" on platforms that don't h
return "0"
Example usage::
from sqlalchemy import select, union_all
exp = union_all(
select([users.c.name, sql_false().label("enrolled")]),
select([customers.c.name, customers.c.enrolled])
)
"""
from sqlalchemy import exc

View File

@@ -1,5 +1,5 @@
# ext/declarative.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -51,7 +51,7 @@ automatically named with the name of the attribute to which they are
assigned.
To name columns explicitly with a name distinct from their mapped attribute,
just give the column a name. Below, column "some_table_id" is mapped to the
just give the column a name. Below, column "some_table_id" is mapped to the
"id" attribute of `SomeClass`, but in SQL will be represented as "some_table_id"::
class SomeClass(Base):
@@ -68,7 +68,7 @@ added to the underlying :class:`.Table` and
Classes which are constructed using declarative can interact freely
with classes that are mapped explicitly with :func:`mapper`.
It is recommended, though not required, that all tables
It is recommended, though not required, that all tables
share the same underlying :class:`~sqlalchemy.schema.MetaData` object,
so that string-configured :class:`~sqlalchemy.schema.ForeignKey`
references can be resolved without issue.
@@ -86,21 +86,11 @@ CREATE statements for all tables::
engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
The usual techniques of associating :class:`.MetaData:` with :class:`.Engine`
apply, such as assigning to the ``bind`` attribute::
Base.metadata.bind = create_engine('sqlite://')
To associate the engine with the :func:`declarative_base` at time
of construction, the ``bind`` argument is accepted::
Base = declarative_base(bind=create_engine('sqlite://'))
:func:`declarative_base` can also receive a pre-existing
:class:`.MetaData` object, which allows a
declarative setup to be associated with an already
declarative setup to be associated with an already
existing traditional collection of :class:`~sqlalchemy.schema.Table`
objects::
objects::
mymetadata = MetaData()
Base = declarative_base(metadata=mymetadata)
@@ -113,7 +103,7 @@ feature that the class specified to :func:`~sqlalchemy.orm.relationship`
may be a string name. The "class registry" associated with ``Base``
is used at mapper compilation time to resolve the name into the actual
class object, which is expected to have been defined once the mapper
configuration is used::
configuration is used::
class User(Base):
__tablename__ = 'users'
@@ -131,7 +121,7 @@ configuration is used::
Column constructs, since they are just that, are immediately usable,
as below where we define a primary join condition on the ``Address``
class using them::
class using them::
class Address(Base):
__tablename__ = 'addresses'
@@ -148,15 +138,15 @@ evaluated as Python expressions. The full namespace available within
this evaluation includes all classes mapped for this declarative base,
as well as the contents of the ``sqlalchemy`` package, including
expression functions like :func:`~sqlalchemy.sql.expression.desc` and
:attr:`~sqlalchemy.sql.expression.func`::
:attr:`~sqlalchemy.sql.expression.func`::
class User(Base):
# ....
addresses = relationship("Address",
order_by="desc(Address.email)",
order_by="desc(Address.email)",
primaryjoin="Address.user_id==User.id")
As an alternative to string-based attributes, attributes may also be
As an alternative to string-based attributes, attributes may also be
defined after all classes have been created. Just add them to the target
class after the fact::
@@ -169,8 +159,8 @@ Configuring Many-to-Many Relationships
Many-to-many relationships are also declared in the same way
with declarative as with traditional mappings. The
``secondary`` argument to
:func:`.relationship` is as usual passed a
:class:`.Table` object, which is typically declared in the
:func:`.relationship` is as usual passed a
:class:`.Table` object, which is typically declared in the
traditional way. The :class:`.Table` usually shares
the :class:`.MetaData` object used by the declarative base::
@@ -185,7 +175,7 @@ the :class:`.MetaData` object used by the declarative base::
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary=keywords)
Like other :func:`.relationship` arguments, a string is accepted as well,
Like other :func:`.relationship` arguments, a string is accepted as well,
passing the string name of the table as defined in the ``Base.metadata.tables``
collection::
@@ -194,7 +184,7 @@ collection::
id = Column(Integer, primary_key=True)
keywords = relationship("Keyword", secondary="keywords")
As with traditional mapping, its generally not a good idea to use
As with traditional mapping, its generally not a good idea to use
a :class:`.Table` as the "secondary" argument which is also mapped to
a class, unless the :class:`.relationship` is declared with ``viewonly=True``.
Otherwise, the unit-of-work system may attempt duplicate INSERT and
@@ -219,7 +209,7 @@ This attribute accommodates both positional as well as keyword
arguments that are normally sent to the
:class:`~sqlalchemy.schema.Table` constructor.
The attribute can be specified in one of two forms. One is as a
dictionary::
dictionary::
class MyClass(Base):
__tablename__ = 'sometable'
@@ -235,7 +225,7 @@ The other, a tuple, where each argument is positional
UniqueConstraint('foo'),
)
Keyword arguments can be specified with the above form by
Keyword arguments can be specified with the above form by
specifying the last argument as a dictionary::
class MyClass(Base):
@@ -253,7 +243,7 @@ As an alternative to ``__tablename__``, a direct
:class:`~sqlalchemy.schema.Table` construct may be used. The
:class:`~sqlalchemy.schema.Column` objects, which in this case require
their names, will be added to the mapping just like a regular mapping
to a table::
to a table::
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
@@ -277,9 +267,9 @@ and pass it to declarative classes::
class Address(Base):
__table__ = metadata.tables['address']
Some configuration schemes may find it more appropriate to use ``__table__``,
such as those which already take advantage of the data-driven nature of
:class:`.Table` to customize and/or automate schema definition.
Some configuration schemes may find it more appropriate to use ``__table__``,
such as those which already take advantage of the data-driven nature of
:class:`.Table` to customize and/or automate schema definition.
Note that when the ``__table__`` approach is used, the object is immediately
usable as a plain :class:`.Table` within the class declaration body itself,
@@ -292,15 +282,15 @@ by using the ``id`` column in the ``primaryjoin`` condition of a :func:`.relatio
Column('name', String(50))
)
widgets = relationship(Widget,
widgets = relationship(Widget,
primaryjoin=Widget.myclass_id==__table__.c.id)
Similarly, mapped attributes which refer to ``__table__`` can be placed inline,
Similarly, mapped attributes which refer to ``__table__`` can be placed inline,
as below where we assign the ``name`` column to the attribute ``_name``, generating
a synonym for ``name``::
from sqlalchemy.ext.declarative import synonym_for
class MyClass(Base):
__table__ = Table('my_table', Base.metadata,
Column('id', Integer, primary_key=True),
@@ -320,14 +310,14 @@ It's easy to set up a :class:`.Table` that uses ``autoload=True``
in conjunction with a mapped class::
class MyClass(Base):
__table__ = Table('mytable', Base.metadata,
__table__ = Table('mytable', Base.metadata,
autoload=True, autoload_with=some_engine)
However, one improvement that can be made here is to not
require the :class:`.Engine` to be available when classes are
However, one improvement that can be made here is to not
require the :class:`.Engine` to be available when classes are
being first declared. To achieve this, use the example
described at :ref:`examples_declarative_reflection` to build a
declarative base that sets up mappings only after a special
described at :ref:`examples_declarative_reflection` to build a
declarative base that sets up mappings only after a special
``prepare(engine)`` step is called::
Base = declarative_base(cls=DeclarativeReflectedBase)
@@ -339,14 +329,14 @@ declarative base that sets up mappings only after a special
class Bar(Base):
__tablename__ = 'bar'
# illustrate overriding of "bar.foo_id" to have
# illustrate overriding of "bar.foo_id" to have
# a foreign key constraint otherwise not
# reflected, such as when using MySQL
foo_id = Column(Integer, ForeignKey('foo.id'))
Base.prepare(e)
Mapper Configuration
====================
@@ -354,7 +344,7 @@ Declarative makes use of the :func:`~.orm.mapper` function internally
when it creates the mapping to the declared table. The options
for :func:`~.orm.mapper` are passed directly through via the ``__mapper_args__``
class attribute. As always, arguments which reference locally
mapped columns can reference them directly from within the
mapped columns can reference them directly from within the
class declaration::
from datetime import datetime
@@ -383,7 +373,7 @@ as declarative will determine this from the class itself. The various
Joined Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Joined table inheritance is defined as a subclass that defines its own
Joined table inheritance is defined as a subclass that defines its own
table::
class Person(Base):
@@ -400,8 +390,8 @@ table::
Note that above, the ``Engineer.id`` attribute, since it shares the
same attribute name as the ``Person.id`` attribute, will in fact
represent the ``people.id`` and ``engineers.id`` columns together, and
will render inside a query as ``"people.id"``.
represent the ``people.id`` and ``engineers.id`` columns together,
with the "Engineer.id" column taking precedence if queried directly.
To provide the ``Engineer`` class with an attribute that represents
only the ``engineers.id`` column, give it a different attribute name::
@@ -412,12 +402,17 @@ only the ``engineers.id`` column, give it a different attribute name::
primary_key=True)
primary_language = Column(String(50))
.. versionchanged:: 0.7 joined table inheritance favors the subclass
column over that of the superclass, such as querying above
for ``Engineer.id``. Prior to 0.7 this was the reverse.
Single Table Inheritance
~~~~~~~~~~~~~~~~~~~~~~~~
Single table inheritance is defined as a subclass that does not have
its own table; you just leave out the ``__table__`` and ``__tablename__``
attributes::
attributes::
class Person(Base):
__tablename__ = 'people'
@@ -506,29 +501,31 @@ before the class is built::
Using the Concrete Helpers
^^^^^^^^^^^^^^^^^^^^^^^^^^^
New helper classes released in 0.7.3 provides a simpler pattern for concrete inheritance.
Helper classes provides a simpler pattern for concrete inheritance.
With these objects, the ``__declare_last__`` helper is used to configure the "polymorphic"
loader for the mapper after all subclasses have been declared.
.. versionadded:: 0.7.3
An abstract base can be declared using the :class:`.AbstractConcreteBase` class::
from sqlalchemy.ext.declarative import AbstractConcreteBase
class Employee(AbstractConcreteBase, Base):
pass
To have a concrete ``employee`` table, use :class:`.ConcreteBase` instead::
from sqlalchemy.ext.declarative import ConcreteBase
class Employee(ConcreteBase, Base):
__tablename__ = 'employee'
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'polymorphic_identity':'employee',
'concrete':True}
Either ``Employee`` base can be used in the normal fashion::
@@ -538,7 +535,7 @@ Either ``Employee`` base can be used in the normal fashion::
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'polymorphic_identity':'manager',
'concrete':True}
class Engineer(Employee):
@@ -546,7 +543,7 @@ Either ``Employee`` base can be used in the normal fashion::
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
engineer_info = Column(String(40))
__mapper_args__ = {'polymorphic_identity':'engineer',
__mapper_args__ = {'polymorphic_identity':'engineer',
'concrete':True}
@@ -569,7 +566,7 @@ mappings are declared. An example of some commonly mixed-in
idioms is below::
from sqlalchemy.ext.declarative import declared_attr
class MyMixin(object):
@declared_attr
@@ -586,29 +583,29 @@ idioms is below::
Where above, the class ``MyModel`` will contain an "id" column
as the primary key, a ``__tablename__`` attribute that derives
from the name of the class itself, as well as ``__table_args__``
from the name of the class itself, as well as ``__table_args__``
and ``__mapper_args__`` defined by the ``MyMixin`` mixin class.
There's no fixed convention over whether ``MyMixin`` precedes
``Base`` or not. Normal Python method resolution rules apply, and
There's no fixed convention over whether ``MyMixin`` precedes
``Base`` or not. Normal Python method resolution rules apply, and
the above example would work just as well with::
class MyModel(Base, MyMixin):
name = Column(String(1000))
This works because ``Base`` here doesn't define any of the
variables that ``MyMixin`` defines, i.e. ``__tablename__``,
``__table_args__``, ``id``, etc. If the ``Base`` did define
an attribute of the same name, the class placed first in the
inherits list would determine which attribute is used on the
This works because ``Base`` here doesn't define any of the
variables that ``MyMixin`` defines, i.e. ``__tablename__``,
``__table_args__``, ``id``, etc. If the ``Base`` did define
an attribute of the same name, the class placed first in the
inherits list would determine which attribute is used on the
newly defined class.
Augmenting the Base
~~~~~~~~~~~~~~~~~~~
In addition to using a pure mixin, most of the techniques in this
In addition to using a pure mixin, most of the techniques in this
section can also be applied to the base class itself, for patterns that
should apply to all classes derived from a particular base. This
should apply to all classes derived from a particular base. This
is achieved using the ``cls`` argument of the :func:`.declarative_base` function::
from sqlalchemy.ext.declarative import declared_attr
@@ -617,26 +614,26 @@ is achieved using the ``cls`` argument of the :func:`.declarative_base` function
@declared_attr
def __tablename__(cls):
return cls.__name__.lower()
__table_args__ = {'mysql_engine': 'InnoDB'}
id = Column(Integer, primary_key=True)
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base(cls=Base)
class MyModel(Base):
name = Column(String(1000))
Where above, ``MyModel`` and all other classes that derive from ``Base`` will have
a table name derived from the class name, an ``id`` primary key column, as well as
Where above, ``MyModel`` and all other classes that derive from ``Base`` will have
a table name derived from the class name, an ``id`` primary key column, as well as
the "InnoDB" engine for MySQL.
Mixing in Columns
~~~~~~~~~~~~~~~~~
The most basic way to specify a column on a mixin is by simple
The most basic way to specify a column on a mixin is by simple
declaration::
class TimestampMixin(object):
@@ -649,30 +646,29 @@ declaration::
name = Column(String(1000))
Where above, all declarative classes that include ``TimestampMixin``
will also have a column ``created_at`` that applies a timestamp to
will also have a column ``created_at`` that applies a timestamp to
all row insertions.
Those familiar with the SQLAlchemy expression language know that
Those familiar with the SQLAlchemy expression language know that
the object identity of clause elements defines their role in a schema.
Two ``Table`` objects ``a`` and ``b`` may both have a column called
``id``, but the way these are differentiated is that ``a.c.id``
Two ``Table`` objects ``a`` and ``b`` may both have a column called
``id``, but the way these are differentiated is that ``a.c.id``
and ``b.c.id`` are two distinct Python objects, referencing their
parent tables ``a`` and ``b`` respectively.
In the case of the mixin column, it seems that only one
:class:`.Column` object is explicitly created, yet the ultimate
:class:`.Column` object is explicitly created, yet the ultimate
``created_at`` column above must exist as a distinct Python object
for each separate destination class. To accomplish this, the declarative
extension creates a **copy** of each :class:`.Column` object encountered on
extension creates a **copy** of each :class:`.Column` object encountered on
a class that is detected as a mixin.
This copy mechanism is limited to simple columns that have no foreign
keys, as a :class:`.ForeignKey` itself contains references to columns
which can't be properly recreated at this level. For columns that
which can't be properly recreated at this level. For columns that
have foreign keys, as well as for the variety of mapper-level constructs
that require destination-explicit context, the
:func:`~.declared_attr` decorator (renamed from ``sqlalchemy.util.classproperty`` in 0.6.5)
is provided so that
:func:`~.declared_attr` decorator is provided so that
patterns common to many classes can be defined as callables::
from sqlalchemy.ext.declarative import declared_attr
@@ -686,14 +682,17 @@ patterns common to many classes can be defined as callables::
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
Where above, the ``address_id`` class-level callable is executed at the
Where above, the ``address_id`` class-level callable is executed at the
point at which the ``User`` class is constructed, and the declarative
extension can use the resulting :class:`.Column` object as returned by
the method without the need to copy it.
.. versionchanged:: > 0.6.5
Rename 0.6.5 ``sqlalchemy.util.classproperty`` into :func:`~.declared_attr`.
Columns generated by :func:`~.declared_attr` can also be
referenced by ``__mapper_args__`` to a limited degree, currently
by ``polymorphic_on`` and ``version_id_col``, by specifying the
referenced by ``__mapper_args__`` to a limited degree, currently
by ``polymorphic_on`` and ``version_id_col``, by specifying the
classdecorator itself into the dictionary - the declarative extension
will resolve them at class construction time::
@@ -713,7 +712,7 @@ Mixing in Relationships
Relationships created by :func:`~sqlalchemy.orm.relationship` are provided
with declarative mixin classes exclusively using the
:func:`.declared_attr` approach, eliminating any ambiguity
:class:`.declared_attr` approach, eliminating any ambiguity
which could arise when copying a relationship and its possibly column-bound
contents. Below is an example which combines a foreign key column and a
relationship so that two classes ``Foo`` and ``Bar`` can both be configured to
@@ -741,10 +740,10 @@ reference a common target class via many-to-one::
id = Column(Integer, primary_key=True)
:func:`~sqlalchemy.orm.relationship` definitions which require explicit
primaryjoin, order_by etc. expressions should use the string forms
primaryjoin, order_by etc. expressions should use the string forms
for these arguments, so that they are evaluated as late as possible.
To reference the mixin class in these expressions, use the given ``cls``
to get it's name::
to get its name::
class RefTargetMixin(object):
@declared_attr
@@ -763,8 +762,8 @@ Mixing in deferred(), column_property(), etc.
Like :func:`~sqlalchemy.orm.relationship`, all
:class:`~sqlalchemy.orm.interfaces.MapperProperty` subclasses such as
:func:`~sqlalchemy.orm.deferred`, :func:`~sqlalchemy.orm.column_property`,
etc. ultimately involve references to columns, and therefore, when
used with declarative mixins, have the :func:`.declared_attr`
etc. ultimately involve references to columns, and therefore, when
used with declarative mixins, have the :class:`.declared_attr`
requirement so that no reliance on copying is needed::
class SomethingMixin(object):
@@ -781,7 +780,7 @@ Controlling table inheritance with mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The ``__tablename__`` attribute in conjunction with the hierarchy of
classes involved in a declarative mixin scenario controls what type of
classes involved in a declarative mixin scenario controls what type of
table inheritance, if any,
is configured by the declarative extension.
@@ -816,7 +815,7 @@ return a ``__tablename__`` in the event that no table is already
mapped in the inheritance hierarchy. To help with this, a
:func:`~sqlalchemy.ext.declarative.has_inherited_table` helper
function is provided that returns ``True`` if a parent class already
has a mapped table.
has a mapped table.
As an example, here's a mixin that will only allow single table
inheritance::
@@ -879,7 +878,7 @@ In the case of ``__table_args__`` or ``__mapper_args__``
specified with declarative mixins, you may want to combine
some parameters from several mixins with those you wish to
define on the class itself. The
:func:`.declared_attr` decorator can be used
:class:`.declared_attr` decorator can be used
here to create user-defined collation routines that pull
from multiple collections::
@@ -906,7 +905,7 @@ from multiple collections::
Creating Indexes with Mixins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To define a named, potentially multicolumn :class:`.Index` that applies to all
To define a named, potentially multicolumn :class:`.Index` that applies to all
tables derived from a mixin, use the "inline" form of :class:`.Index` and establish
it as part of ``__table_args__``::
@@ -928,7 +927,7 @@ Special Directives
``__declare_last__()``
~~~~~~~~~~~~~~~~~~~~~~
The ``__declare_last__()`` hook, introduced in 0.7.3, allows definition of
The ``__declare_last__()`` hook allows definition of
a class level function that is automatically called by the :meth:`.MapperEvents.after_configured`
event, which occurs after mappings are assumed to be completed and the 'configure' step
has finished::
@@ -939,29 +938,31 @@ has finished::
""
# do something with mappings
.. versionadded:: 0.7.3
.. _declarative_abstract:
``__abstract__``
~~~~~~~~~~~~~~~~~~~
``__abstract__`` is introduced in 0.7.3 and causes declarative to skip the production
``__abstract__`` causes declarative to skip the production
of a table or mapper for the class entirely. A class can be added within a hierarchy
in the same way as mixin (see :ref:`declarative_mixins`), allowing subclasses to extend
just from the special class::
class SomeAbstractBase(Base):
__abstract__ = True
def some_helpful_method(self):
""
@declared_attr
def __mapper_args__(cls):
return {"helpful mapper arguments":True}
class MyMappedClass(SomeAbstractBase):
""
One possible use of ``__abstract__`` is to use a distinct :class:`.MetaData` for different
bases::
@@ -975,13 +976,15 @@ bases::
__abstract__ = True
metadata = MetaData()
Above, classes which inherit from ``DefaultBase`` will use one :class:`.MetaData` as the
registry of tables, and those which inherit from ``OtherBase`` will use a different one.
Above, classes which inherit from ``DefaultBase`` will use one :class:`.MetaData` as the
registry of tables, and those which inherit from ``OtherBase`` will use a different one.
The tables themselves can then be created perhaps within distinct databases::
DefaultBase.metadata.create_all(some_engine)
OtherBase.metadata.create_all(some_other_engine)
.. versionadded:: 0.7.3
Class Constructor
=================
@@ -1006,7 +1009,7 @@ setup using :func:`~sqlalchemy.orm.scoped_session` might look like::
Base = declarative_base()
Mapped instances then make usage of
:class:`~sqlalchemy.orm.session.Session` in the usual way.
:class:`~sqlalchemy.orm.session.Session` in the usual way.
"""
@@ -1028,7 +1031,7 @@ __all__ = 'declarative_base', 'synonym_for', \
def instrument_declarative(cls, registry, metadata):
"""Given a class, configure the class declaratively,
using the given registry, which can be any dictionary, and
MetaData object.
MetaData object.
"""
if '_decl_class_registry' in cls.__dict__:
@@ -1071,7 +1074,7 @@ def _as_declarative(cls, classname, dict_):
def go():
cls.__declare_last__()
if '__abstract__' in base.__dict__:
if (base is cls or
if (base is cls or
(base in cls.__bases__ and not _is_declarative_inherits)
):
return
@@ -1083,19 +1086,19 @@ def _as_declarative(cls, classname, dict_):
for name,obj in vars(base).items():
if name == '__mapper_args__':
if not mapper_args and (
not class_mapped or
not class_mapped or
isinstance(obj, declarative_props)
):
mapper_args = cls.__mapper_args__
elif name == '__tablename__':
if not tablename and (
not class_mapped or
not class_mapped or
isinstance(obj, declarative_props)
):
tablename = cls.__tablename__
elif name == '__table_args__':
if not table_args and (
not class_mapped or
not class_mapped or
isinstance(obj, declarative_props)
):
table_args = cls.__table_args__
@@ -1110,7 +1113,7 @@ def _as_declarative(cls, classname, dict_):
util.warn("Regular (i.e. not __special__) "
"attribute '%s.%s' uses @declared_attr, "
"but owning class %s is mapped - "
"not applying to subclass %s."
"not applying to subclass %s."
% (base.__name__, name, base, cls))
continue
elif base is not cls:
@@ -1122,7 +1125,7 @@ def _as_declarative(cls, classname, dict_):
"must be declared as @declared_attr callables "
"on declarative mixin classes. ")
if name not in dict_ and not (
'__table__' in dict_ and
'__table__' in dict_ and
(obj.name or name) in dict_['__table__'].c
) and name not in potential_columns:
potential_columns[name] = \
@@ -1151,7 +1154,7 @@ def _as_declarative(cls, classname, dict_):
if inherited_table_args and not tablename:
table_args = None
# make sure that column copies are used rather
# make sure that column copies are used rather
# than the original columns from any mixins
for k in ('version_id_col', 'polymorphic_on',):
if k in mapper_args:
@@ -1204,7 +1207,7 @@ def _as_declarative(cls, classname, dict_):
elif isinstance(c, Column):
_undefer_column_name(key, c)
cols.add(c)
# if the column is the same name as the key,
# if the column is the same name as the key,
# remove it from the explicit properties dict.
# the normal rules for assigning column-based properties
# will take over, including precedence of columns
@@ -1291,7 +1294,7 @@ def _as_declarative(cls, classname, dict_):
if c.name in inherited_table.c:
raise exc.ArgumentError(
"Column '%s' on class %s conflicts with "
"existing column '%s'" %
"existing column '%s'" %
(c, cls, inherited_table.c[c.name])
)
inherited_table.append_column(c)
@@ -1310,7 +1313,7 @@ def _as_declarative(cls, classname, dict_):
if c not in inherited_mapper._columntoproperty])
exclude_properties.difference_update([c.key for c in cols])
# look through columns in the current mapper that
# look through columns in the current mapper that
# are keyed to a propname different than the colname
# (if names were the same, we'd have popped it out above,
# in which case the mapper makes this combination).
@@ -1322,25 +1325,21 @@ def _as_declarative(cls, classname, dict_):
if k in inherited_mapper._props:
p = inherited_mapper._props[k]
if isinstance(p, ColumnProperty):
# note here we place the superclass column
# first. this corresponds to the
# append() in mapper._configure_property().
# change this ordering when we do [ticket:1892]
our_stuff[k] = p.columns + [col]
# note here we place the subclass column
# first. See [ticket:1892] for background.
our_stuff[k] = [col] + p.columns
cls.__mapper__ = mapper_cls(cls,
table,
properties=our_stuff,
cls.__mapper__ = mapper_cls(cls,
table,
properties=our_stuff,
**mapper_args)
class DeclarativeMeta(type):
def __init__(cls, classname, bases, dict_):
if '_decl_class_registry' in cls.__dict__:
return type.__init__(cls, classname, bases, dict_)
else:
if '_decl_class_registry' not in cls.__dict__:
_as_declarative(cls, classname, cls.__dict__)
return type.__init__(cls, classname, bases, dict_)
type.__init__(cls, classname, bases, dict_)
def __setattr__(cls, key, value):
if '__mapper__' in cls.__dict__:
@@ -1356,7 +1355,7 @@ class DeclarativeMeta(type):
cls.__mapper__.add_property(key, value)
elif isinstance(value, MapperProperty):
cls.__mapper__.add_property(
key,
key,
_deferred_relationship(cls, value)
)
else:
@@ -1423,7 +1422,7 @@ def _deferred_relationship(cls, prop):
"When initializing mapper %s, expression %r failed to "
"locate a name (%r). If this is a class name, consider "
"adding this relationship() to the %r class after "
"both dependent classes have been defined." %
"both dependent classes have been defined." %
(prop.parent, arg, n.args[0], cls)
)
return return_cls
@@ -1493,15 +1492,14 @@ class declared_attr(property):
"""Mark a class-level method as representing the definition of
a mapped property or special declarative member name.
.. note::
@declared_attr is available as
``sqlalchemy.util.classproperty`` for SQLAlchemy versions
0.6.2, 0.6.3, 0.6.4.
.. versionchanged:: 0.6.{2,3,4}
``@declared_attr`` is available as
``sqlalchemy.util.classproperty`` for SQLAlchemy versions
0.6.2, 0.6.3, 0.6.4.
@declared_attr turns the attribute into a scalar-like
property that can be invoked from the uninstantiated class.
Declarative treats attributes specifically marked with
Declarative treats attributes specifically marked with
@declared_attr as returning a construct that is specific
to mapping or declarative table configuration. The name
of the attribute is that of what the non-dynamic version
@@ -1533,7 +1531,7 @@ class declared_attr(property):
def __mapper_args__(cls):
if cls.__name__ == 'Employee':
return {
"polymorphic_on":cls.type,
"polymorphic_on":cls.type,
"polymorphic_identity":"Employee"
}
else:
@@ -1581,8 +1579,8 @@ def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
:param bind: An optional
:class:`~sqlalchemy.engine.base.Connectable`, will be assigned
the ``bind`` attribute on the :class:`~sqlalchemy.MetaData`
instance.
the ``bind`` attribute on the :class:`~sqlalchemy.MetaData`
instance.
:param metadata:
An optional :class:`~sqlalchemy.MetaData` instance. All
@@ -1613,13 +1611,13 @@ def declarative_base(bind=None, metadata=None, mapper=None, cls=object,
no __init__ will be provided and construction will fall back to
cls.__init__ by way of the normal Python semantics.
:param class_registry: optional dictionary that will serve as the
:param class_registry: optional dictionary that will serve as the
registry of class names-> mapped classes when string names
are used to identify classes inside of :func:`.relationship`
are used to identify classes inside of :func:`.relationship`
and others. Allows two or more declarative base classes
to share the same registry of class names for simplified
to share the same registry of class names for simplified
inter-base relationships.
:param metaclass:
Defaults to :class:`.DeclarativeMeta`. A metaclass or __metaclass__
compatible callable to use as the meta type of the generated
@@ -1652,7 +1650,7 @@ def _undefer_column_name(key, column):
class ConcreteBase(object):
"""A helper class for 'concrete' declarative mappings.
:class:`.ConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
@@ -1662,7 +1660,7 @@ class ConcreteBase(object):
:class:`.ConcreteBase` produces a mapped
table for the class itself. Compare to :class:`.AbstractConcreteBase`,
which does not.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
@@ -1672,7 +1670,7 @@ class ConcreteBase(object):
employee_id = Column(Integer, primary_key=True)
name = Column(String(50))
__mapper_args__ = {
'polymorphic_identity':'employee',
'polymorphic_identity':'employee',
'concrete':True}
class Manager(Employee):
@@ -1681,7 +1679,7 @@ class ConcreteBase(object):
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'polymorphic_identity':'manager',
'concrete':True}
"""
@@ -1706,17 +1704,17 @@ class ConcreteBase(object):
class AbstractConcreteBase(ConcreteBase):
"""A helper class for 'concrete' declarative mappings.
:class:`.AbstractConcreteBase` will use the :func:`.polymorphic_union`
function automatically, against all tables mapped as a subclass
to this class. The function is called via the
``__declare_last__()`` function, which is essentially
a hook for the :func:`.MapperEvents.after_configured` event.
:class:`.AbstractConcreteBase` does not produce a mapped
table for the class itself. Compare to :class:`.ConcreteBase`,
which does.
Example::
from sqlalchemy.ext.declarative import ConcreteBase
@@ -1730,7 +1728,7 @@ class AbstractConcreteBase(ConcreteBase):
name = Column(String(50))
manager_data = Column(String(40))
__mapper_args__ = {
'polymorphic_identity':'manager',
'polymorphic_identity':'manager',
'concrete':True}
"""

View File

@@ -1,5 +1,5 @@
# ext/horizontal_shard.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -9,8 +9,8 @@
Defines a rudimental 'horizontal sharding' system which allows a Session to
distribute queries and persistence operations across multiple databases.
For a usage example, see the :ref:`examples_sharding` example included in
the source distrbution.
For a usage example, see the :ref:`examples_sharding` example included in
the source distribution.
"""
@@ -31,7 +31,7 @@ class ShardedQuery(Query):
def set_shard(self, shard_id):
"""return a new query, limited to a single shard ID.
all subsequent operations with the returned query will
all subsequent operations with the returned query will
be against the single shard regardless of other state.
"""
@@ -45,7 +45,7 @@ class ShardedQuery(Query):
result = self._connection_from_session(
mapper=self._mapper_zero(),
shard_id=shard_id).execute(
context.statement,
context.statement,
self._params)
return self.instances(result, context)
@@ -56,7 +56,7 @@ class ShardedQuery(Query):
for shard_id in self.query_chooser(self):
partial.extend(iter_for_shard(shard_id))
# if some kind of in memory 'sorting'
# if some kind of in memory 'sorting'
# were done, this is where it would happen
return iter(partial)
@@ -73,7 +73,7 @@ class ShardedQuery(Query):
return None
class ShardedSession(Session):
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
def __init__(self, shard_chooser, id_chooser, query_chooser, shards=None,
query_cls=ShardedQuery, **kwargs):
"""Construct a ShardedSession.
@@ -113,8 +113,8 @@ class ShardedSession(Session):
if self.transaction is not None:
return self.transaction.connection(mapper, shard_id=shard_id)
else:
return self.get_bind(mapper,
shard_id=shard_id,
return self.get_bind(mapper,
shard_id=shard_id,
instance=instance).contextual_connect(**kwargs)
def get_bind(self, mapper, shard_id=None, instance=None, clause=None, **kw):

View File

@@ -1,5 +1,5 @@
# ext/hybrid.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -10,8 +10,8 @@
class level and at the instance level.
The :mod:`~sqlalchemy.ext.hybrid` extension provides a special form of method
decorator, is around 50 lines of code and has almost no dependencies on the rest
of SQLAlchemy. It can, in theory, work with any descriptor-based expression
decorator, is around 50 lines of code and has almost no dependencies on the rest
of SQLAlchemy. It can, in theory, work with any descriptor-based expression
system.
Consider a mapping ``Interval``, representing integer ``start`` and ``end``
@@ -25,9 +25,9 @@ as the class itself::
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session, aliased
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
Base = declarative_base()
class Interval(Base):
__tablename__ = 'interval'
@@ -50,7 +50,7 @@ as the class itself::
@hybrid_method
def intersects(self, other):
return self.contains(other.start) | self.contains(other.end)
Above, the ``length`` property returns the difference between the ``end`` and
``start`` attributes. With an instance of ``Interval``, this subtraction occurs
in Python, using normal Python descriptor mechanics::
@@ -60,33 +60,33 @@ in Python, using normal Python descriptor mechanics::
5
When dealing with the ``Interval`` class itself, the :class:`.hybrid_property`
descriptor evaluates the function body given the ``Interval`` class as
descriptor evaluates the function body given the ``Interval`` class as
the argument, which when evaluated with SQLAlchemy expression mechanics
returns a new SQL expression::
>>> print Interval.length
interval."end" - interval.start
>>> print Session().query(Interval).filter(Interval.length > 10)
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE interval."end" - interval.start > :param_1
ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to
ORM methods such as :meth:`~.Query.filter_by` generally use ``getattr()`` to
locate attributes, so can also be used with hybrid attributes::
>>> print Session().query(Interval).filter_by(length=5)
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE interval."end" - interval.start = :param_1
The ``Interval`` class example also illustrates two methods, ``contains()`` and ``intersects()``,
decorated with :class:`.hybrid_method`.
This decorator applies the same idea to methods that :class:`.hybrid_property` applies
to attributes. The methods return boolean values, and take advantage
of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and
to attributes. The methods return boolean values, and take advantage
of the Python ``|`` and ``&`` bitwise operators to produce equivalent instance-level and
SQL expression-level boolean behavior::
>>> i1.contains(6)
@@ -97,24 +97,24 @@ SQL expression-level boolean behavior::
True
>>> i1.intersects(Interval(25, 29))
False
>>> print Session().query(Interval).filter(Interval.contains(15))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE interval.start <= :start_1 AND interval."end" > :end_1
>>> ia = aliased(Interval)
>>> print Session().query(Interval, ia).filter(Interval.intersects(ia))
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end, interval_1.id AS interval_1_id,
interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end
FROM interval, interval AS interval_1
WHERE interval.start <= interval_1.start
AND interval."end" > interval_1.start
OR interval.start <= interval_1."end"
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end, interval_1.id AS interval_1_id,
interval_1.start AS interval_1_start, interval_1."end" AS interval_1_end
FROM interval, interval AS interval_1
WHERE interval.start <= interval_1.start
AND interval."end" > interval_1.start
OR interval.start <= interval_1."end"
AND interval."end" > interval_1."end"
Defining Expression Behavior Distinct from Attribute Behavior
--------------------------------------------------------------
@@ -122,18 +122,18 @@ Our usage of the ``&`` and ``|`` bitwise operators above was fortunate, consider
our functions operated on two boolean values to return a new one. In many cases, the construction
of an in-Python function and a SQLAlchemy SQL expression have enough differences that two
separate Python expressions should be defined. The :mod:`~sqlalchemy.ext.hybrid` decorators
define the :meth:`.hybrid_property.expression` modifier for this purpose. As an example we'll
define the :meth:`.hybrid_property.expression` modifier for this purpose. As an example we'll
define the radius of the interval, which requires the usage of the absolute value function::
from sqlalchemy import func
class Interval(object):
# ...
@hybrid_property
def radius(self):
return abs(self.length) / 2
@radius.expression
def radius(cls):
return func.abs(cls.length) / 2
@@ -143,22 +143,22 @@ Above the Python function ``abs()`` is used for instance-level operations, the S
>>> i1.radius
2
>>> print Session().query(Interval).filter(Interval.radius > 5)
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
SELECT interval.id AS interval_id, interval.start AS interval_start,
interval."end" AS interval_end
FROM interval
WHERE abs(interval."end" - interval.start) / :abs_1 > :param_1
Defining Setters
----------------
Hybrid properties can also define setter methods. If we wanted ``length`` above, when
Hybrid properties can also define setter methods. If we wanted ``length`` above, when
set, to modify the endpoint value::
class Interval(object):
# ...
@hybrid_property
def length(self):
return self.end - self.start
@@ -179,17 +179,24 @@ The ``length(self, value)`` method is now called upon set::
Working with Relationships
--------------------------
There's no essential difference when creating hybrids that work with related objects as
opposed to column-based data. The need for distinct expressions tends to be greater.
Consider the following declarative mapping which relates a ``User`` to a ``SavingsAccount``::
There's no essential difference when creating hybrids that work with
related objects as opposed to column-based data. The need for distinct
expressions tends to be greater. Two variants we'll illustrate
are the "join-dependent" hybrid, and the "correlated subquery" hybrid.
Join-Dependent Relationship Hybrid
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Consider the following declarative
mapping which relates a ``User`` to a ``SavingsAccount``::
from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
Base = declarative_base()
class SavingsAccount(Base):
__tablename__ = 'account'
id = Column(Integer, primary_key=True)
@@ -200,9 +207,9 @@ Consider the following declarative mapping which relates a ``User`` to a ``Savin
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
accounts = relationship("SavingsAccount", backref="owner")
@hybrid_property
def balance(self):
if self.accounts:
@@ -222,30 +229,88 @@ Consider the following declarative mapping which relates a ``User`` to a ``Savin
def balance(cls):
return SavingsAccount.balance
The above hybrid property ``balance`` works with the first ``SavingsAccount`` entry in the list of
accounts for this user. The in-Python getter/setter methods can treat ``accounts`` as a Python
list available on ``self``.
The above hybrid property ``balance`` works with the first
``SavingsAccount`` entry in the list of accounts for this user. The
in-Python getter/setter methods can treat ``accounts`` as a Python
list available on ``self``.
However, at the expression level, we can't travel along relationships to column attributes
directly since SQLAlchemy is explicit about joins. So here, it's expected that the ``User`` class will be
used in an appropriate context such that an appropriate join to ``SavingsAccount`` will be present::
However, at the expression level, it's expected that the ``User`` class will be used
in an appropriate context such that an appropriate join to
``SavingsAccount`` will be present::
>>> print Session().query(User, User.balance).join(User.accounts).filter(User.balance > 5000)
SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance
FROM "user" JOIN account ON "user".id = account.user_id
>>> print Session().query(User, User.balance).\\
... join(User.accounts).filter(User.balance > 5000)
SELECT "user".id AS user_id, "user".name AS user_name,
account.balance AS account_balance
FROM "user" JOIN account ON "user".id = account.user_id
WHERE account.balance > :balance_1
Note however, that while the instance level accessors need to worry about whether ``self.accounts``
is even present, this issue expresses itself differently at the SQL expression level, where we basically
Note however, that while the instance level accessors need to worry
about whether ``self.accounts`` is even present, this issue expresses
itself differently at the SQL expression level, where we basically
would use an outer join::
>>> from sqlalchemy import or_
>>> print (Session().query(User, User.balance).outerjoin(User.accounts).
... filter(or_(User.balance < 5000, User.balance == None)))
SELECT "user".id AS user_id, "user".name AS user_name, account.balance AS account_balance
FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
SELECT "user".id AS user_id, "user".name AS user_name,
account.balance AS account_balance
FROM "user" LEFT OUTER JOIN account ON "user".id = account.user_id
WHERE account.balance < :balance_1 OR account.balance IS NULL
Correlated Subquery Relationship Hybrid
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
We can, of course, forego being dependent on the enclosing query's usage
of joins in favor of the correlated
subquery, which can portably be packed into a single column expression.
A correlated subquery is more portable, but often performs more poorly
at the SQL level.
Using the same technique illustrated at :ref:`mapper_column_property_sql_expressions`,
we can adjust our ``SavingsAccount`` example to aggregate the balances for
*all* accounts, and use a correlated subquery for the column expression::
from sqlalchemy import Column, Integer, ForeignKey, Numeric, String
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy import select, func
Base = declarative_base()
class SavingsAccount(Base):
__tablename__ = 'account'
id = Column(Integer, primary_key=True)
user_id = Column(Integer, ForeignKey('user.id'), nullable=False)
balance = Column(Numeric(15, 5))
class User(Base):
__tablename__ = 'user'
id = Column(Integer, primary_key=True)
name = Column(String(100), nullable=False)
accounts = relationship("SavingsAccount", backref="owner")
@hybrid_property
def balance(self):
return sum(acc.balance for acc in self.accounts)
@balance.expression
def balance(cls):
return select([func.sum(SavingsAccount.balance)]).\\
where(SavingsAccount.user_id==cls.id).\\
label('total_balance')
The above recipe will give us the ``balance`` column which renders
a correlated SELECT::
>>> print s.query(User).filter(User.balance > 400)
SELECT "user".id AS user_id, "user".name AS user_name
FROM "user"
WHERE (SELECT sum(account.balance) AS sum_1
FROM account
WHERE account.user_id = "user".id) > :param_1
.. _hybrid_custom_comparators:
Building Custom Comparators
@@ -253,7 +318,7 @@ Building Custom Comparators
The hybrid property also includes a helper that allows construction of custom comparators.
A comparator object allows one to customize the behavior of each SQLAlchemy expression
operator individually. They are useful when creating custom types that have
operator individually. They are useful when creating custom types that have
some highly idiosyncratic behavior on the SQL side.
The example class below allows case-insensitive comparisons on the attribute
@@ -263,9 +328,9 @@ named ``word_insensitive``::
from sqlalchemy import func, Column, Integer, String
from sqlalchemy.orm import Session
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class CaseInsensitiveComparator(Comparator):
def __eq__(self, other):
return func.lower(self.__clause_element__()) == func.lower(other)
@@ -274,27 +339,27 @@ named ``word_insensitive``::
__tablename__ = 'searchword'
id = Column(Integer, primary_key=True)
word = Column(String(255), nullable=False)
@hybrid_property
def word_insensitive(self):
return self.word.lower()
@word_insensitive.comparator
def word_insensitive(cls):
return CaseInsensitiveComparator(cls.word)
Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()``
Above, SQL expressions against ``word_insensitive`` will apply the ``LOWER()``
SQL function to both sides::
>>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks")
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
WHERE lower(searchword.word) = lower(:lower_1)
The ``CaseInsensitiveComparator`` above implements part of the :class:`.ColumnOperators`
interface. A "coercion" operation like lowercasing can be applied to all comparison operations
(i.e. ``eq``, ``lt``, ``gt``, etc.) using :meth:`.Operators.operate`::
class CaseInsensitiveComparator(Comparator):
def operate(self, op, other):
return op(func.lower(self.__clause_element__()), func.lower(other))
@@ -310,7 +375,7 @@ by ``@word_insensitive.comparator``, only applies to the SQL side.
A more comprehensive form of the custom comparator is to construct a *Hybrid Value Object*.
This technique applies the target value or expression to a value object which is then
returned by the accessor in all cases. The value object allows control
of all operations upon the value as well as how compared values are treated, both
of all operations upon the value as well as how compared values are treated, both
on the SQL expression side as well as the Python value side. Replacing the
previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord`` class::
@@ -342,8 +407,8 @@ previous ``CaseInsensitiveComparator`` class with a new ``CaseInsensitiveWord``
Above, the ``CaseInsensitiveWord`` object represents ``self.word``, which may be a SQL function,
or may be a Python native. By overriding ``operate()`` and ``__clause_element__()``
to work in terms of ``self.word``, all comparison operations will work against the
"converted" form of ``word``, whether it be SQL side or Python side.
Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally
"converted" form of ``word``, whether it be SQL side or Python side.
Our ``SearchWord`` class can now deliver the ``CaseInsensitiveWord`` object unconditionally
from a single hybrid call::
class SearchWord(Base):
@@ -356,12 +421,12 @@ from a single hybrid call::
return CaseInsensitiveWord(self.word)
The ``word_insensitive`` attribute now has case-insensitive comparison behavior
universally, including SQL expression vs. Python expression (note the Python value is
universally, including SQL expression vs. Python expression (note the Python value is
converted to lower case on the Python side here)::
>>> print Session().query(SearchWord).filter_by(word_insensitive="Trucks")
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
SELECT searchword.id AS searchword_id, searchword.word AS searchword_word
FROM searchword
WHERE lower(searchword.word) = :lower_1
SQL expression versus SQL expression::
@@ -369,13 +434,13 @@ SQL expression versus SQL expression::
>>> sw1 = aliased(SearchWord)
>>> sw2 = aliased(SearchWord)
>>> print Session().query(
... sw1.word_insensitive,
... sw1.word_insensitive,
... sw2.word_insensitive).\\
... filter(
... sw1.word_insensitive > sw2.word_insensitive
... )
SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2
FROM searchword AS searchword_1, searchword AS searchword_2
SELECT lower(searchword_1.word) AS lower_1, lower(searchword_2.word) AS lower_2
FROM searchword AS searchword_1, searchword AS searchword_2
WHERE lower(searchword_1.word) > lower(searchword_2.word)
Python only expression::
@@ -403,7 +468,7 @@ Building Transformers
----------------------
A *transformer* is an object which can receive a :class:`.Query` object and return a
new one. The :class:`.Query` object includes a method :meth:`.with_transformation`
new one. The :class:`.Query` object includes a method :meth:`.with_transformation`
that simply returns a new :class:`.Query` transformed by the given function.
We can combine this with the :class:`.Comparator` class to produce one type
@@ -412,18 +477,18 @@ filtering criterion.
Consider a mapped class ``Node``, which assembles using adjacency list into a hierarchical
tree pattern::
from sqlalchemy import Column, Integer, ForeignKey
from sqlalchemy.orm import relationship
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Node(Base):
__tablename__ = 'node'
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('node.id'))
parent = relationship("Node", remote_side=id)
Suppose we wanted to add an accessor ``grandparent``. This would return the ``parent`` of
``Node.parent``. When we have an instance of ``Node``, this is simple::
@@ -431,7 +496,7 @@ Suppose we wanted to add an accessor ``grandparent``. This would return the ``p
class Node(Base):
# ...
@hybrid_property
def grandparent(self):
return self.parent.parent
@@ -460,7 +525,7 @@ attribute and filtered based on the given criterion::
id = Column(Integer, primary_key=True)
parent_id = Column(Integer, ForeignKey('node.id'))
parent = relationship("Node", remote_side=id)
@hybrid_property
def grandparent(self):
return self.parent.parent
@@ -486,8 +551,8 @@ using :attr:`.Operators.eq` against the left and right sides, passing into
{sql}>>> session.query(Node).\\
... with_transformation(Node.grandparent==Node(id=5)).\\
... all()
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
WHERE :param_1 = node_1.parent_id
{stop}
@@ -529,14 +594,14 @@ with each class::
{sql}>>> session.query(Node).\\
... with_transformation(Node.grandparent.join).\\
... filter(Node.grandparent==Node(id=5))
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
SELECT node.id AS node_id, node.parent_id AS node_parent_id
FROM node JOIN node AS node_1 ON node_1.id = node.parent_id
WHERE :param_1 = node_1.parent_id
{stop}
The "transformer" pattern is an experimental pattern that starts
to make usage of some functional programming paradigms.
While it's only recommended for advanced and/or patient developers,
While it's only recommended for advanced and/or patient developers,
there's probably a whole lot of amazing things it can be used for.
"""
@@ -546,26 +611,26 @@ from sqlalchemy.orm import attributes, interfaces
class hybrid_method(object):
"""A decorator which allows definition of a Python object method with both
instance-level and class-level behavior.
"""
def __init__(self, func, expr=None):
"""Create a new :class:`.hybrid_method`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_method
class SomeClass(object):
@hybrid_method
def value(self, x, y):
return self._value + x + y
@value.expression
def value(self, x, y):
return func.some_function(self._value, x, y)
"""
self.func = func
self.expr = expr or func
@@ -585,25 +650,25 @@ class hybrid_method(object):
class hybrid_property(object):
"""A decorator which allows definition of a Python descriptor with both
instance-level and class-level behavior.
"""
def __init__(self, fget, fset=None, fdel=None, expr=None):
"""Create a new :class:`.hybrid_property`.
Usage is typically via decorator::
from sqlalchemy.ext.hybrid import hybrid_property
class SomeClass(object):
@hybrid_property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
"""
self.fget = fget
self.fset = fset
@@ -647,10 +712,10 @@ class hybrid_property(object):
def comparator(self, comparator):
"""Provide a modifying decorator that defines a custom comparator producing method.
The return value of the decorated method should be an instance of
:class:`~.hybrid.Comparator`.
"""
proxy_attr = attributes.\

View File

@@ -1,5 +1,5 @@
# ext/mutable.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -21,8 +21,8 @@ Establishing Mutability on Scalar Column Values
===============================================
A typical example of a "mutable" structure is a Python dictionary.
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
Following the example introduced in :ref:`types_toplevel`, we
begin with a custom type that marshals Python dictionaries into
JSON strings before being persisted::
from sqlalchemy.types import TypeDecorator, VARCHAR
@@ -43,7 +43,7 @@ JSON strings before being persisted::
value = json.loads(value)
return value
The usage of ``json`` is only for the purposes of example. The :mod:`sqlalchemy.ext.mutable`
The usage of ``json`` is only for the purposes of example. The :mod:`sqlalchemy.ext.mutable`
extension can be used
with any type whose target Python type may be mutable, including
:class:`.PickleType`, :class:`.postgresql.ARRAY`, etc.
@@ -86,7 +86,7 @@ The above dictionary class takes the approach of subclassing the Python
built-in ``dict`` to produce a dict
subclass which routes all mutation events through ``__setitem__``. There are
many variants on this approach, such as subclassing ``UserDict.UserDict``,
the newer ``collections.MutableMapping``, etc. The part that's important to this
the newer ``collections.MutableMapping``, etc. The part that's important to this
example is that the :meth:`.Mutable.changed` method is called whenever an in-place change to the
datastructure takes place.
@@ -95,7 +95,7 @@ convert any values that are not instances of ``MutationDict``, such
as the plain dictionaries returned by the ``json`` module, into the
appropriate type. Defining this method is optional; we could just as well created our
``JSONEncodedDict`` such that it always returns an instance of ``MutationDict``,
and additionally ensured that all calling code uses ``MutationDict``
and additionally ensured that all calling code uses ``MutationDict``
explicitly. When :meth:`.Mutable.coerce` is not overridden, any values
applied to a parent object which are not instances of the mutable type
will raise a ``ValueError``.
@@ -108,14 +108,14 @@ of this type, applying event listening instrumentation to the mapped
attribute. Such as, with classical table metadata::
from sqlalchemy import Table, Column, Integer
my_data = Table('my_data', metadata,
Column('id', Integer, primary_key=True),
Column('data', MutationDict.as_mutable(JSONEncodedDict))
)
Above, :meth:`~.Mutable.as_mutable` returns an instance of ``JSONEncodedDict``
(if the type object was not an instance already), which will intercept any
(if the type object was not an instance already), which will intercept any
attributes which are mapped against this type. Below we establish a simple
mapping against the ``my_data`` table::
@@ -157,7 +157,7 @@ will flag the attribute as "dirty" on the parent object::
The ``MutationDict`` can be associated with all future instances
of ``JSONEncodedDict`` in one step, using :meth:`~.Mutable.associate_with`. This
is similar to :meth:`~.Mutable.as_mutable` except it will intercept
is similar to :meth:`~.Mutable.as_mutable` except it will intercept
all occurrences of ``MutationDict`` in all mappings unconditionally, without
the need to declare it individually::
@@ -167,8 +167,8 @@ the need to declare it individually::
__tablename__ = 'my_data'
id = Column(Integer, primary_key=True)
data = Column(JSONEncodedDict)
Supporting Pickling
--------------------
@@ -180,7 +180,7 @@ not picklable, due to the fact that they contain weakrefs and function
callbacks. In our case, this is a good thing, since if this dictionary were
picklable, it could lead to an excessively large pickle size for our value
objects that are pickled by themselves outside of the context of the parent.
The developer responsiblity here is only to provide a ``__getstate__`` method
The developer responsibility here is only to provide a ``__getstate__`` method
that excludes the :meth:`~.MutableBase._parents` collection from the pickle
stream::
@@ -217,12 +217,13 @@ be assigned an object value which represents information "composed" from one
or more columns from the underlying mapped table. The usual example is that of
a geometric "point", and is introduced in :ref:`mapper_composite`.
As of SQLAlchemy 0.7, the internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
.. versionchanged:: 0.7
The internals of :func:`.orm.composite` have been
greatly simplified and in-place mutation detection is no longer enabled by
default; instead, the user-defined value must detect changes on its own and
propagate them to all owning parents. The :mod:`sqlalchemy.ext.mutable`
extension provides the helper class :class:`.MutableComposite`, which is a
slight variant on the :class:`.Mutable` class.
As is the case with :class:`.Mutable`, the user-defined composite class
subclasses :class:`.MutableComposite` as a mixin, and detects and delivers
@@ -300,6 +301,31 @@ will flag the attribute as "dirty" on the parent object::
>>> assert v1 in sess.dirty
True
Coercing Mutable Composites
---------------------------
The :meth:`.MutableBase.coerce` method is also supported on composite types.
In the case of :class:`.MutableComposite`, the :meth:`.MutableBase.coerce`
method is only called for attribute set operations, not load operations.
Overriding the :meth:`.MutableBase.coerce` method is essentially equivalent
to using a :func:`.validates` validation routine for all attributes which
make use of the custom composite type::
class Point(MutableComposite):
# other Point methods
# ...
def coerce(cls, key, value):
if isinstance(value, tuple):
value = Point(*value)
elif not isinstance(value, Point):
raise ValueError("tuple or Point expected")
return value
.. versionadded:: 0.7.10,0.8.0b2
Support for the :meth:`.MutableBase.coerce` method in conjunction with
objects of type :class:`.MutableComposite`.
Supporting Pickling
--------------------
@@ -313,10 +339,10 @@ the minimal form of our ``Point`` class::
class Point(MutableComposite):
# ...
def __getstate__(self):
return self.x, self.y
def __setstate__(self, state):
self.x, self.y = state
@@ -327,7 +353,7 @@ pickling process of the parent's object-relational state so that the
"""
from sqlalchemy.orm.attributes import flag_modified
from sqlalchemy import event, types
from sqlalchemy.orm import mapper, object_mapper
from sqlalchemy.orm import mapper, object_mapper, Mapper
from sqlalchemy.util import memoized_property
import weakref
@@ -337,20 +363,38 @@ class MutableBase(object):
@memoized_property
def _parents(self):
"""Dictionary of parent object->attribute name on the parent.
This attribute is a so-called "memoized" property. It initializes
itself with a new ``weakref.WeakKeyDictionary`` the first time
it is accessed, returning the same object upon subsequent access.
"""
return weakref.WeakKeyDictionary()
@classmethod
def coerce(cls, key, value):
"""Given a value, coerce it into this type.
"""Given a value, coerce it into the target type.
Can be overridden by custom subclasses to coerce incoming
data into a particular type.
By default, raises ``ValueError``.
This method is called in different scenarios depending on if
the parent class is of type :class:`.Mutable` or of type
:class:`.MutableComposite`. In the case of the former, it is called
for both attribute-set operations as well as during ORM loading
operations. For the latter, it is only called during attribute-set
operations; the mechanics of the :func:`.composite` construct
handle coercion during load operations.
:param key: string name of the ORM-mapped attribute being set.
:param value: the incoming value.
:return: the method should return the coerced value, or raise
``ValueError`` if the coercion cannot be completed.
By default raises ValueError.
"""
if value is None:
return None
@@ -358,7 +402,7 @@ class MutableBase(object):
@classmethod
def _listen_on_attribute(cls, attribute, coerce, parent_cls):
"""Establish this type as a mutation listener for the given
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
@@ -372,7 +416,7 @@ class MutableBase(object):
def load(state, *args):
"""Listen for objects loaded or refreshed.
Wrap the target data member's value with
Wrap the target data member's value with
``Mutable``.
"""
@@ -388,7 +432,7 @@ class MutableBase(object):
data member.
Establish a weak reference to the parent object
on the incoming value, remove it for the one
on the incoming value, remove it for the one
outgoing.
"""
@@ -435,7 +479,7 @@ class Mutable(MutableBase):
@classmethod
def associate_with_attribute(cls, attribute):
"""Establish this type as a mutation listener for the given
"""Establish this type as a mutation listener for the given
mapped descriptor.
"""
@@ -443,15 +487,15 @@ class Mutable(MutableBase):
@classmethod
def associate_with(cls, sqltype):
"""Associate this wrapper with all future mapped columns
"""Associate this wrapper with all future mapped columns
of the given type.
This is a convenience method that calls ``associate_with_attribute`` automatically.
.. warning::
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
to all mappers, and are *not* garbage collected. Only use
:meth:`.associate_with` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
@@ -473,7 +517,7 @@ class Mutable(MutableBase):
This establishes listeners that will detect ORM mappings against
the given type, adding mutation event trackers to those mappings.
The type is returned, unconditionally as an instance, so that
The type is returned, unconditionally as an instance, so that
:meth:`.as_mutable` can be used inline::
Table('mytable', metadata,
@@ -485,15 +529,15 @@ class Mutable(MutableBase):
is given, and that only columns which are declared specifically with that
type instance receive additional instrumentation.
To associate a particular mutable type with all occurrences of a
To associate a particular mutable type with all occurrences of a
particular type, use the :meth:`.Mutable.associate_with` classmethod
of the particular :meth:`.Mutable` subclass to establish a global
association.
.. warning::
.. warning::
The listeners established by this method are *global*
to all mappers, and are *not* garbage collected. Only use
to all mappers, and are *not* garbage collected. Only use
:meth:`.as_mutable` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
@@ -511,28 +555,22 @@ class Mutable(MutableBase):
return sqltype
class _MutableCompositeMeta(type):
    """Metaclass for :class:`.MutableComposite`.

    Ensures that every newly defined subclass registers its
    mutation-event listeners at class-creation time, so user code
    only needs to subclass ``MutableComposite``.
    """
    def __init__(cls, classname, bases, dict_):
        # Wire up the global mapper listeners for this subclass before
        # completing standard type initialization.
        cls._setup_listeners()
        return type.__init__(cls, classname, bases, dict_)
class MutableComposite(MutableBase):
"""Mixin that defines transparent propagation of change
events on a SQLAlchemy "composite" object to its
owning parent or parents.
See the example in :ref:`mutable_composites` for usage information.
.. warning::
.. warning::
The listeners established by the :class:`.MutableComposite`
class are *global* to all mappers, and are *not* garbage collected. Only use
class are *global* to all mappers, and are *not* garbage collected. Only use
:class:`.MutableComposite` for types that are permanent to an application,
not with ad-hoc types else this will cause unbounded growth
in memory usage.
"""
__metaclass__ = _MutableCompositeMeta
def changed(self):
"""Subclasses should call this method whenever change events occur."""
@@ -541,23 +579,18 @@ class MutableComposite(MutableBase):
prop = object_mapper(parent).get_property(key)
for value, attr_name in zip(
self.__composite_values__(),
self.__composite_values__(),
prop._attribute_keys):
setattr(parent, attr_name, value)
@classmethod
def _setup_listeners(cls):
    """Associate this wrapper with all future mapped composites
    of the given type.

    This is a convenience method that calls ``associate_with_attribute`` automatically.
    """
    def listen_for_type(mapper, class_):
        # On each mapper configuration, scan the mapped properties and
        # attach change listeners to any composite property whose
        # composite class is (a subclass of) this mutable type.
        for prop in mapper.iterate_properties:
            if hasattr(prop, 'composite_class') and issubclass(prop.composite_class, cls):
                cls._listen_on_attribute(getattr(class_, prop.key), False, class_)

    # 'mapper' here is the module-level sqlalchemy.orm.mapper callable,
    # used as the global event target for all mappers.
    event.listen(mapper, 'mapper_configured', listen_for_type)
def _setup_composite_listener():
    """Install a single global ``mapper_configured`` listener that
    attaches mutation tracking to any composite attribute whose
    composite class derives from :class:`.MutableComposite`.
    """
    def _listen_for_type(mapper, class_):
        # Scan each mapped property of the newly configured class;
        # composite properties expose a 'composite_class' attribute.
        for prop in mapper.iterate_properties:
            if (hasattr(prop, 'composite_class') and
                issubclass(prop.composite_class, MutableComposite)):
                prop.composite_class._listen_on_attribute(
                    getattr(class_, prop.key), False, class_)
    # Guard against registering the listener twice (e.g. on module
    # re-import); it is global to all mappers via the Mapper class.
    # NOTE(review): '_contains' is a private event-registry API — assumes
    # it stays available across SQLAlchemy point releases; verify.
    if not Mapper.dispatch.mapper_configured._contains(Mapper, _listen_for_type):
        event.listen(Mapper, 'mapper_configured', _listen_for_type)
# Invoked once at import time so composite listening is always active.
_setup_composite_listener()

View File

@@ -1,64 +1,77 @@
# ext/orderinglist.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""A custom list that manages index/position information for its children.
"""A custom list that manages index/position information for contained
elements.
:author: Jason Kirtland
``orderinglist`` is a helper for mutable ordered relationships. It will intercept
list operations performed on a relationship collection and automatically
synchronize changes in list position with an attribute on the related objects.
(See :ref:`advdatamapping_entitycollections` for more information on the general pattern.)
``orderinglist`` is a helper for mutable ordered relationships. It will
intercept list operations performed on a :func:`.relationship`-managed
collection and
automatically synchronize changes in list position onto a target scalar
attribute.
Example: Two tables that store slides in a presentation. Each slide
has a number of bullet points, displayed in order by the 'position'
column on the bullets table. These bullets can be inserted and re-ordered
by your end users, and you need to update the 'position' column of all
affected rows when changes are made.
Example: A ``slide`` table, where each row refers to zero or more entries
in a related ``bullet`` table. The bullets within a slide are
displayed in order based on the value of the ``position`` column in the
``bullet`` table. As entries are reordered in memory, the value of the
``position`` attribute should be updated to reflect the new sort order::
.. sourcecode:: python+sql
slides_table = Table('Slides', metadata,
Column('id', Integer, primary_key=True),
Column('name', String))
Base = declarative_base()
bullets_table = Table('Bullets', metadata,
Column('id', Integer, primary_key=True),
Column('slide_id', Integer, ForeignKey('Slides.id')),
Column('position', Integer),
Column('text', String))
class Slide(Base):
__tablename__ = 'slide'
class Slide(object):
pass
class Bullet(object):
pass
id = Column(Integer, primary_key=True)
name = Column(String)
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet, order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
bullets = relationship("Bullet", order_by="Bullet.position")
The standard relationship mapping will produce a list-like attribute on each Slide
containing all related Bullets, but coping with changes in ordering is totally
your responsibility. If you insert a Bullet into that list, there is no
magic- it won't have a position attribute unless you assign it one, and
you'll need to manually renumber all the subsequent Bullets in the list to
accommodate the insert.
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
An ``orderinglist`` can automate this and manage the 'position' attribute on all
related bullets for you.
The standard relationship mapping will produce a list-like attribute on each
``Slide`` containing all related ``Bullet`` objects,
but coping with changes in ordering is not handled automatically.
When appending a ``Bullet`` into ``Slide.bullets``, the ``Bullet.position``
attribute will remain unset until manually assigned. When the ``Bullet``
is inserted into the middle of the list, the following ``Bullet`` objects
will also need to be renumbered.
.. sourcecode:: python+sql
The :class:`.OrderingList` object automates this task, managing the
``position`` attribute on all ``Bullet`` objects in the collection. It is
constructed using the :func:`.ordering_list` factory::
mapper(Slide, slides_table, properties={
'bullets': relationship(Bullet,
collection_class=ordering_list('position'),
order_by=[bullets_table.c.position])
})
mapper(Bullet, bullets_table)
from sqlalchemy.ext.orderinglist import ordering_list
Base = declarative_base()
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
class Bullet(Base):
__tablename__ = 'bullet'
id = Column(Integer, primary_key=True)
slide_id = Column(Integer, ForeignKey('slide.id'))
position = Column(Integer)
text = Column(String)
With the above mapping the ``Bullet.position`` attribute is managed::
s = Slide()
s.bullets.append(Bullet())
@@ -69,73 +82,87 @@ related bullets for you.
s.bullets[2].position
>>> 2
Use the ``ordering_list`` function to set up the ``collection_class`` on relationships
(as in the mapper example above). This implementation depends on the list
starting in the proper order, so be SURE to put an order_by on your relationship.
The :class:`.OrderingList` construct only works with **changes** to a collection,
and not the initial load from the database, and requires that the list be
sorted when loaded. Therefore, be sure to
specify ``order_by`` on the :func:`.relationship` against the target ordering
attribute, so that the ordering is correct when first loaded.
.. warning::
.. warning::
``ordering_list`` only provides limited functionality when a primary
key column or unique column is the target of the sort. Since changing the order of
entries often means that two rows must trade values, this is not possible when
the value is constrained by a primary key or unique constraint, since one of the rows
would temporarily have to point to a third available value so that the other row
could take its old value. ``ordering_list`` doesn't do any of this for you,
:class:`.OrderingList` only provides limited functionality when a primary
key column or unique column is the target of the sort. Since changing the
order of entries often means that two rows must trade values, this is not
possible when the value is constrained by a primary key or unique
constraint, since one of the rows would temporarily have to point to a
third available value so that the other row could take its old
value. :class:`.OrderingList` doesn't do any of this for you,
nor does SQLAlchemy itself.
``ordering_list`` takes the name of the related object's ordering attribute as
:func:`.ordering_list` takes the name of the related object's ordering attribute as
an argument. By default, the zero-based integer index of the object's
position in the ``ordering_list`` is synchronized with the ordering attribute:
position in the :func:`.ordering_list` is synchronized with the ordering attribute:
index 0 will get position 0, index 1 position 1, etc. To start numbering at 1
or some other integer, provide ``count_from=1``.
Ordering values are not limited to incrementing integers. Almost any scheme
can be implemented by supplying a custom ``ordering_func`` that maps a Python list
index to any value you require.
"""
from sqlalchemy.orm.collections import collection
from sqlalchemy import util
__all__ = [ 'ordering_list' ]
__all__ = ['ordering_list']
def ordering_list(attr, count_from=None, **kw):
"""Prepares an OrderingList factory for use in mapper definitions.
"""Prepares an :class:`OrderingList` factory for use in mapper definitions.
Returns an object suitable for use as an argument to a Mapper relationship's
``collection_class`` option. Arguments are:
Returns an object suitable for use as an argument to a Mapper
relationship's ``collection_class`` option. e.g.::
attr
from sqlalchemy.ext.orderinglist import ordering_list
class Slide(Base):
__tablename__ = 'slide'
id = Column(Integer, primary_key=True)
name = Column(String)
bullets = relationship("Bullet", order_by="Bullet.position",
collection_class=ordering_list('position'))
:param attr:
Name of the mapped attribute to use for storage and retrieval of
ordering information
count_from (optional)
:param count_from:
Set up an integer-based ordering, starting at ``count_from``. For
example, ``ordering_list('pos', count_from=1)`` would create a 1-based
list in SQL, storing the value in the 'pos' column. Ignored if
``ordering_func`` is supplied.
Passes along any keyword arguments to ``OrderingList`` constructor.
Additional arguments are passed to the :class:`.OrderingList` constructor.
"""
kw = _unsugar_count_from(count_from=count_from, **kw)
return lambda: OrderingList(attr, **kw)
# Ordering utility functions
def count_from_0(index, collection):
    """Numbering function: consecutive integers starting at 0.

    The ``collection`` argument is accepted for call compatibility with
    other ordering functions but is not consulted.
    """
    position = index
    return position
def count_from_1(index, collection):
    """Numbering function: consecutive integers starting at 1.

    The ``collection`` argument is accepted for call compatibility with
    other ordering functions but is not consulted.
    """
    return 1 + index
def count_from_n_factory(start):
"""Numbering function: consecutive integers starting at arbitrary start."""
@@ -147,8 +174,9 @@ def count_from_n_factory(start):
pass
return f
def _unsugar_count_from(**kw):
"""Builds counting functions from keywrod arguments.
"""Builds counting functions from keyword arguments.
Keyword argument filter, prepares a simple ``ordering_func`` from a
``count_from`` argument, otherwise passes ``ordering_func`` on unchanged.
@@ -164,12 +192,13 @@ def _unsugar_count_from(**kw):
kw['ordering_func'] = count_from_n_factory(count_from)
return kw
class OrderingList(list):
"""A custom list that manages position information for its children.
See the module and __init__ documentation for more details. The
``ordering_list`` factory function is used to configure ``OrderingList``
collections in ``mapper`` relationship definitions.
The :class:`.OrderingList` object is normally set up using the
:func:`.ordering_list` factory function, used in conjunction with
the :func:`.relationship` function.
"""
@@ -184,13 +213,14 @@ class OrderingList(list):
This implementation relies on the list starting in the proper order,
so be **sure** to put an ``order_by`` on your relationship.
:param ordering_attr:
:param ordering_attr:
Name of the attribute that stores the object's order in the
relationship.
:param ordering_func: Optional. A function that maps the position in the Python list to a
value to store in the ``ordering_attr``. Values returned are
usually (but need not be!) integers.
:param ordering_func: Optional. A function that maps the position in
the Python list to a value to store in the
``ordering_attr``. Values returned are usually (but need not be!)
integers.
An ``ordering_func`` is called with two positional parameters: the
index of the element in the list, and the list itself.
@@ -201,7 +231,7 @@ class OrderingList(list):
like stepped numbering, alphabetical and Fibonacci numbering, see
the unit tests.
:param reorder_on_append:
:param reorder_on_append:
Default False. When appending an object with an existing (non-None)
ordering value, that value will be left untouched unless
``reorder_on_append`` is true. This is an optimization to avoid a
@@ -215,7 +245,7 @@ class OrderingList(list):
making changes, any of whom happen to load this collection even in
passing, all of the sessions would try to "clean up" the numbering
in their commits, possibly causing all but one to fail with a
concurrent modification error. Spooky action at a distance.
concurrent modification error.
Recommend leaving this with the default of False, and just call
``reorder()`` if you're doing ``append()`` operations with
@@ -314,9 +344,24 @@ class OrderingList(list):
self._reorder()
# end Py2K
    def __reduce__(self):
        # Pickle support: delegate reconstruction to the module-level
        # _reconstitute() helper, handing it the concrete class, the
        # instance __dict__, and a plain-list snapshot of the elements.
        # _reconstitute repopulates via list.extend so unpickling does not
        # re-fire the overridden mutation methods.
        return _reconstitute, (self.__class__, self.__dict__, list(self))
# Copy docstrings from the built-in ``list`` methods onto any same-named
# functions defined above that lack a docstring of their own, so the
# overriding methods stay documented.
# NOTE(review): ``func.func_name`` is Python 2 syntax (``__name__`` in
# Python 3) — this module appears to target Python 2.
for func_name, func in locals().items():
    if (util.callable(func) and func.func_name == func_name and
            not func.__doc__ and hasattr(list, func_name)):
        func.__doc__ = getattr(list, func_name).__doc__
# Clean up the loop variables so they don't leak into the module namespace.
del func_name, func
def _reconstitute(cls, dict_, items):
""" Reconstitute an :class:`.OrderingList`.
This is the adjoint to :meth:`.OrderingList.__reduce__`. It is used for
unpickling :class:`.OrderingList` objects.
"""
obj = cls.__new__(cls)
obj.__dict__.update(dict_)
list.extend(obj, items)
return obj

View File

@@ -1,10 +1,10 @@
# ext/serializer.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
"""Serializer/Deserializer objects for usage with SQLAlchemy query structures,
allowing "contextual" deserialization.
Any SQLAlchemy query structure, either based on sqlalchemy.sql.*
@@ -31,19 +31,19 @@ Usage is nearly the same as that of the standard Python pickle module::
print query2.all()
Similar restrictions as when using raw pickle apply; mapped classes must be
Similar restrictions as when using raw pickle apply; mapped classes must be
themselves be pickleable, meaning they are importable from a module-level
namespace.
The serializer module is only appropriate for query structures. It is not
needed for:
* instances of user-defined classes. These contain no references to engines,
* instances of user-defined classes. These contain no references to engines,
sessions or expression constructs in the typical case and can be serialized directly.
* Table metadata that is to be loaded entirely from the serialized structure (i.e. is
not already declared in the application). Regular pickle.loads()/dumps() can
be used to fully dump any ``MetaData`` object, typically one which was reflected
not already declared in the application). Regular pickle.loads()/dumps() can
be used to fully dump any ``MetaData`` object, typically one which was reflected
from an existing database at some previous point in time. The serializer module
is specifically for the opposite case, where the Table metadata is already present
in memory.

View File

@@ -1,22 +1,17 @@
# ext/sqlsoup.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. note::
SQLSoup is now its own project. Documentation
and project status are available at:
http://pypi.python.org/pypi/sqlsoup
http://readthedocs.org/docs/sqlsoup
SQLSoup will no longer be included with SQLAlchemy as of
version 0.8.
.. versionchanged:: 0.8
SQLSoup is now its own project. Documentation
and project status are available at:
http://pypi.python.org/pypi/sqlsoup and
http://readthedocs.org/docs/sqlsoup\ .
SQLSoup will no longer be included with SQLAlchemy.
Introduction
@@ -62,7 +57,7 @@ Loading objects is as easy as this::
>>> users
[
MappedUsers(name=u'Joe Student',email=u'student@example.edu',
password=u'student',classname=None,admin=0),
password=u'student',classname=None,admin=0),
MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu',
password=u'basepair',classname=None,admin=1)
]
@@ -72,7 +67,7 @@ Of course, letting the database do the sort is better::
>>> db.users.order_by(db.users.name).all()
[
MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu',
password=u'basepair',classname=None,admin=1),
password=u'basepair',classname=None,admin=1),
MappedUsers(name=u'Joe Student',email=u'student@example.edu',
password=u'student',classname=None,admin=0)
]
@@ -91,7 +86,7 @@ we're at it::
>>> db.users.filter(where).order_by(desc(db.users.name)).all()
[
MappedUsers(name=u'Joe Student',email=u'student@example.edu',
password=u'student',classname=None,admin=0),
password=u'student',classname=None,admin=0),
MappedUsers(name=u'Bhargan Basepair',email=u'basepair@example.edu',
password=u'basepair',classname=None,admin=1)
]
@@ -217,15 +212,15 @@ with `with_labels`, to disambiguate columns with their table name
(.c is short for .columns)::
>>> db.with_labels(join1).c.keys()
[u'users_name', u'users_email', u'users_password',
u'users_classname', u'users_admin', u'loans_book_id',
[u'users_name', u'users_email', u'users_password',
u'users_classname', u'users_admin', u'loans_book_id',
u'loans_user_name', u'loans_loan_date']
You can also join directly to a labeled object::
>>> labeled_loans = db.with_labels(db.loans)
>>> db.join(db.users, labeled_loans, isouter=True).c.keys()
[u'name', u'email', u'password', u'classname',
[u'name', u'email', u'password', u'classname',
u'admin', u'loans_book_id', u'loans_user_name', u'loans_loan_date']
@@ -256,8 +251,8 @@ accepts in normal mapper definition:
Advanced Use
============
Sessions, Transactions and Application Integration
-------------------------------------------------
Sessions, Transactions and Application Integration
---------------------------------------------------
.. note::
@@ -472,8 +467,8 @@ def _class_for_table(session, engine, selectable, base_cls, mapper_kwargs):
selectable = expression._clause_element_as_expr(selectable)
mapname = 'Mapped' + _selectable_name(selectable)
# Py2K
if isinstance(mapname, unicode):
engine_encoding = engine.dialect.encoding
if isinstance(mapname, unicode):
engine_encoding = engine.dialect.encoding
mapname = mapname.encode(engine_encoding)
# end Py2K
@@ -492,7 +487,7 @@ def _class_for_table(session, engine, selectable, base_cls, mapper_kwargs):
raise TypeError('unable to compare with %s' % o.__class__)
return t1, t2
# python2/python3 compatible system of
# python2/python3 compatible system of
# __cmp__ - __lt__ + __eq__
def __lt__(self, o):
@@ -529,15 +524,15 @@ class SqlSoup(object):
def __init__(self, engine_or_metadata, base=object, session=None):
"""Initialize a new :class:`.SqlSoup`.
:param engine_or_metadata: a string database URL, :class:`.Engine`
:param engine_or_metadata: a string database URL, :class:`.Engine`
or :class:`.MetaData` object to associate with. If the
argument is a :class:`.MetaData`, it should be *bound*
to an :class:`.Engine`.
:param base: a class which will serve as the default class for
:param base: a class which will serve as the default class for
returned mapped classes. Defaults to ``object``.
:param session: a :class:`.ScopedSession` or :class:`.Session` with
which to associate ORM operations for this :class:`.SqlSoup` instance.
If ``None``, a :class:`.ScopedSession` that's local to this
If ``None``, a :class:`.ScopedSession` that's local to this
module is used.
"""
@@ -550,7 +545,7 @@ class SqlSoup(object):
elif isinstance(engine_or_metadata, (basestring, Engine)):
self._metadata = MetaData(engine_or_metadata)
else:
raise ArgumentError("invalid engine or metadata argument %r" %
raise ArgumentError("invalid engine or metadata argument %r" %
engine_or_metadata)
self._cache = {}
@@ -572,7 +567,7 @@ class SqlSoup(object):
"""Execute a SQL statement.
The statement may be a string SQL string,
an :func:`.expression.select` construct, or an :func:`.expression.text`
an :func:`.expression.select` construct, or an :func:`.expression.text`
construct.
"""
@@ -599,7 +594,7 @@ class SqlSoup(object):
self.session.flush()
def rollback(self):
"""Rollback the current transaction.
"""Rollback the current transaction.
See :meth:`.Session.rollback`.
@@ -635,14 +630,14 @@ class SqlSoup(object):
"""
self.session.expunge_all()
def map_to(self, attrname, tablename=None, selectable=None,
def map_to(self, attrname, tablename=None, selectable=None,
schema=None, base=None, mapper_args=util.immutabledict()):
"""Configure a mapping to the given attrname.
This is the "master" method that can be used to create any
This is the "master" method that can be used to create any
configuration.
(new in 0.6.6)
.. versionadded:: 0.6.6
:param attrname: String attribute name which will be
established as an attribute on this :class:.`.SqlSoup`
@@ -682,10 +677,10 @@ class SqlSoup(object):
raise ArgumentError("'tablename' and 'selectable' "
"arguments are mutually exclusive")
selectable = Table(tablename,
self._metadata,
autoload=True,
autoload_with=self.bind,
selectable = Table(tablename,
self._metadata,
autoload=True,
autoload_with=self.bind,
schema=schema or self.schema)
elif schema:
raise ArgumentError("'tablename' argument is required when "
@@ -723,8 +718,9 @@ class SqlSoup(object):
def map(self, selectable, base=None, **mapper_args):
"""Map a selectable directly.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
.. versionchanged:: 0.6.6
The class and its mapping are not cached and will
be discarded once dereferenced.
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
@@ -746,11 +742,12 @@ class SqlSoup(object):
)
def with_labels(self, selectable, base=None, **mapper_args):
"""Map a selectable directly, wrapping the
"""Map a selectable directly, wrapping the
selectable in a subquery with labels.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
.. versionchanged:: 0.6.6
The class and its mapping are not cached and will
be discarded once dereferenced.
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
@@ -769,12 +766,13 @@ class SqlSoup(object):
select(use_labels=True).
alias('foo'), base=base, **mapper_args)
def join(self, left, right, onclause=None, isouter=False,
def join(self, left, right, onclause=None, isouter=False,
base=None, **mapper_args):
"""Create an :func:`.expression.join` and map to it.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
.. versionchanged:: 0.6.6
The class and its mapping are not cached and will
be discarded once dereferenced.
:param left: a mapped class or table object.
:param right: a mapped class or table object.
@@ -794,7 +792,7 @@ class SqlSoup(object):
return self.map(j, base=base, **mapper_args)
def entity(self, attr, schema=None):
"""Return the named entity from this :class:`.SqlSoup`, or
"""Return the named entity from this :class:`.SqlSoup`, or
create if not present.
For more generalized mapping, see :meth:`.map_to`.

View File

@@ -1,5 +1,5 @@
# sqlalchemy/interfaces.py
# Copyright (C) 2007-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2007-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2007 Jason Kirtland jek@discorporate.us
#
# This module is part of SQLAlchemy and is released under
@@ -17,8 +17,8 @@ from sqlalchemy import event, util
class PoolListener(object):
"""Hooks into the lifecycle of connections in a :class:`.Pool`.
.. note::
.. note::
:class:`.PoolListener` is deprecated. Please
refer to :class:`.PoolEvents`.
@@ -27,7 +27,7 @@ class PoolListener(object):
class MyListener(PoolListener):
def connect(self, dbapi_con, con_record):
'''perform connect operations'''
# etc.
# etc.
# create a new pool with a listener
p = QueuePool(..., listeners=[MyListener()])
@@ -151,8 +151,8 @@ class PoolListener(object):
class ConnectionProxy(object):
"""Allows interception of statement execution by Connections.
.. note::
.. note::
:class:`.ConnectionProxy` is deprecated. Please
refer to :class:`.ConnectionEvents`.
@@ -194,7 +194,7 @@ class ConnectionProxy(object):
event.listen(self, 'before_execute', adapt_execute)
def adapt_cursor_execute(conn, cursor, statement,
def adapt_cursor_execute(conn, cursor, statement,
parameters,context, executemany, ):
def execute_wrapper(

View File

@@ -12,7 +12,7 @@ module. The regular dotted module namespace is used, starting at
'sqlalchemy'. For class-level logging, the class name is appended.
The "echo" keyword parameter, available on SQLA :class:`.Engine`
and :class:`.Pool` objects, corresponds to a logger specific to that
and :class:`.Pool` objects, corresponds to a logger specific to that
instance only.
"""
@@ -60,7 +60,7 @@ class InstanceLogger(object):
"""A logger adapter (wrapper) for :class:`.Identified` subclasses.
This allows multiple instances (e.g. Engine or Pool instances)
to share a logger, but have its verbosity controlled on a
to share a logger, but have its verbosity controlled on a
per-instance basis.
The basic functionality is to return a logging level
@@ -185,7 +185,7 @@ def instance_logger(instance, echoflag=None):
logger = logging.getLogger(name)
else:
# if a specified echo flag, return an EchoLogger,
# which checks the flag, overrides normal log
# which checks the flag, overrides normal log
# levels by calling logger._log()
logger = InstanceLogger(echoflag, name)

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
# orm/attributes.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -29,7 +29,7 @@ NO_VALUE = util.symbol('NO_VALUE')
NEVER_SET = util.symbol('NEVER_SET')
PASSIVE_RETURN_NEVER_SET = util.symbol('PASSIVE_RETURN_NEVER_SET',
"""Symbol indicating that loader callables can be
"""Symbol indicating that loader callables can be
fired off, but if no callable is applicable and no value is
present, the attribute should remain non-initialized.
NEVER_SET is returned in this case.
@@ -37,14 +37,14 @@ NEVER_SET is returned in this case.
PASSIVE_NO_INITIALIZE = util.symbol('PASSIVE_NO_INITIALIZE',
"""Symbol indicating that loader callables should
not be fired off, and a non-initialized attribute
not be fired off, and a non-initialized attribute
should remain that way.
""")
PASSIVE_NO_FETCH = util.symbol('PASSIVE_NO_FETCH',
"""Symbol indicating that loader callables should not emit SQL,
"""Symbol indicating that loader callables should not emit SQL,
but a value can be fetched from the current session.
Non-initialized attributes should be initialized to an empty value.
""")
@@ -53,9 +53,9 @@ PASSIVE_NO_FETCH_RELATED = util.symbol('PASSIVE_NO_FETCH_RELATED',
"""Symbol indicating that loader callables should not emit SQL for
loading a related object, but can refresh the attributes of the local
instance in order to locate a related object in the current session.
Non-initialized attributes should be initialized to an empty value.
The unit of work uses this mode to check if history is present
on many-to-one attributes with minimal SQL emitted.
@@ -81,7 +81,7 @@ PASSIVE_OFF = util.symbol('PASSIVE_OFF',
class QueryableAttribute(interfaces.PropComparator):
"""Base class for class-bound attributes. """
def __init__(self, class_, key, impl=None,
def __init__(self, class_, key, impl=None,
comparator=None, parententity=None):
self.class_ = class_
self.key = key
@@ -92,7 +92,7 @@ class QueryableAttribute(interfaces.PropComparator):
manager = manager_of_class(class_)
# manager is None in the case of AliasedClass
if manager:
# propagate existing event listeners from
# propagate existing event listeners from
# immediate superclass
for base in manager._bases:
if key in base:
@@ -134,8 +134,8 @@ class QueryableAttribute(interfaces.PropComparator):
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(self).__name__,
type(self.comparator).__name__,
type(self).__name__,
type(self.comparator).__name__,
key)
)
@@ -151,7 +151,7 @@ class InstrumentedAttribute(QueryableAttribute):
"""Class bound instrumented attribute which adds descriptor methods."""
def __set__(self, instance, value):
self.impl.set(instance_state(instance),
self.impl.set(instance_state(instance),
instance_dict(instance), value, None)
def __delete__(self, instance):
@@ -179,12 +179,12 @@ def create_proxied_attribute(descriptor):
class Proxy(QueryableAttribute):
"""Presents the :class:`.QueryableAttribute` interface as a
proxy on top of a Python descriptor / :class:`.PropComparator`
proxy on top of a Python descriptor / :class:`.PropComparator`
combination.
"""
def __init__(self, class_, key, descriptor, comparator,
def __init__(self, class_, key, descriptor, comparator,
adapter=None, doc=None):
self.class_ = class_
self.key = key
@@ -233,8 +233,8 @@ def create_proxied_attribute(descriptor):
except AttributeError:
raise AttributeError(
'Neither %r object nor %r object has an attribute %r' % (
type(descriptor).__name__,
type(self.comparator).__name__,
type(descriptor).__name__,
type(self.comparator).__name__,
attribute)
)
@@ -250,7 +250,7 @@ class AttributeImpl(object):
def __init__(self, class_, key,
callable_, dispatch, trackparent=False, extension=None,
compare_function=None, active_history=False,
compare_function=None, active_history=False,
parent_token=None, expire_missing=True,
**kwargs):
"""Construct an AttributeImpl.
@@ -287,12 +287,12 @@ class AttributeImpl(object):
parent_token
Usually references the MapperProperty, used as a key for
the hasparent() function to identify an "owning" attribute.
Allows multiple AttributeImpls to all match a single
Allows multiple AttributeImpls to all match a single
owner attribute.
expire_missing
if False, don't add an "expiry" callable to this attribute
during state.expire_attributes(None), if no value is present
during state.expire_attributes(None), if no value is present
for this key.
"""
@@ -331,7 +331,7 @@ class AttributeImpl(object):
def hasparent(self, state, optimistic=False):
"""Return the boolean value of a `hasparent` flag attached to
"""Return the boolean value of a `hasparent` flag attached to
the given state.
The `optimistic` flag determines what the default return value
@@ -375,8 +375,8 @@ class AttributeImpl(object):
"state %s along attribute '%s', "
"but the parent record "
"has gone stale, can't be sure this "
"is the most recent parent." %
(mapperutil.state_str(state),
"is the most recent parent." %
(mapperutil.state_str(state),
mapperutil.state_str(parent_state),
self.key))
@@ -406,18 +406,18 @@ class AttributeImpl(object):
raise NotImplementedError()
def get_all_pending(self, state, dict_):
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
"""Return a list of tuples of (state, obj)
for all objects in this attribute's current state
+ history.
Only applies to object-based attributes.
This is an inlining of existing functionality
which roughly correponds to:
which roughly corresponds to:
get_state_history(
state,
key,
state,
key,
passive=PASSIVE_NO_INITIALIZE).sum()
"""
@@ -478,14 +478,14 @@ class AttributeImpl(object):
self.set(state, dict_, value, initiator, passive=passive)
def remove(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
self.set(state, dict_, None, initiator,
passive=passive, check_old=value)
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
self.set(state, dict_, None, initiator,
self.set(state, dict_, None, initiator,
passive=passive, check_old=value, pop=True)
def set(self, state, dict_, value, initiator,
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
raise NotImplementedError()
@@ -532,7 +532,7 @@ class ScalarAttributeImpl(AttributeImpl):
return History.from_scalar_attribute(
self, state, dict_.get(self.key, NO_VALUE))
def set(self, state, dict_, value, initiator,
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
if initiator and initiator.parent_token is self.parent_token:
return
@@ -543,7 +543,7 @@ class ScalarAttributeImpl(AttributeImpl):
old = dict_.get(self.key, NO_VALUE)
if self.dispatch.set:
value = self.fire_replace_event(state, dict_,
value = self.fire_replace_event(state, dict_,
value, old, initiator)
state.modified_event(dict_, self, old)
dict_[self.key] = value
@@ -575,10 +575,10 @@ class MutableScalarAttributeImpl(ScalarAttributeImpl):
class_manager, copy_function=None,
compare_function=None, **kwargs):
super(ScalarAttributeImpl, self).__init__(
class_,
key,
class_,
key,
callable_, dispatch,
compare_function=compare_function,
compare_function=compare_function,
**kwargs)
class_manager.mutable_attributes.add(key)
if copy_function is None:
@@ -611,15 +611,15 @@ class MutableScalarAttributeImpl(ScalarAttributeImpl):
ScalarAttributeImpl.delete(self, state, dict_)
state.mutable_dict.pop(self.key)
def set(self, state, dict_, value, initiator,
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
ScalarAttributeImpl.set(self, state, dict_, value,
ScalarAttributeImpl.set(self, state, dict_, value,
initiator, passive, check_old=check_old, pop=pop)
state.mutable_dict[self.key] = value
class ScalarObjectAttributeImpl(ScalarAttributeImpl):
"""represents a scalar-holding InstrumentedAttribute,
"""represents a scalar-holding InstrumentedAttribute,
where the target object is also instrumented.
Adds events to delete/set operations.
@@ -653,7 +653,7 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
if current is not None:
ret = [(instance_state(current), current)]
else:
ret = []
ret = [(None, None)]
if self.key in state.committed_state:
original = state.committed_state[self.key]
@@ -665,7 +665,7 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
else:
return []
def set(self, state, dict_, value, initiator,
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, check_old=None, pop=False):
"""Set a value on the given InstanceState.
@@ -686,7 +686,7 @@ class ScalarObjectAttributeImpl(ScalarAttributeImpl):
old is not PASSIVE_NO_RESULT and \
check_old is not old:
if pop:
return
return
else:
raise ValueError(
"Object %s not associated with %s on attribute '%s'" % (
@@ -744,12 +744,12 @@ class CollectionAttributeImpl(AttributeImpl):
typecallable=None, trackparent=False, extension=None,
copy_function=None, compare_function=None, **kwargs):
super(CollectionAttributeImpl, self).__init__(
class_,
key,
class_,
key,
callable_, dispatch,
trackparent=trackparent,
extension=extension,
compare_function=compare_function,
compare_function=compare_function,
**kwargs)
if copy_function is None:
@@ -777,11 +777,11 @@ class CollectionAttributeImpl(AttributeImpl):
if self.key in state.committed_state:
original = state.committed_state[self.key]
if original is not NO_VALUE:
current_states = [((c is not None) and
instance_state(c) or None, c)
current_states = [((c is not None) and
instance_state(c) or None, c)
for c in current]
original_states = [((c is not None) and
instance_state(c) or None, c)
original_states = [((c is not None) and
instance_state(c) or None, c)
for c in original]
current_set = dict(current_states)
@@ -869,13 +869,13 @@ class CollectionAttributeImpl(AttributeImpl):
def pop(self, state, dict_, value, initiator, passive=PASSIVE_OFF):
try:
# TODO: better solution here would be to add
# a "popper" role to collections.py to complement
# a "popper" role to collections.py to complement
# "remover".
self.remove(state, dict_, value, initiator, passive=passive)
except (ValueError, KeyError, IndexError):
pass
def set(self, state, dict_, value, initiator,
def set(self, state, dict_, value, initiator,
passive=PASSIVE_OFF, pop=False):
"""Set a value on the given object.
@@ -954,7 +954,7 @@ class CollectionAttributeImpl(AttributeImpl):
return user_data
def get_collection(self, state, dict_,
def get_collection(self, state, dict_,
user_data=None, passive=PASSIVE_OFF):
"""Retrieve the CollectionAdapter associated with the given state.
@@ -973,6 +973,14 @@ def backref_listeners(attribute, key, uselist):
# use easily recognizable names for stack traces
parent_token = attribute.impl.parent_token
def _acceptable_key_err(child_state, initiator):
raise ValueError(
"Object %s not associated with attribute of "
"type %s" % (mapperutil.state_str(child_state),
manager_of_class(initiator.class_)[initiator.key]))
def emit_backref_from_scalar_set_event(state, child, oldchild, initiator):
if oldchild is child:
return child
@@ -983,61 +991,73 @@ def backref_listeners(attribute, key, uselist):
old_state, old_dict = instance_state(oldchild),\
instance_dict(oldchild)
impl = old_state.manager[key].impl
impl.pop(old_state,
old_dict,
state.obj(),
impl.pop(old_state,
old_dict,
state.obj(),
initiator, passive=PASSIVE_NO_FETCH)
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_state.manager[key].impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator)
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_append_event(state, child, initiator):
child_state, child_dict = instance_state(child), \
instance_dict(child)
child_state.manager[key].impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
child_impl = child_state.manager[key].impl
if initiator.parent_token is not parent_token and \
initiator.parent_token is not child_impl.parent_token:
_acceptable_key_err(state, initiator)
child_impl.append(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
return child
def emit_backref_from_collection_remove_event(state, child, initiator):
if child is not None:
child_state, child_dict = instance_state(child),\
instance_dict(child)
child_state.manager[key].impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
child_impl = child_state.manager[key].impl
# can't think of a path that would produce an initiator
# mismatch here, as it would require an existing collection
# mismatch.
child_impl.pop(
child_state,
child_dict,
state.obj(),
initiator,
passive=PASSIVE_NO_FETCH)
if uselist:
event.listen(attribute, "append",
emit_backref_from_collection_append_event,
event.listen(attribute, "append",
emit_backref_from_collection_append_event,
retval=True, raw=True)
else:
event.listen(attribute, "set",
emit_backref_from_scalar_set_event,
event.listen(attribute, "set",
emit_backref_from_scalar_set_event,
retval=True, raw=True)
# TODO: need coverage in test/orm/ of remove event
event.listen(attribute, "remove",
emit_backref_from_collection_remove_event,
event.listen(attribute, "remove",
emit_backref_from_collection_remove_event,
retval=True, raw=True)
_NO_HISTORY = util.symbol('NO_HISTORY')
_NO_STATE_SYMBOLS = frozenset([
id(PASSIVE_NO_RESULT),
id(NO_VALUE),
id(PASSIVE_NO_RESULT),
id(NO_VALUE),
id(NEVER_SET)])
class History(tuple):
"""A 3-tuple of added, unchanged and deleted values,
@@ -1078,7 +1098,7 @@ class History(tuple):
return not bool(
(self.added or self.deleted)
or self.unchanged and self.unchanged != [None]
)
)
def sum(self):
"""Return a collection of added + unchanged + deleted."""
@@ -1126,11 +1146,11 @@ class History(tuple):
return cls((), (), ())
else:
return cls((), [current], ())
# dont let ClauseElement expressions here trip things up
# don't let ClauseElement expressions here trip things up
elif attribute.is_equal(current, original) is True:
return cls((), [current], ())
else:
# current convention on native scalars is to not
# current convention on native scalars is to not
# include information
# about missing previous value in "deleted", but
# we do include None, which helps in some primary
@@ -1156,11 +1176,11 @@ class History(tuple):
elif current is original:
return cls((), [current], ())
else:
# current convention on related objects is to not
# current convention on related objects is to not
# include information
# about missing previous value in "deleted", and
# to also not include None - the dependency.py rules
# ignore the None in any case.
# ignore the None in any case.
if id(original) in _NO_STATE_SYMBOLS or original is None:
deleted = ()
else:
@@ -1181,11 +1201,11 @@ class History(tuple):
return cls((), list(current), ())
else:
current_states = [((c is not None) and instance_state(c) or None, c)
for c in current
current_states = [((c is not None) and instance_state(c) or None, c)
for c in current
]
original_states = [((c is not None) and instance_state(c) or None, c)
for c in original
original_states = [((c is not None) and instance_state(c) or None, c)
for c in original
]
current_set = dict(current_states)
@@ -1200,7 +1220,7 @@ class History(tuple):
HISTORY_BLANK = History(None, None, None)
def get_history(obj, key, passive=PASSIVE_OFF):
"""Return a :class:`.History` record for the given object
"""Return a :class:`.History` record for the given object
and attribute key.
:param obj: an object whose class is instrumented by the
@@ -1239,14 +1259,14 @@ def register_attribute(class_, key, **kw):
comparator = kw.pop('comparator', None)
parententity = kw.pop('parententity', None)
doc = kw.pop('doc', None)
desc = register_descriptor(class_, key,
desc = register_descriptor(class_, key,
comparator, parententity, doc=doc)
register_attribute_impl(class_, key, **kw)
return desc
def register_attribute_impl(class_, key,
uselist=False, callable_=None,
useobject=False, mutable_scalars=False,
uselist=False, callable_=None,
useobject=False, mutable_scalars=False,
impl_class=None, backref=None, **kw):
manager = manager_of_class(class_)
@@ -1281,7 +1301,7 @@ def register_attribute_impl(class_, key,
manager.post_configure_attribute(key)
return manager[key]
def register_descriptor(class_, key, comparator=None,
def register_descriptor(class_, key, comparator=None,
parententity=None, doc=None):
manager = manager_of_class(class_)
@@ -1310,7 +1330,7 @@ def init_collection(obj, key):
:func:`~sqlalchemy.orm.attributes.set_committed_value`.
obj is an instrumented object instance. An InstanceState
is accepted directly for backwards compatibility but
is accepted directly for backwards compatibility but
this usage is deprecated.
"""
@@ -1328,7 +1348,7 @@ def init_state_collection(state, dict_, key):
def set_committed_value(instance, key, value):
"""Set the value of an attribute with no history events.
Cancels any previous history present. The value should be
Cancels any previous history present. The value should be
a scalar value for scalar-holding attributes, or
an iterable for any collection-holding attribute.
@@ -1385,7 +1405,7 @@ def del_attribute(instance, key):
def flag_modified(instance, key):
"""Mark an attribute on an instance as 'modified'.
This sets the 'modified' flag on the instance and
This sets the 'modified' flag on the instance and
establishes an unconditional change event for the given attribute.
"""

View File

@@ -1,5 +1,5 @@
# orm/collections.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -111,27 +111,62 @@ import weakref
from sqlalchemy.sql import expression
from sqlalchemy import schema, util, exc as sa_exc
__all__ = ['collection', 'collection_adapter',
'mapped_collection', 'column_mapped_collection',
'attribute_mapped_collection']
__instrumentation_mutex = util.threading.Lock()
class _SerializableColumnGetter(object):
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
class _PlainColumnGetter(object):
"""Plain column getter, stores collection of Column objects
directly.
Serializes to a :class:`._SerializableColumnGetterV2`
which has more expensive __call__() performance
and some rare caveats.
"""
def __init__(self, cols):
self.cols = cols
self.composite = len(cols) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
return _SerializableColumnGetterV2._reduce_from_cols(self.cols)
def _cols(self, mapper):
return self.cols
def __call__(self, value):
state = instance_state(value)
m = _state_mapper(state)
key = [
m._get_state_attr_by_column(state, state.dict, col)
for col in self._cols(m)
]
if self.composite:
return tuple(key)
else:
return key[0]
class _SerializableColumnGetter(object):
"""Column-based getter used in version 0.7.6 only.
Remains here for pickle compatibility with 0.7.6.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return _SerializableColumnGetter, (self.colkeys,)
def __call__(self, value):
state = instance_state(value)
m = _state_mapper(state)
key = [m._get_state_attr_by_column(
state, state.dict,
state, state.dict,
m.mapped_table.columns[k])
for k in self.colkeys]
if self.composite:
@@ -139,6 +174,48 @@ class _SerializableColumnGetter(object):
else:
return key[0]
class _SerializableColumnGetterV2(_PlainColumnGetter):
"""Updated serializable getter which deals with
multi-table mapped classes.
Two extremely unusual cases are not supported.
Mappings which have tables across multiple metadata
objects, or which are mapped to non-Table selectables
linked across inheriting mappers may fail to function
here.
"""
def __init__(self, colkeys):
self.colkeys = colkeys
self.composite = len(colkeys) > 1
def __reduce__(self):
return self.__class__, (self.colkeys,)
@classmethod
def _reduce_from_cols(cls, cols):
def _table_key(c):
if not isinstance(c.table, expression.TableClause):
return None
else:
return c.table.key
colkeys = [(c.key, _table_key(c)) for c in cols]
return _SerializableColumnGetterV2, (colkeys,)
def _cols(self, mapper):
cols = []
metadata = getattr(mapper.local_table, 'metadata', None)
for (ckey, tkey) in self.colkeys:
if tkey is None or \
metadata is None or \
tkey not in metadata:
cols.append(mapper.local_table.c[ckey])
else:
cols.append(metadata.tables[tkey].c[ckey])
return cols
def column_mapped_collection(mapping_spec):
"""A dictionary-based collection type with column-based keying.
@@ -155,10 +232,10 @@ def column_mapped_collection(mapping_spec):
from sqlalchemy.orm.util import _state_mapper
from sqlalchemy.orm.attributes import instance_state
cols = [c.key for c in [
expression._only_column_elements(q, "mapping_spec")
for q in util.to_list(mapping_spec)]]
keyfunc = _SerializableColumnGetter(cols)
cols = [expression._only_column_elements(q, "mapping_spec")
for q in util.to_list(mapping_spec)
]
keyfunc = _PlainColumnGetter(cols)
return lambda: MappedCollection(keyfunc)
class _SerializableAttrGetter(object):
@@ -632,8 +709,8 @@ class CollectionAdapter(object):
"""
if initiator is not False and item is not None:
return self.attr.fire_append_event(
self.owner_state,
self.owner_state.dict,
self.owner_state,
self.owner_state.dict,
item, initiator)
else:
return item
@@ -648,8 +725,8 @@ class CollectionAdapter(object):
"""
if initiator is not False and item is not None:
self.attr.fire_remove_event(
self.owner_state,
self.owner_state.dict,
self.owner_state,
self.owner_state.dict,
item, initiator)
def fire_pre_remove_event(self, initiator=None):
@@ -660,8 +737,8 @@ class CollectionAdapter(object):
"""
self.attr.fire_pre_remove_event(
self.owner_state,
self.owner_state.dict,
self.owner_state,
self.owner_state.dict,
initiator=initiator)
def __getstate__(self):

View File

@@ -1,5 +1,5 @@
# orm/dependency.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -48,7 +48,7 @@ class DependencyProcessor(object):
def hasparent(self, state):
"""return True if the given object instance has a parent,
according to the ``InstrumentedAttribute`` handled by this
according to the ``InstrumentedAttribute`` handled by this
``DependencyProcessor``.
"""
@@ -69,29 +69,29 @@ class DependencyProcessor(object):
before_delete = unitofwork.ProcessAll(uow, self, True, True)
parent_saves = unitofwork.SaveUpdateAll(
uow,
uow,
self.parent.primary_base_mapper
)
child_saves = unitofwork.SaveUpdateAll(
uow,
uow,
self.mapper.primary_base_mapper
)
parent_deletes = unitofwork.DeleteAll(
uow,
uow,
self.parent.primary_base_mapper
)
child_deletes = unitofwork.DeleteAll(
uow,
uow,
self.mapper.primary_base_mapper
)
self.per_property_dependencies(uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
self.per_property_dependencies(uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
)
@@ -99,7 +99,7 @@ class DependencyProcessor(object):
def per_state_flush_actions(self, uow, states, isdelete):
"""establish actions and dependencies related to a flush.
These actions will operate on all relevant states
These actions will operate on all relevant states
individually. This occurs only if there are cycles
in the 'aggregated' version of events.
@@ -141,14 +141,14 @@ class DependencyProcessor(object):
# check if the "parent" side is part of the cycle
if not isdelete:
parent_saves = unitofwork.SaveUpdateAll(
uow,
uow,
self.parent.base_mapper)
parent_deletes = before_delete = None
if parent_saves in uow.cycles:
parent_in_cycles = True
else:
parent_deletes = unitofwork.DeleteAll(
uow,
uow,
self.parent.base_mapper)
parent_saves = after_save = None
if parent_deletes in uow.cycles:
@@ -165,19 +165,19 @@ class DependencyProcessor(object):
continue
if isdelete:
before_delete = unitofwork.ProcessState(uow,
before_delete = unitofwork.ProcessState(uow,
self, True, state)
if parent_in_cycles:
parent_deletes = unitofwork.DeleteState(
uow,
state,
uow,
state,
parent_base_mapper)
else:
after_save = unitofwork.ProcessState(uow, self, False, state)
if parent_in_cycles:
parent_saves = unitofwork.SaveUpdateState(
uow,
state,
uow,
state,
parent_base_mapper)
if child_in_cycles:
@@ -190,24 +190,24 @@ class DependencyProcessor(object):
if deleted:
child_action = (
unitofwork.DeleteState(
uow, child_state,
child_base_mapper),
uow, child_state,
child_base_mapper),
True)
else:
child_action = (
unitofwork.SaveUpdateState(
uow, child_state,
child_base_mapper),
uow, child_state,
child_base_mapper),
False)
child_actions.append(child_action)
# establish dependencies between our possibly per-state
# parent action and our possibly per-state child action.
for child_action, childisdelete in child_actions:
self.per_state_dependencies(uow, parent_saves,
parent_deletes,
child_action,
after_save, before_delete,
self.per_state_dependencies(uow, parent_saves,
parent_deletes,
child_action,
after_save, before_delete,
isdelete, childisdelete)
@@ -232,12 +232,12 @@ class DependencyProcessor(object):
passive = attributes.PASSIVE_OFF
for s in states:
# TODO: add a high speed method
# TODO: add a high speed method
# to InstanceState which returns: attribute
# has a non-None value, or had one
history = uowcommit.get_attribute_history(
s,
self.key,
s,
self.key,
passive)
if history and not history.empty():
return True
@@ -248,7 +248,7 @@ class DependencyProcessor(object):
def _verify_canload(self, state):
if state is not None and \
not self.mapper._canload(state,
not self.mapper._canload(state,
allow_subtypes=not self.enable_typechecks):
if self.mapper._canload(state, allow_subtypes=True):
raise exc.FlushError('Attempting to flush an item of type '
@@ -287,11 +287,11 @@ class DependencyProcessor(object):
return None
process_key = tuple(sorted(
[self.key] +
[self.key] +
[p.key for p in self.prop._reverse_property]
))
return uow.memo(
('reverse_key', process_key),
('reverse_key', process_key),
set
)
@@ -299,7 +299,7 @@ class DependencyProcessor(object):
for x in related:
if x is not None:
uowcommit.issue_post_update(
state,
state,
[r for l, r in self.prop.synchronize_pairs]
)
break
@@ -312,21 +312,21 @@ class DependencyProcessor(object):
class OneToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete,
):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
uow,
self.mapper.primary_base_mapper,
True)
uow.dependencies.update([
@@ -352,22 +352,22 @@ class OneToManyDP(DependencyProcessor):
(before_delete, child_deletes),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
child_post_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
uow,
self.mapper.primary_base_mapper,
False)
child_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.mapper.primary_base_mapper,
uow,
self.mapper.primary_base_mapper,
True)
# TODO: this whole block is not covered
@@ -393,7 +393,7 @@ class OneToManyDP(DependencyProcessor):
else:
uow.dependencies.update([
(before_delete, child_pre_updates),
(child_pre_updates, delete_parent),
(child_pre_updates, delete_parent),
])
elif not isdelete:
uow.dependencies.update([
@@ -408,16 +408,16 @@ class OneToManyDP(DependencyProcessor):
])
def presort_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their
# foreign key to the parent set to NULL
should_null_fks = not self.cascade.delete and \
not self.passive_deletes == 'all'
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
@@ -430,7 +430,7 @@ class OneToManyDP(DependencyProcessor):
if should_null_fks:
for child in history.unchanged:
if child is not None:
uowcommit.register_object(child,
uowcommit.register_object(child,
operation="delete", prop=self.prop)
@@ -447,25 +447,25 @@ class OneToManyDP(DependencyProcessor):
passive = attributes.PASSIVE_OFF
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
passive)
if history:
for child in history.added:
if child is not None:
uowcommit.register_object(child, cancel_delete=True,
operation="add",
uowcommit.register_object(child, cancel_delete=True,
operation="add",
prop=self.prop)
children_added.update(history.added)
for child in history.deleted:
if not self.cascade.delete_orphan:
uowcommit.register_object(child, isdelete=False,
operation='delete',
uowcommit.register_object(child, isdelete=False,
operation='delete',
prop=self.prop)
elif self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
@@ -478,16 +478,16 @@ class OneToManyDP(DependencyProcessor):
for child in history.unchanged:
if child is not None:
uowcommit.register_object(
child,
False,
child,
False,
self.passive_updates,
operation="pk change",
prop=self.prop)
def process_deletes(self, uowcommit, states):
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their foreign
# key to the parent set to NULL this phase can be called
# head object is being deleted, and we manage its list of
# child objects the child objects have to have their foreign
# key to the parent set to NULL this phase can be called
# safely for any cascade but is unnecessary if delete cascade
# is on.
@@ -496,17 +496,17 @@ class OneToManyDP(DependencyProcessor):
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.deleted:
if child is not None and \
self.hasparent(child) is False:
self._synchronize(
state,
child,
None, True,
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child, uowcommit, [state])
@@ -516,18 +516,18 @@ class OneToManyDP(DependencyProcessor):
difference(children_added):
if child is not None:
self._synchronize(
state,
child,
None, True,
state,
child,
None, True,
uowcommit, False)
if self.post_update and child:
self._post_update(child,
uowcommit,
self._post_update(child,
uowcommit,
[state])
# technically, we can even remove each child from the
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
# collection here too. but this would be a somewhat
# inconsistent behavior since it wouldn't happen
#if the old parent wasn't deleted but child was moved.
def process_saves(self, uowcommit, states):
@@ -538,7 +538,7 @@ class OneToManyDP(DependencyProcessor):
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None,
self._synchronize(state, child, None,
False, uowcommit, False)
if child is not None and self.post_update:
self._post_update(child, uowcommit, [state])
@@ -546,15 +546,15 @@ class OneToManyDP(DependencyProcessor):
for child in history.deleted:
if not self.cascade.delete_orphan and \
not self.hasparent(child):
self._synchronize(state, child, None, True,
self._synchronize(state, child, None, True,
uowcommit, False)
if self._pks_changed(uowcommit, state):
for child in history.unchanged:
self._synchronize(state, child, None,
self._synchronize(state, child, None,
False, uowcommit, True)
def _synchronize(self, state, child,
def _synchronize(self, state, child,
associationrow, clearkeys, uowcommit,
pks_changed):
source = state
@@ -566,15 +566,15 @@ class OneToManyDP(DependencyProcessor):
if clearkeys:
sync.clear(dest, self.mapper, self.prop.synchronize_pairs)
else:
sync.populate(source, self.parent, dest, self.mapper,
sync.populate(source, self.parent, dest, self.mapper,
self.prop.synchronize_pairs, uowcommit,
self.passive_updates and pks_changed)
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
class ManyToOneDP(DependencyProcessor):
@@ -582,22 +582,22 @@ class ManyToOneDP(DependencyProcessor):
DependencyProcessor.__init__(self, prop)
self.mapper._dependency_processors.append(DetectKeySwitch(prop))
def per_property_dependencies(self, uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
def per_property_dependencies(self, uow,
parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete):
if self.post_update:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
uow,
self.parent.primary_base_mapper,
False)
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
@@ -618,19 +618,19 @@ class ManyToOneDP(DependencyProcessor):
(parent_deletes, child_deletes)
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if self.post_update:
if not isdelete:
parent_post_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
uow,
self.parent.primary_base_mapper,
False)
if childisdelete:
uow.dependencies.update([
@@ -646,8 +646,8 @@ class ManyToOneDP(DependencyProcessor):
])
else:
parent_pre_updates = unitofwork.IssuePostUpdate(
uow,
self.parent.primary_base_mapper,
uow,
self.parent.primary_base_mapper,
True)
uow.dependencies.update([
@@ -677,8 +677,8 @@ class ManyToOneDP(DependencyProcessor):
if self.cascade.delete or self.cascade.delete_orphan:
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
if self.cascade.delete_orphan:
@@ -688,7 +688,7 @@ class ManyToOneDP(DependencyProcessor):
for child in todelete:
if child is None:
continue
uowcommit.register_object(child, isdelete=True,
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete', child):
@@ -700,14 +700,14 @@ class ManyToOneDP(DependencyProcessor):
uowcommit.register_object(state, operation="add", prop=self.prop)
if self.cascade.delete_orphan:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
ret = True
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
@@ -721,15 +721,15 @@ class ManyToOneDP(DependencyProcessor):
not self.cascade.delete_orphan and \
not self.passive_deletes == 'all':
# post_update means we have to update our
# post_update means we have to update our
# row to not reference the child object
# before we can DELETE the row
for state in states:
self._synchronize(state, None, None, True, uowcommit)
if state and self.post_update:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
self._post_update(state, uowcommit, history.sum())
@@ -737,12 +737,12 @@ class ManyToOneDP(DependencyProcessor):
def process_saves(self, uowcommit, states):
for state in states:
history = uowcommit.get_attribute_history(
state,
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.added:
self._synchronize(state, child, None, False,
self._synchronize(state, child, None, False,
uowcommit, "add")
if self.post_update:
@@ -759,7 +759,7 @@ class ManyToOneDP(DependencyProcessor):
not uowcommit.session._contains_state(child):
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return
@@ -767,14 +767,14 @@ class ManyToOneDP(DependencyProcessor):
sync.clear(state, self.parent, self.prop.synchronize_pairs)
else:
self._verify_canload(child)
sync.populate(child, self.mapper, state,
self.parent,
self.prop.synchronize_pairs,
sync.populate(child, self.mapper, state,
self.parent,
self.prop.synchronize_pairs,
uowcommit,
False)
False)
class DetectKeySwitch(DependencyProcessor):
"""For many-to-one relationships with no one-to-many backref,
"""For many-to-one relationships with no one-to-many backref,
searches for parents through the unit of work when a primary
key has changed and updates them.
@@ -798,7 +798,7 @@ class DetectKeySwitch(DependencyProcessor):
def per_property_flush_actions(self, uow):
parent_saves = unitofwork.SaveUpdateAll(
uow,
uow,
self.parent.base_mapper)
after_save = unitofwork.ProcessAll(uow, self, False, False)
uow.dependencies.update([
@@ -837,7 +837,7 @@ class DetectKeySwitch(DependencyProcessor):
def _key_switchers(self, uow, states):
switched, notswitched = uow.memo(
('pk_switchers', self),
('pk_switchers', self),
lambda: (set(), set())
)
@@ -865,29 +865,29 @@ class DetectKeySwitch(DependencyProcessor):
related is not None:
related_state = attributes.instance_state(dict_[self.key])
if related_state in switchers:
uowcommit.register_object(state,
False,
uowcommit.register_object(state,
False,
self.passive_updates)
sync.populate(
related_state,
self.mapper, state,
self.parent, self.prop.synchronize_pairs,
related_state,
self.mapper, state,
self.parent, self.prop.synchronize_pairs,
uowcommit, self.passive_updates)
def _pks_changed(self, uowcommit, state):
return bool(state.key) and sync.source_modified(uowcommit,
state,
self.mapper,
return bool(state.key) and sync.source_modified(uowcommit,
state,
self.mapper,
self.prop.synchronize_pairs)
class ManyToManyDP(DependencyProcessor):
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
def per_property_dependencies(self, uow, parent_saves,
child_saves,
parent_deletes,
child_deletes,
after_save,
before_delete
):
@@ -896,9 +896,9 @@ class ManyToManyDP(DependencyProcessor):
(child_saves, after_save),
(after_save, child_deletes),
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# a rowswitch on the parent from deleted to saved
# can make this one occur, as the "save" may remove
# an element from the
# "deleted" list before we have a chance to
# process its child rows
(before_delete, parent_saves),
@@ -908,11 +908,11 @@ class ManyToManyDP(DependencyProcessor):
(before_delete, child_saves),
])
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
def per_state_dependencies(self, uow,
save_parent,
delete_parent,
child_action,
after_save, before_delete,
isdelete, childisdelete):
if not isdelete:
if childisdelete:
@@ -933,25 +933,25 @@ class ManyToManyDP(DependencyProcessor):
def presort_deletes(self, uowcommit, states):
if not self.passive_deletes:
# if no passive deletes, load history on
# if no passive deletes, load history on
# the collection, so that prop_has_changes()
# returns True
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
def presort_saves(self, uowcommit, states):
if not self.passive_updates:
# if no passive updates, load history on
# if no passive updates, load history on
# each collection where parent has changed PK,
# so that prop_has_changes() returns True
for state in states:
if self._pks_changed(uowcommit, state):
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
attributes.PASSIVE_OFF)
if not self.cascade.delete_orphan:
@@ -961,16 +961,16 @@ class ManyToManyDP(DependencyProcessor):
# if delete_orphan check is turned on.
for state in states:
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
attributes.PASSIVE_NO_INITIALIZE)
if history:
for child in history.deleted:
if self.hasparent(child) is False:
uowcommit.register_object(child, isdelete=True,
uowcommit.register_object(child, isdelete=True,
operation="delete", prop=self.prop)
for c, m, st_, dct_ in self.mapper.cascade_iterator(
'delete',
'delete',
child):
uowcommit.register_object(
st_, isdelete=True)
@@ -983,23 +983,23 @@ class ManyToManyDP(DependencyProcessor):
processed = self._get_reversed_processed_set(uowcommit)
tmp = set()
for state in states:
# this history should be cached already, as
# this history should be cached already, as
# we loaded it in preprocess_deletes
history = uowcommit.get_attribute_history(
state,
self.key,
state,
self.key,
self._passive_delete_flag)
if history:
for child in history.non_added():
if child is None or \
(processed is not None and
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(
state,
child,
associationrow,
state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
@@ -1009,7 +1009,7 @@ class ManyToManyDP(DependencyProcessor):
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def process_saves(self, uowcommit, states):
@@ -1022,7 +1022,7 @@ class ManyToManyDP(DependencyProcessor):
for state in states:
need_cascade_pks = not self.passive_updates and \
self._pks_changed(uowcommit, state)
self._pks_changed(uowcommit, state)
if need_cascade_pks:
passive = attributes.PASSIVE_OFF
else:
@@ -1032,45 +1032,45 @@ class ManyToManyDP(DependencyProcessor):
if history:
for child in history.added:
if child is None or \
(processed is not None and
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "add"):
continue
secondary_insert.append(associationrow)
for child in history.deleted:
if child is None or \
(processed is not None and
(processed is not None and
(state, child) in processed):
continue
associationrow = {}
if not self._synchronize(state,
child,
associationrow,
if not self._synchronize(state,
child,
associationrow,
False, uowcommit, "delete"):
continue
secondary_delete.append(associationrow)
tmp.update((c, state)
tmp.update((c, state)
for c in history.added + history.deleted)
if need_cascade_pks:
for child in history.unchanged:
associationrow = {}
sync.update(state,
self.parent,
associationrow,
"old_",
sync.update(state,
self.parent,
associationrow,
"old_",
self.prop.synchronize_pairs)
sync.update(child,
self.mapper,
associationrow,
"old_",
sync.update(child,
self.mapper,
associationrow,
"old_",
self.prop.secondary_synchronize_pairs)
secondary_update.append(associationrow)
@@ -1078,18 +1078,18 @@ class ManyToManyDP(DependencyProcessor):
if processed is not None:
processed.update(tmp)
self._run_crud(uowcommit, secondary_insert,
self._run_crud(uowcommit, secondary_insert,
secondary_update, secondary_delete)
def _run_crud(self, uowcommit, secondary_insert,
def _run_crud(self, uowcommit, secondary_insert,
secondary_update, secondary_delete):
connection = uowcommit.transaction.connection(self.mapper)
if secondary_delete:
associationrow = secondary_delete[0]
statement = self.secondary.delete(sql.and_(*[
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
c == sql.bindparam(c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_delete)
@@ -1098,7 +1098,7 @@ class ManyToManyDP(DependencyProcessor):
result.rowcount != len(secondary_delete):
raise exc.StaleDataError(
"DELETE statement on table '%s' expected to delete %d row(s); "
"Only %d were matched." %
"Only %d were matched." %
(self.secondary.description, len(secondary_delete),
result.rowcount)
)
@@ -1106,8 +1106,8 @@ class ManyToManyDP(DependencyProcessor):
if secondary_update:
associationrow = secondary_update[0]
statement = self.secondary.update(sql.and_(*[
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
c == sql.bindparam("old_" + c.key, type_=c.type)
for c in self.secondary.c
if c.key in associationrow
]))
result = connection.execute(statement, secondary_update)
@@ -1115,7 +1115,7 @@ class ManyToManyDP(DependencyProcessor):
result.rowcount != len(secondary_update):
raise exc.StaleDataError(
"UPDATE statement on table '%s' expected to update %d row(s); "
"Only %d were matched." %
"Only %d were matched." %
(self.secondary.description, len(secondary_update),
result.rowcount)
)
@@ -1124,7 +1124,7 @@ class ManyToManyDP(DependencyProcessor):
statement = self.secondary.insert()
connection.execute(statement, secondary_insert)
def _synchronize(self, state, child, associationrow,
def _synchronize(self, state, child, associationrow,
clearkeys, uowcommit, operation):
if associationrow is None:
return
@@ -1133,13 +1133,13 @@ class ManyToManyDP(DependencyProcessor):
if not child.deleted:
util.warn(
"Object of type %s not in session, %s "
"operation along '%s' won't proceed" %
"operation along '%s' won't proceed" %
(mapperutil.state_class_str(child), operation, self.prop))
return False
self._verify_canload(child)
sync.populate_dict(state, self.parent, associationrow,
sync.populate_dict(state, self.parent, associationrow,
self.prop.synchronize_pairs)
sync.populate_dict(child, self.mapper, associationrow,
self.prop.secondary_synchronize_pairs)
@@ -1148,9 +1148,9 @@ class ManyToManyDP(DependencyProcessor):
def _pks_changed(self, uowcommit, state):
return sync.source_modified(
uowcommit,
state,
self.parent,
uowcommit,
state,
self.parent,
self.prop.synchronize_pairs)
_direction_to_processor = {

View File

@@ -1,5 +1,5 @@
# orm/deprecated_interfaces.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -11,10 +11,10 @@ from interfaces import EXT_CONTINUE
class MapperExtension(object):
"""Base implementation for :class:`.Mapper` event hooks.
.. note::
.. note::
:class:`.MapperExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
refer to :func:`.event.listen` as well as
:class:`.MapperEvents`.
New extension classes subclass :class:`.MapperExtension` and are specified
@@ -42,8 +42,8 @@ class MapperExtension(object):
to the next ``MapperExtension`` for processing". For methods
that return objects like translated rows or new object
instances, EXT_CONTINUE means the result of the method
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
should be ignored. In some cases it's required for a
default mapper activity to be performed, such as adding a
new instance to a result list.
The symbol EXT_STOP has significance within a chain
@@ -91,29 +91,29 @@ class MapperExtension(object):
def reconstruct(instance, ctx):
ls_meth(self, instance)
return reconstruct
event.listen(self.class_manager, 'load',
event.listen(self.class_manager, 'load',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_instance':
def go(ls_meth):
def init_instance(instance, args, kwargs):
ls_meth(self, self.class_,
self.class_manager.original_init,
ls_meth(self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_instance
event.listen(self.class_manager, 'init',
event.listen(self.class_manager, 'init',
go(ls_meth), raw=False, propagate=True)
elif meth == 'init_failed':
def go(ls_meth):
def init_failed(instance, args, kwargs):
util.warn_exception(ls_meth, self, self.class_,
self.class_manager.original_init,
util.warn_exception(ls_meth, self, self.class_,
self.class_manager.original_init,
instance, args, kwargs)
return init_failed
event.listen(self.class_manager, 'init_failure',
event.listen(self.class_manager, 'init_failure',
go(ls_meth), raw=False, propagate=True)
else:
event.listen(self, "%s" % meth, ls_meth,
event.listen(self, "%s" % meth, ls_meth,
raw=False, retval=True, propagate=True)
@@ -121,7 +121,7 @@ class MapperExtension(object):
"""Receive a class when the mapper is first constructed, and has
applied instrumentation to the mapped class.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -130,25 +130,25 @@ class MapperExtension(object):
def init_instance(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
return EXT_CONTINUE
def init_failed(self, mapper, class_, oldinit, instance, args, kwargs):
"""Receive an instance when it's constructor has been called,
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -160,9 +160,9 @@ class MapperExtension(object):
This is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
from that row. The given row may or may not be a
``RowProxy`` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
@@ -197,7 +197,7 @@ class MapperExtension(object):
"""
return EXT_CONTINUE
def append_result(self, mapper, selectcontext, row, instance,
def append_result(self, mapper, selectcontext, row, instance,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
@@ -231,7 +231,7 @@ class MapperExtension(object):
return EXT_CONTINUE
def populate_instance(self, mapper, selectcontext, row,
def populate_instance(self, mapper, selectcontext, row,
instance, **flags):
"""Receive an instance before that instance has
its attributes populated.
@@ -247,10 +247,11 @@ class MapperExtension(object):
instance population will not proceed, giving this extension an
opportunity to populate the instance itself, if desired.
As of 0.5, most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
.. deprecated:: 0.5
Most usages of this hook are obsolete. For a
generic "object has been newly created from a row" hook, use
``reconstruct_instance()``, or the ``@orm.reconstructor``
decorator.
"""
return EXT_CONTINUE
@@ -265,11 +266,11 @@ class MapperExtension(object):
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -284,12 +285,12 @@ class MapperExtension(object):
Column-based attributes can be modified within this method
which will result in the new value being inserted. However
*no* changes to the overall flush plan can be made, and
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -299,7 +300,7 @@ class MapperExtension(object):
def after_insert(self, mapper, connection, instance):
"""Receive an object instance after that instance is inserted.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -326,12 +327,12 @@ class MapperExtension(object):
Column-based attributes can be modified within this method
which will result in the new value being updated. However
*no* changes to the overall flush plan can be made, and
*no* changes to the overall flush plan can be made, and
manipulation of the ``Session`` will not have the desired effect.
To manipulate the ``Session`` within an extension, use
To manipulate the ``Session`` within an extension, use
``SessionExtension``.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -341,7 +342,7 @@ class MapperExtension(object):
def after_update(self, mapper, connection, instance):
"""Receive an object instance after that instance is updated.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -356,7 +357,7 @@ class MapperExtension(object):
desired effect. To manipulate the ``Session`` within an
extension, use ``SessionExtension``.
The return value is only significant within the ``MapperExtension``
The return value is only significant within the ``MapperExtension``
chain; the parent mapper's behavior isn't modified by this method.
"""
@@ -377,10 +378,10 @@ class SessionExtension(object):
"""Base implementation for :class:`.Session` event hooks.
.. note::
.. note::
:class:`.SessionExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
refer to :func:`.event.listen` as well as
:class:`.SessionEvents`.
Subclasses may be installed into a :class:`.Session` (or
@@ -497,10 +498,10 @@ class AttributeExtension(object):
"""Base implementation for :class:`.AttributeImpl` event hooks, events
that fire upon attribute mutations in user code.
.. note::
.. note::
:class:`.AttributeExtension` is deprecated. Please
refer to :func:`.event.listen` as well as
refer to :func:`.event.listen` as well as
:class:`.AttributeEvents`.
:class:`.AttributeExtension` is used to listen for set,
@@ -554,10 +555,10 @@ class AttributeExtension(object):
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'remove', listener.remove,
active_history=listener.active_history,
active_history=listener.active_history,
raw=True, retval=True)
event.listen(self, 'set', listener.set,
active_history=listener.active_history,
active_history=listener.active_history,
raw=True, retval=True)
def append(self, state, value, initiator):

View File

@@ -1,5 +1,5 @@
# orm/descriptor_props.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -19,7 +19,7 @@ from sqlalchemy.sql import expression
properties = util.importlater('sqlalchemy.orm', 'properties')
class DescriptorProperty(MapperProperty):
""":class:`.MapperProperty` which proxies access to a
""":class:`.MapperProperty` which proxies access to a
user-defined descriptor."""
doc = None
@@ -35,7 +35,7 @@ class DescriptorProperty(MapperProperty):
self.key = key
if hasattr(prop, 'get_history'):
def get_history(self, state, dict_,
def get_history(self, state, dict_,
passive=attributes.PASSIVE_OFF):
return prop.get_history(state, dict_, passive)
@@ -62,7 +62,7 @@ class DescriptorProperty(MapperProperty):
create_proxied_attribute(self.descriptor)\
(
self.parent.class_,
self.key,
self.key,
self.descriptor,
lambda: self._comparator_factory(mapper),
doc=self.doc
@@ -89,7 +89,7 @@ class CompositeProperty(DescriptorProperty):
self._setup_event_handlers()
def do_init(self):
"""Initialization which occurs after the :class:`.CompositeProperty`
"""Initialization which occurs after the :class:`.CompositeProperty`
has been associated with its parent mapper.
"""
@@ -97,7 +97,7 @@ class CompositeProperty(DescriptorProperty):
self._setup_arguments_on_columns()
def _create_descriptor(self):
"""Create the Python descriptor that will serve as
"""Create the Python descriptor that will serve as
the access point on instances of the mapped class.
"""
@@ -113,12 +113,12 @@ class CompositeProperty(DescriptorProperty):
values = [getattr(instance, key) for key in self._attribute_keys]
# current expected behavior here is that the composite is
# created on access if the object is persistent or if
# col attributes have non-None. This would be better
# created on access if the object is persistent or if
# col attributes have non-None. This would be better
# if the composite were created unconditionally,
# but that would be a behavioral change.
if self.key not in dict_ and (
state.key is not None or
state.key is not None or
not _none_set.issuperset(values)
):
dict_[self.key] = self.composite_class(*values)
@@ -139,7 +139,7 @@ class CompositeProperty(DescriptorProperty):
setattr(instance, key, None)
else:
for key, value in zip(
self._attribute_keys,
self._attribute_keys,
value.__composite_values__()):
setattr(instance, key, value)
@@ -198,7 +198,7 @@ class CompositeProperty(DescriptorProperty):
return
# if column elements aren't loaded, skip.
# __get__() will initiate a load for those
# __get__() will initiate a load for those
# columns
for k in self._attribute_keys:
if k not in dict_:
@@ -206,7 +206,7 @@ class CompositeProperty(DescriptorProperty):
#assert self.key not in dict_
dict_[self.key] = self.composite_class(
*[state.dict[key] for key in
*[state.dict[key] for key in
self._attribute_keys]
)
@@ -217,16 +217,16 @@ class CompositeProperty(DescriptorProperty):
def insert_update_handler(mapper, connection, state):
"""After an insert or update, some columns may be expired due
to server side defaults, or re-populated due to client side
defaults. Pop out the composite value here so that it
defaults. Pop out the composite value here so that it
recreates.
"""
state.dict.pop(self.key, None)
event.listen(self.parent, 'after_insert',
event.listen(self.parent, 'after_insert',
insert_update_handler, raw=True)
event.listen(self.parent, 'after_update',
event.listen(self.parent, 'after_update',
insert_update_handler, raw=True)
event.listen(self.parent, 'load', load_handler, raw=True, propagate=True)
event.listen(self.parent, 'refresh', load_handler, raw=True, propagate=True)
@@ -307,19 +307,19 @@ class CompositeProperty(DescriptorProperty):
return str(self.parent.class_.__name__) + "." + self.key
class ConcreteInheritedProperty(DescriptorProperty):
"""A 'do nothing' :class:`.MapperProperty` that disables
"""A 'do nothing' :class:`.MapperProperty` that disables
an attribute on a concrete subclass that is only present
on the inherited mapper, not the concrete classes' mapper.
Cases where this occurs include:
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
* When the superclass mapper is mapped against a
"polymorphic union", which includes all attributes from
all subclasses.
* When a relationship() is configured on an inherited mapper,
but not on the subclass mapper. Concrete mappers require
that relationship() is configured explicitly on each
subclass.
that relationship() is configured explicitly on each
subclass.
"""
@@ -337,7 +337,7 @@ class ConcreteInheritedProperty(DescriptorProperty):
def warn():
raise AttributeError("Concrete %s does not implement "
"attribute %r at the instance level. Add this "
"property explicitly to %s." %
"property explicitly to %s." %
(self.parent, self.key, self.parent))
class NoninheritedConcreteProp(object):
@@ -354,7 +354,7 @@ class ConcreteInheritedProperty(DescriptorProperty):
class SynonymProperty(DescriptorProperty):
def __init__(self, name, map_column=None,
def __init__(self, name, map_column=None,
descriptor=None, comparator_factory=None,
doc=None):
self.name = name
@@ -387,7 +387,7 @@ class SynonymProperty(DescriptorProperty):
if self.key not in parent.mapped_table.c:
raise sa_exc.ArgumentError(
"Can't compile synonym '%s': no column on table "
"'%s' named '%s'"
"'%s' named '%s'"
% (self.name, parent.mapped_table.description, self.key))
elif parent.mapped_table.c[self.key] in \
parent._columntoproperty and \
@@ -397,13 +397,13 @@ class SynonymProperty(DescriptorProperty):
raise sa_exc.ArgumentError(
"Can't call map_column=True for synonym %r=%r, "
"a ColumnProperty already exists keyed to the name "
"%r for column %r" %
"%r for column %r" %
(self.key, self.name, self.name, self.key)
)
p = properties.ColumnProperty(parent.mapped_table.c[self.key])
parent._configure_property(
self.name, p,
init=init,
self.name, p,
init=init,
setparent=True)
p._mapped_by_synonym = self.key

View File

@@ -1,5 +1,5 @@
# orm/dynamic.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -12,7 +12,6 @@ basic add/delete mutation.
"""
from sqlalchemy import log, util
from sqlalchemy import exc as sa_exc
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.sql import operators
from sqlalchemy.orm import (
@@ -20,12 +19,16 @@ from sqlalchemy.orm import (
)
from sqlalchemy.orm.query import Query
from sqlalchemy.orm.util import has_identity
from sqlalchemy.orm import attributes, collections
from sqlalchemy.orm import collections
class DynaLoader(strategies.AbstractRelationshipLoader):
def init_class_attribute(self, mapper):
self.is_class_level = True
if not self.uselist:
util.warn(
"On relationship %s, 'dynamic' loaders cannot be used with "
"many-to-one/one-to-one relationships and/or "
"uselist=False." % self.parent_property)
strategies._register_attribute(self,
mapper,
useobject=True,
@@ -63,7 +66,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
else:
return self.query_class(self, state)
def get_collection(self, state, dict_, user_data=None,
def get_collection(self, state, dict_, user_data=None,
passive=attributes.PASSIVE_NO_INITIALIZE):
if passive is not attributes.PASSIVE_OFF:
return self._get_collection_history(state,
@@ -97,7 +100,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
if self.key not in state.committed_state:
state.committed_state[self.key] = CollectionHistory(self, state)
state.modified_event(dict_,
state.modified_event(dict_,
self,
attributes.NEVER_SET)
@@ -107,7 +110,7 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
return state.committed_state[self.key]
def set(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF,
passive=attributes.PASSIVE_OFF,
check_old=None, pop=False):
if initiator and initiator.parent_token is self.parent_token:
return
@@ -144,8 +147,8 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
def get_all_pending(self, state, dict_):
c = self._get_collection_history(state, True)
return [
(attributes.instance_state(x), x)
for x in
(attributes.instance_state(x), x)
for x in
c.added_items + c.unchanged_items + c.deleted_items
]
@@ -160,12 +163,12 @@ class DynamicAttributeImpl(attributes.AttributeImpl):
else:
return c
def append(self, state, dict_, value, initiator,
def append(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_append_event(state, dict_, value, initiator)
def remove(self, state, dict_, value, initiator,
def remove(self, state, dict_, value, initiator,
passive=attributes.PASSIVE_OFF):
if initiator is not self:
self.fire_remove_event(state, dict_, value, initiator)
@@ -204,9 +207,9 @@ class AppenderMixin(object):
mapper = object_mapper(instance)
prop = mapper._props[self.attr.key]
self._criterion = prop.compare(
operators.eq,
instance,
value_is_parent=True,
operators.eq,
instance,
value_is_parent=True,
alias_secondary=False)
if self.attr.order_by:
@@ -280,12 +283,12 @@ class AppenderMixin(object):
def append(self, item):
self.attr.append(
attributes.instance_state(self.instance),
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)
def remove(self, item):
self.attr.remove(
attributes.instance_state(self.instance),
attributes.instance_state(self.instance),
attributes.instance_dict(self.instance), item, None)

View File

@@ -1,5 +1,5 @@
# orm/evaluator.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -13,10 +13,10 @@ class UnevaluatableError(Exception):
pass
_straight_ops = set(getattr(operators, op)
for op in ('add', 'mul', 'sub',
for op in ('add', 'mul', 'sub',
# Py2K
'div',
# end Py2K
# end Py2K
'mod', 'truediv',
'lt', 'le', 'ne', 'gt', 'ge', 'eq'))

View File

@@ -1,5 +1,5 @@
# orm/events.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -91,11 +91,11 @@ class InstanceEvents(event.Events):
When using :class:`.InstanceEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers as well as the
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers as well as the
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
@@ -142,17 +142,17 @@ class InstanceEvents(event.Events):
def init(self, target, args, kwargs):
"""Receive an instance when it's constructor is called.
This method is only called during a userland construction of
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
"""
def init_failure(self, target, args, kwargs):
"""Receive an instance when it's constructor has been called,
"""Receive an instance when it's constructor has been called,
and raised an exception.
This method is only called during a userland construction of
This method is only called during a userland construction of
an object. It is not called when an object is loaded from the
database.
@@ -168,12 +168,12 @@ class InstanceEvents(event.Events):
instance's lifetime.
Note that during a result-row load, this method is called upon
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
the first row received for this instance. Note that some
attributes and collections may or may not be loaded or even
initialized, depending on what's present in the result rows.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
@@ -184,16 +184,16 @@ class InstanceEvents(event.Events):
"""
def refresh(self, target, context, attrs):
"""Receive an object instance after one or more attributes have
"""Receive an object instance after one or more attributes have
been refreshed from a query.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param context: the :class:`.QueryContext` corresponding to the
current :class:`.Query` in progress.
:param attrs: iterable collection of attribute names which
:param attrs: iterable collection of attribute names which
were populated, or None if all column-mapped, non-deferred
attributes were populated.
@@ -206,23 +206,23 @@ class InstanceEvents(event.Events):
'keys' is a list of attribute names. If None, the entire
state was expired.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param attrs: iterable collection of attribute
names which were expired, or None if all attributes were
names which were expired, or None if all attributes were
expired.
"""
def resurrect(self, target):
"""Receive an object instance as it is 'resurrected' from
"""Receive an object instance as it is 'resurrected' from
garbage collection, which occurs when a "dirty" state falls
out of scope.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
@@ -232,28 +232,28 @@ class InstanceEvents(event.Events):
"""Receive an object instance when its associated state is
being pickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary returned by
:param state_dict: the dictionary returned by
:class:`.InstanceState.__getstate__`, containing the state
to be pickled.
"""
def unpickle(self, target, state_dict):
"""Receive an object instance after it's associated state has
been unpickled.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param state_dict: the dictionary sent to
:class:`.InstanceState.__setstate__`, containing the state
dictionary which was pickled.
"""
class MapperEvents(event.Events):
@@ -267,7 +267,7 @@ class MapperEvents(event.Events):
# execute a stored procedure upon INSERT,
# apply the value to the row to be inserted
target.calculated_value = connection.scalar(
"select my_special_function(%d)"
"select my_special_function(%d)"
% target.special_number)
# associate the listener function with SomeMappedClass,
@@ -304,16 +304,16 @@ class MapperEvents(event.Events):
When using :class:`.MapperEvents`, several modifiers are
available to the :func:`.event.listen` function.
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers as well as the
:param propagate=False: When True, the event listener should
be applied to all inheriting mappers as well as the
mapper which is the target of this listener.
:param raw=False: When True, the "target" argument passed
to applicable event listener functions will be the
to applicable event listener functions will be the
instance's :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event function
must have a return value, the purpose of which is either to
control subsequent event propagation, or to otherwise alter
control subsequent event propagation, or to otherwise alter
the operation in progress by the mapper. Possible return
values are:
@@ -322,7 +322,7 @@ class MapperEvents(event.Events):
* ``sqlalchemy.orm.interfaces.EXT_STOP`` - cancel all subsequent
event handlers in the chain.
* other values - the return value specified by specific listeners,
such as :meth:`~.MapperEvents.translate_row` or
such as :meth:`~.MapperEvents.translate_row` or
:meth:`~.MapperEvents.create_instance`.
"""
@@ -340,7 +340,7 @@ class MapperEvents(event.Events):
return target
@classmethod
def _listen(cls, target, identifier, fn,
def _listen(cls, target, identifier, fn,
raw=False, retval=False, propagate=False):
if not raw or not retval:
@@ -370,7 +370,7 @@ class MapperEvents(event.Events):
event.Events._listen(target, identifier, fn)
def instrument_class(self, mapper, class_):
"""Receive a class when the mapper is first constructed,
"""Receive a class when the mapper is first constructed,
before instrumentation is applied to the mapped class.
This event is the earliest phase of mapper construction.
@@ -388,8 +388,16 @@ class MapperEvents(event.Events):
def mapper_configured(self, mapper, class_):
"""Called when the mapper for the class is fully configured.
This event is the latest phase of mapper construction.
The mapper should be in its final state.
This event is the latest phase of mapper construction, and
is invoked when the mapped classes are first used, so that relationships
between mappers can be resolved. When the event is called,
the mapper should be in its final state.
While the configuration event normally occurs automatically,
it can be forced to occur ahead of time, in the case where the event
is needed before any actual mapper usage, by using the
:func:`.configure_mappers` function.
:param mapper: the :class:`.Mapper` which is the target
of this event.
@@ -404,11 +412,11 @@ class MapperEvents(event.Events):
This corresponds to the :func:`.orm.configure_mappers` call, which
note is usually called automatically as mappings are first
used.
Theoretically this event is called once per
application, but is actually called any time new mappers
have been affected by a :func:`.orm.configure_mappers` call. If new mappings
are constructed after existing ones have already been used,
are constructed after existing ones have already been used,
this event can be called again.
"""
@@ -420,9 +428,9 @@ class MapperEvents(event.Events):
This listener is typically registered with ``retval=True``.
It is called when the mapper first receives a row, before
the object identity or the instance itself has been derived
from that row. The given row may or may not be a
from that row. The given row may or may not be a
:class:`.RowProxy` object - it will always be a dictionary-like
object which contains mapped columns as keys. The
object which contains mapped columns as keys. The
returned object should also be a dictionary-like object
which recognizes mapped columns as keys.
@@ -431,7 +439,7 @@ class MapperEvents(event.Events):
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:return: When configured with ``retval=True``, the function
@@ -454,18 +462,18 @@ class MapperEvents(event.Events):
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param class\_: the mapped class.
:return: When configured with ``retval=True``, the return value
should be a newly created instance of the mapped class,
should be a newly created instance of the mapped class,
or ``EXT_CONTINUE`` indicating that default object construction
should take place.
"""
def append_result(self, mapper, context, row, target,
def append_result(self, mapper, context, row, target,
result, **flags):
"""Receive an object instance before that instance is appended
to a result list.
@@ -478,27 +486,27 @@ class MapperEvents(event.Events):
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param target: the mapped instance being populated. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being populated. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:param result: a list-like object where results are being
appended.
:param \**flags: Additional state information about the
:param \**flags: Additional state information about the
current handling of the row.
:return: If this method is registered with ``retval=True``,
a return value of ``EXT_STOP`` will prevent the instance
from being appended to the given result list, whereas a
from being appended to the given result list, whereas a
return value of ``EXT_CONTINUE`` will result in the default
behavior of appending the value to the result list.
"""
def populate_instance(self, mapper, context, row,
def populate_instance(self, mapper, context, row,
target, **flags):
"""Receive an instance before that instance has
its attributes populated.
@@ -518,11 +526,11 @@ class MapperEvents(event.Events):
:param context: the :class:`.QueryContext`, which includes
a handle to the current :class:`.Query` in progress as well
as additional state information.
:param row: the result row being handled. This may be
:param row: the result row being handled. This may be
an actual :class:`.RowProxy` or may be a dictionary containing
:class:`.Column` objects as keys.
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: When configured with ``retval=True``, a return
@@ -536,9 +544,9 @@ class MapperEvents(event.Events):
"""Receive an object instance before an INSERT statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
This event is used to modify local, non-object related
attributes on the instance before an INSERT occurs, as well
as to emit additional SQL statements on the given
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
@@ -552,23 +560,23 @@ class MapperEvents(event.Events):
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -576,12 +584,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -594,7 +602,7 @@ class MapperEvents(event.Events):
This event is used to modify in-Python-only
state on the instance after an INSERT occurs, as well
as to emit additional SQL statements on the given
as to emit additional SQL statements on the given
connection.
The event is often called for a batch of objects of the
@@ -608,23 +616,23 @@ class MapperEvents(event.Events):
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -632,12 +640,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit INSERT statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -648,9 +656,9 @@ class MapperEvents(event.Events):
"""Receive an object instance before an UPDATE statement
is emitted corresponding to that instance.
This event is used to modify local, non-object related
This event is used to modify local, non-object related
attributes on the instance before an UPDATE occurs, as well
as to emit additional SQL statements on the given
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
@@ -683,23 +691,23 @@ class MapperEvents(event.Events):
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -707,12 +715,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -724,12 +732,12 @@ class MapperEvents(event.Events):
This event is used to modify in-Python-only
state on the instance after an UPDATE occurs, as well
as to emit additional SQL statements on the given
as to emit additional SQL statements on the given
connection.
This method is called for all instances that are
marked as "dirty", *even those which have no net changes
to their column-based attributes*, and for which
to their column-based attributes*, and for which
no UPDATE statement has proceeded. An object is marked
as dirty when any of its column-based attributes have a
"set attribute" operation called or when any of its
@@ -756,23 +764,23 @@ class MapperEvents(event.Events):
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -780,12 +788,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit UPDATE statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being persisted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -796,33 +804,33 @@ class MapperEvents(event.Events):
"""Receive an object instance before a DELETE statement
is emitted corresponding to that instance.
This event is used to emit additional SQL statements on
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class before their DELETE statements are emitted at
once in a later step.
once in a later step.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -830,12 +838,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -846,33 +854,33 @@ class MapperEvents(event.Events):
"""Receive an object instance after a DELETE statement
has been emitted corresponding to that instance.
This event is used to emit additional SQL statements on
This event is used to emit additional SQL statements on
the given connection as well as to perform application
specific bookkeeping related to a deletion event.
The event is often called for a batch of objects of the
same class after their DELETE statements have been emitted at
once in a previous step.
once in a previous step.
.. warning::
Mapper-level flush events are designed to operate **on attributes
local to the immediate object being handled
local to the immediate object being handled
and via SQL operations with the given** :class:`.Connection` **only.**
Handlers here should **not** make alterations to the state of
Handlers here should **not** make alterations to the state of
the :class:`.Session` overall, and in general should not
affect any :func:`.relationship` -mapped attributes, as
affect any :func:`.relationship` -mapped attributes, as
session cascade rules will not function properly, nor is it
always known if the related class has already been handled.
always known if the related class has already been handled.
Operations that **are not supported in mapper events** include:
* :meth:`.Session.add`
* :meth:`.Session.delete`
* Mapped collection append, add, remove, delete, discard, etc.
* Mapped relationship attribute set/del events, i.e. ``someobject.related = someotherobject``
Operations which manipulate the state of the object
relative to other objects are better handled:
* In the ``__init__()`` method of the mapped object itself, or another method
designed to establish some particular state.
* In a ``@validates`` handler, see :ref:`simple_validators`
@@ -880,12 +888,12 @@ class MapperEvents(event.Events):
:param mapper: the :class:`.Mapper` which is the target
of this event.
:param connection: the :class:`.Connection` being used to
:param connection: the :class:`.Connection` being used to
emit DELETE statements for this instance. This
provides a handle into the current transaction on the
provides a handle into the current transaction on the
target database specific to this instance.
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
:param target: the mapped instance being deleted. If
the event is configured with ``raw=True``, this will
instead be the :class:`.InstanceState` state-management
object associated with the instance.
:return: No return value is supported by this event.
@@ -952,7 +960,7 @@ class SessionEvents(event.Events):
transaction is ongoing.
:param session: The target :class:`.Session`.
"""
def after_commit(self, session):
@@ -960,19 +968,19 @@ class SessionEvents(event.Events):
Note that this may not be per-flush if a longer running
transaction is ongoing.
:param session: The target :class:`.Session`.
"""
def after_rollback(self, session):
"""Execute after a real DBAPI rollback has occurred.
Note that this event only fires when the *actual* rollback against
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
the database occurs - it does *not* fire each time the
:meth:`.Session.rollback` method is called, if the underlying
DBAPI transaction has already been rolled back. In many
cases, the :class:`.Session` will not be in
cases, the :class:`.Session` will not be in
an "active" state during this event, as the current
transaction is not valid. To acquire a :class:`.Session`
which is active after the outermost rollback has proceeded,
@@ -984,30 +992,30 @@ class SessionEvents(event.Events):
"""
def after_soft_rollback(self, session, previous_transaction):
"""Execute after any rollback has occurred, including "soft"
"""Execute after any rollback has occurred, including "soft"
rollbacks that don't actually emit at the DBAPI level.
This corresponds to both nested and outer rollbacks, i.e.
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
the innermost rollback that calls the DBAPI's
rollback() method, as well as the enclosing rollback
calls that only pop themselves from the transaction stack.
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
The given :class:`.Session` can be used to invoke SQL and
:meth:`.Session.query` operations after an outermost rollback
by first checking the :attr:`.Session.is_active` flag::
@event.listens_for(Session, "after_soft_rollback")
def do_something(session, previous_transaction):
if session.is_active:
session.execute("select * from some_table")
:param session: The target :class:`.Session`.
:param previous_transaction: The :class:`.SessionTransaction` transactional
marker object which was just closed. The current :class:`.SessionTransaction`
for the given :class:`.Session` is available via the
:attr:`.Session.transaction` attribute.
New in 0.7.3.
.. versionadded:: 0.7.3
"""
@@ -1030,7 +1038,7 @@ class SessionEvents(event.Events):
Note that the session's state is still in pre-flush, i.e. 'new',
'dirty', and 'deleted' lists still show pre-flush state as well
as the history settings on instance attributes.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
@@ -1044,8 +1052,8 @@ class SessionEvents(event.Events):
This will be when the 'new', 'dirty', and 'deleted' lists are in
their final state. An actual commit() may or may not have
occurred, depending on whether or not the flush started its own
transaction or participated in a larger transaction.
transaction or participated in a larger transaction.
:param session: The target :class:`.Session`.
:param flush_context: Internal :class:`.UOWTransaction` object
which handles the details of the flush.
@@ -1056,9 +1064,9 @@ class SessionEvents(event.Events):
:param session: The target :class:`.Session`.
:param transaction: The :class:`.SessionTransaction`.
:param connection: The :class:`~.engine.base.Connection` object
:param connection: The :class:`~.engine.base.Connection` object
which will be used for SQL statements.
"""
def after_attach(self, session, instance):
@@ -1072,7 +1080,7 @@ class SessionEvents(event.Events):
This is called as a result of the :meth:`.Query.update` method.
:param query: the :class:`.Query` object that this update operation was
called upon.
called upon.
:param query_context: The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
:param result: the :class:`.ResultProxy` returned as a result of the
@@ -1086,7 +1094,7 @@ class SessionEvents(event.Events):
This is called as a result of the :meth:`.Query.delete` method.
:param query: the :class:`.Query` object that this delete operation was
called upon.
called upon.
:param query_context: The :class:`.QueryContext` object, corresponding
to the invocation of an ORM query.
:param result: the :class:`.ResultProxy` returned as a result of the
@@ -1137,15 +1145,15 @@ class AttributeEvents(event.Events):
:param propagate=False: When True, the listener function will
be established not just for the class attribute given, but
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
for attributes of the same name on all current subclasses
of that class, as well as all future subclasses of that
class, using an additional listener that listens for
instrumentation events.
:param raw=False: When True, the "target" argument to the
event will be the :class:`.InstanceState` management
object, rather than the mapped instance itself.
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
:param retval=False: when True, the user-defined event
listening must return the "value" argument from the
function. This gives the listening function the opportunity
to change the value that is ultimately used for a "set"
or "append" event.
@@ -1161,7 +1169,7 @@ class AttributeEvents(event.Events):
return target
@classmethod
def _listen(cls, target, identifier, fn, active_history=False,
def _listen(cls, target, identifier, fn, active_history=False,
raw=False, retval=False,
propagate=False):
if active_history:
@@ -1202,9 +1210,9 @@ class AttributeEvents(event.Events):
be the :class:`.InstanceState` object.
:param value: the value being appended. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
function must return this value, or a new value which
replaces it.
:param initiator: the attribute implementation object
:param initiator: the attribute implementation object
which initiated this event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.
@@ -1218,7 +1226,7 @@ class AttributeEvents(event.Events):
If the listener is registered with ``raw=True``, this will
be the :class:`.InstanceState` object.
:param value: the value being removed.
:param initiator: the attribute implementation object
:param initiator: the attribute implementation object
which initiated this event.
:return: No return value is defined for this event.
"""
@@ -1231,15 +1239,15 @@ class AttributeEvents(event.Events):
be the :class:`.InstanceState` object.
:param value: the value being set. If this listener
is registered with ``retval=True``, the listener
function must return this value, or a new value which
function must return this value, or a new value which
replaces it.
:param oldvalue: the previous value being replaced. This
may also be the symbol ``NEVER_SET`` or ``NO_VALUE``.
If the listener is registered with ``active_history=True``,
the previous value of the attribute will be loaded from
the database if the existing value is currently unloaded
the database if the existing value is currently unloaded
or expired.
:param initiator: the attribute implementation object
:param initiator: the attribute implementation object
which initiated this event.
:return: if the event was registered with ``retval=True``,
the given value, or a new effective value, should be returned.

View File

@@ -1,5 +1,5 @@
# orm/exc.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -18,21 +18,23 @@ class StaleDataError(sa.exc.SQLAlchemyError):
Conditions which cause this to happen include:
* A flush may have attempted to update or delete rows
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
and an unexpected number of rows were matched during
the UPDATE or DELETE statement. Note that when
version_id_col is used, rows in UPDATE or DELETE statements
are also matched against the current known version
identifier.
* A mapped object with version_id_col was refreshed,
* A mapped object with version_id_col was refreshed,
and the version number coming back from the database does
not match that of the object itself.
* An object is detached from its parent object, however
the object was previously attached to a different parent
identity which was garbage collected, and a decision
cannot be made if the new parent was really the most
recent "parent" (new in 0.7.4).
recent "parent".
.. versionadded:: 0.7.4
"""
@@ -50,7 +52,7 @@ class ObjectDereferencedError(sa.exc.SQLAlchemyError):
"""An operation cannot complete due to an object being garbage collected."""
class DetachedInstanceError(sa.exc.SQLAlchemyError):
"""An attempt to access unloaded attributes on a
"""An attempt to access unloaded attributes on a
mapped instance that is detached."""
class UnmappedInstanceError(UnmappedError):
@@ -89,21 +91,21 @@ class UnmappedClassError(UnmappedError):
class ObjectDeletedError(sa.exc.InvalidRequestError):
"""A refresh operation failed to retrieve the database
row corresponding to an object's known primary key identity.
A refresh operation proceeds when an expired attribute is
A refresh operation proceeds when an expired attribute is
accessed on an object, or when :meth:`.Query.get` is
used to retrieve an object which is, upon retrieval, detected
as expired. A SELECT is emitted for the target row
based on primary key; if no row is returned, this
exception is raised.
The true meaning of this exception is simply that
The true meaning of this exception is simply that
no row exists for the primary key identifier associated
with a persistent object. The row may have been
with a persistent object. The row may have been
deleted, or in some cases the primary key updated
to a new value, outside of the ORM's management of the target
object.
object.
"""
def __init__(self, state, msg=None):
if not msg:

View File

@@ -1,5 +1,5 @@
# orm/identity.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -128,7 +128,7 @@ class WeakInstanceDict(IdentityMap):
o = existing_state._is_really_none()
if o is not None:
raise AssertionError("A conflicting state is already "
"present in the identity map for key %r"
"present in the identity map for key %r"
% (key, ))
else:
return

View File

@@ -1,5 +1,5 @@
# orm/instrumentation.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -91,7 +91,7 @@ class ClassManager(dict):
self.originals = {}
self._bases = [mgr for mgr in [
manager_of_class(base)
manager_of_class(base)
for base in self.class_.__bases__
if isinstance(base, type)
] if mgr is not None]
@@ -139,7 +139,7 @@ class ClassManager(dict):
def _instrument_init(self):
# TODO: self.class_.__init__ is often the already-instrumented
# __init__ from an instrumented superclass. We still need to make
# __init__ from an instrumented superclass. We still need to make
# our own wrapper, but it would
# be nice to wrap the original __init__ and not our existing wrapper
# of such, since this adds method overhead.
@@ -212,7 +212,7 @@ class ClassManager(dict):
if key in self.mutable_attributes:
self.mutable_attributes.remove(key)
for cls in self.class_.__subclasses__():
manager = manager_of_class(cls)
manager = manager_of_class(cls)
if manager:
manager.uninstrument_attribute(key, True)
@@ -277,12 +277,12 @@ class ClassManager(dict):
def new_instance(self, state=None):
instance = self.class_.__new__(self.class_)
setattr(instance, self.STATE_ATTR,
setattr(instance, self.STATE_ATTR,
state or self._state_constructor(instance, self))
return instance
def setup_instance(self, instance, state=None):
setattr(instance, self.STATE_ATTR,
setattr(instance, self.STATE_ATTR,
state or self._state_constructor(instance, self))
def teardown_instance(self, instance):
@@ -387,7 +387,7 @@ class _ClassInstrumentationAdapter(ClassManager):
if delegate:
return delegate(key, state, factory)
else:
return ClassManager.initialize_collection(self, key,
return ClassManager.initialize_collection(self, key,
state, factory)
def new_instance(self, state=None):
@@ -463,7 +463,7 @@ def is_instrumented(instance, key):
class InstrumentationRegistry(object):
"""Private instrumentation registration singleton.
All classes are routed through this registry
All classes are routed through this registry
when first instrumented, however the InstrumentationRegistry
is not actually needed unless custom ClassManagers are in use.
@@ -501,7 +501,7 @@ class InstrumentationRegistry(object):
if factory != ClassManager and not self._extended:
# somebody invoked a custom ClassManager.
# reinstall global "getter" functions with the more
# reinstall global "getter" functions with the more
# expensive ones.
self._extended = True
_install_lookup_strategy(self)
@@ -543,7 +543,7 @@ class InstrumentationRegistry(object):
return factories
def manager_of_class(self, cls):
# this is only called when alternate instrumentation
# this is only called when alternate instrumentation
# has been established
if cls is None:
return None
@@ -555,7 +555,7 @@ class InstrumentationRegistry(object):
return finder(cls)
def state_of(self, instance):
# this is only called when alternate instrumentation
# this is only called when alternate instrumentation
# has been established
if instance is None:
raise AttributeError("None has no persistent state.")
@@ -566,7 +566,7 @@ class InstrumentationRegistry(object):
instance.__class__)
def dict_of(self, instance):
# this is only called when alternate instrumentation
# this is only called when alternate instrumentation
# has been established
if instance is None:
raise AttributeError("None has no persistent state.")
@@ -632,7 +632,7 @@ instrumentation_finders.append(find_native_user_instrumentation_hook)
def _generate_init(class_, class_manager):
"""Build an __init__ decorator that triggers ClassManager events."""
# TODO: we should use the ClassManager's notion of the
# TODO: we should use the ClassManager's notion of the
# original '__init__' method, once ClassManager is fixed
# to always reference that.
original__init__ = class_.__init__

View File

@@ -1,5 +1,5 @@
# orm/interfaces.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -61,13 +61,13 @@ class MapperProperty(object):
attribute, as well as that attribute as it appears on individual
instances of the class, including attribute instrumentation,
attribute access, loading behavior, and dependency calculations.
The most common occurrences of :class:`.MapperProperty` are the
mapped :class:`.Column`, which is represented in a mapping as
mapped :class:`.Column`, which is represented in a mapping as
an instance of :class:`.ColumnProperty`,
and a reference to another class produced by :func:`.relationship`,
represented in the mapping as an instance of :class:`.RelationshipProperty`.
"""
cascade = ()
@@ -87,7 +87,7 @@ class MapperProperty(object):
pass
def create_row_processor(self, context, path, reduced_path,
def create_row_processor(self, context, path, reduced_path,
mapper, row, adapter):
"""Return a 3-tuple consisting of three row processing functions.
@@ -263,13 +263,13 @@ class PropComparator(operators.ColumnOperators):
"""Return true if this collection contains any member that meets the
given criterion.
The usual implementation of ``any()`` is
The usual implementation of ``any()`` is
:meth:`.RelationshipProperty.Comparator.any`.
:param criterion: an optional ClauseElement formulated against the
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class attribute
:param \**kwargs: key/value pairs corresponding to member class attribute
names which will be compared via equality to the corresponding
values.
@@ -281,13 +281,13 @@ class PropComparator(operators.ColumnOperators):
"""Return true if this element references a member which meets the
given criterion.
The usual implementation of ``has()`` is
The usual implementation of ``has()`` is
:meth:`.RelationshipProperty.Comparator.has`.
:param criterion: an optional ClauseElement formulated against the
:param criterion: an optional ClauseElement formulated against the
member class' table or attributes.
:param \**kwargs: key/value pairs corresponding to member class attribute
:param \**kwargs: key/value pairs corresponding to member class attribute
names which will be compared via equality to the corresponding
values.
@@ -337,12 +337,12 @@ class StrategizedProperty(MapperProperty):
def setup(self, context, entity, path, reduced_path, adapter, **kwargs):
self._get_context_strategy(context, reduced_path + (self.key,)).\
setup_query(context, entity, path,
setup_query(context, entity, path,
reduced_path, adapter, **kwargs)
def create_row_processor(self, context, path, reduced_path, mapper, row, adapter):
return self._get_context_strategy(context, reduced_path + (self.key,)).\
create_row_processor(context, path,
create_row_processor(context, path,
reduced_path, mapper, row, adapter)
def do_init(self):
@@ -365,7 +365,7 @@ def serialize_path(path):
return None
return zip(
[m.class_ for m in [path[i] for i in range(0, len(path), 2)]],
[m.class_ for m in [path[i] for i in range(0, len(path), 2)]],
[path[i] for i in range(1, len(path), 2)] + [None]
)
@@ -382,7 +382,7 @@ class MapperOption(object):
"""Describe a modification to a Query."""
propagate_to_loaders = False
"""if True, indicate this option should be carried along
"""if True, indicate this option should be carried along
Query object generated by scalar or object lazy loaders.
"""
@@ -464,9 +464,9 @@ class PropertyOption(MapperOption):
else:
raise sa_exc.ArgumentError(
"Can't find property '%s' on any entity "
"specified in this Query. Note the full path "
"from root (%s) to target entity must be specified."
% (token, ",".join(str(x) for
"specified in this Query. Note the full path "
"from root (%s) to target entity must be specified."
% (token, ",".join(str(x) for
x in query._mapper_entities))
)
else:
@@ -494,7 +494,7 @@ class PropertyOption(MapperOption):
l = []
mappers = []
# _current_path implies we're in a
# _current_path implies we're in a
# secondary load with an existing path
current_path = list(query._current_path)
@@ -520,8 +520,8 @@ class PropertyOption(MapperOption):
if not entity:
entity = self._find_entity_basestring(
query,
token,
query,
token,
raiseerr)
if entity is None:
return [], []
@@ -555,8 +555,8 @@ class PropertyOption(MapperOption):
if not entity:
entity = self._find_entity_prop_comparator(
query,
prop.key,
token.parententity,
prop.key,
token.parententity,
raiseerr)
if not entity:
return [], []
@@ -587,7 +587,7 @@ class PropertyOption(MapperOption):
)
if current_path:
# ran out of tokens before
# ran out of tokens before
# current_path was exhausted.
assert not tokens
return [], []
@@ -630,9 +630,9 @@ def _reduce_path(path):
of the mapper referenced by Mapper.prop1.
"""
return tuple([i % 2 != 0 and
element or
getattr(element, 'base_mapper', element)
return tuple([i % 2 != 0 and
element or
getattr(element, 'base_mapper', element)
for i, element in enumerate(path)])
class LoaderStrategy(object):
@@ -678,7 +678,7 @@ class LoaderStrategy(object):
def setup_query(self, context, entity, path, reduced_path, adapter, **kwargs):
pass
def create_row_processor(self, context, path, reduced_path, mapper,
def create_row_processor(self, context, path, reduced_path, mapper,
row, adapter):
"""Return row processing functions which fulfill the contract
specified by MapperProperty.create_row_processor.

View File

@@ -1,5 +1,5 @@
# orm/mapper.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -214,16 +214,16 @@ class Mapper(object):
local_table = None
"""The :class:`.Selectable` which this :class:`.Mapper` manages.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
Typically is an instance of :class:`.Table` or :class:`.Alias`.
May also be ``None``.
The "local" table is the
selectable that the :class:`.Mapper` is directly responsible for
selectable that the :class:`.Mapper` is directly responsible for
managing from an attribute access and flush perspective. For
non-inheriting mappers, the local table is the same as the
"mapped" table. For joined-table inheritance mappers, local_table
will be the particular sub-table of the overall "join" which
this :class:`.Mapper` represents. If this mapper is a
this :class:`.Mapper` represents. If this mapper is a
single-table inheriting mapper, local_table will be ``None``.
See also :attr:`~.Mapper.mapped_table`.
@@ -233,11 +233,11 @@ class Mapper(object):
mapped_table = None
"""The :class:`.Selectable` to which this :class:`.Mapper` is mapped.
Typically an instance of :class:`.Table`, :class:`.Join`, or
Typically an instance of :class:`.Table`, :class:`.Join`, or
:class:`.Alias`.
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
The "mapped" table is the selectable that
the mapper selects from during queries. For non-inheriting
mappers, the mapped table is the same as the "local" table.
For joined-table inheritance mappers, mapped_table references the
full :class:`.Join` representing full rows for this particular
@@ -249,7 +249,7 @@ class Mapper(object):
"""
inherits = None
"""References the :class:`.Mapper` which this :class:`.Mapper`
"""References the :class:`.Mapper` which this :class:`.Mapper`
inherits from, if any.
This is a *read only* attribute determined during mapper construction.
@@ -268,7 +268,7 @@ class Mapper(object):
"""
concrete = None
"""Represent ``True`` if this :class:`.Mapper` is a concrete
"""Represent ``True`` if this :class:`.Mapper` is a concrete
inheritance mapper.
This is a *read only* attribute determined during mapper construction.
@@ -291,7 +291,7 @@ class Mapper(object):
primary_key = None
"""An iterable containing the collection of :class:`.Column` objects
which comprise the 'primary key' of the mapped table, from the
which comprise the 'primary key' of the mapped table, from the
perspective of this :class:`.Mapper`.
This list is against the selectable in :attr:`~.Mapper.mapped_table`. In the
@@ -301,7 +301,7 @@ class Mapper(object):
referenced by the :class:`.Join`.
The list is also not necessarily the same as the primary key column
collection associated with the underlying tables; the :class:`.Mapper`
collection associated with the underlying tables; the :class:`.Mapper`
features a ``primary_key`` argument that can override what the
:class:`.Mapper` considers as primary key columns.
@@ -328,7 +328,7 @@ class Mapper(object):
"""
single = None
"""Represent ``True`` if this :class:`.Mapper` is a single table
"""Represent ``True`` if this :class:`.Mapper` is a single table
inheritance mapper.
:attr:`~.Mapper.local_table` will be ``None`` if this flag is set.
@@ -339,8 +339,8 @@ class Mapper(object):
"""
non_primary = None
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to selet rows but not for
"""Represent ``True`` if this :class:`.Mapper` is a "non-primary"
mapper, e.g. a mapper that is used only to selet rows but not for
persistence management.
This is a *read only* attribute determined during mapper construction.
@@ -364,10 +364,10 @@ class Mapper(object):
"""A mapping of "polymorphic identity" identifiers mapped to :class:`.Mapper`
instances, within an inheritance scenario.
The identifiers can be of any type which is comparable to the
The identifiers can be of any type which is comparable to the
type of column represented by :attr:`~.Mapper.polymorphic_on`.
An inheritance chain of mappers will all reference the same
An inheritance chain of mappers will all reference the same
polymorphic map object. The object is used to correlate incoming
result rows to target mappers.
@@ -402,10 +402,10 @@ class Mapper(object):
"""
columns = None
"""A collection of :class:`.Column` or other scalar expression
"""A collection of :class:`.Column` or other scalar expression
objects maintained by this :class:`.Mapper`.
The collection behaves the same as that of the ``c`` attribute on
The collection behaves the same as that of the ``c`` attribute on
any :class:`.Table` object, except that only those columns included in
this mapping are present, and are keyed based on the attribute name
defined in the mapping, not necessarily the ``key`` attribute of the
@@ -419,11 +419,11 @@ class Mapper(object):
validators = None
"""An immutable dictionary of attributes which have been decorated
using the :func:`~.orm.validates` decorator.
using the :func:`~.orm.validates` decorator.
The dictionary contains string attribute names as keys
mapped to the actual validation method.
"""
c = None
@@ -443,13 +443,13 @@ class Mapper(object):
self.inherits = class_mapper(self.inherits, compile=False)
if not issubclass(self.class_, self.inherits.class_):
raise sa_exc.ArgumentError(
"Class '%s' does not inherit from '%s'" %
"Class '%s' does not inherit from '%s'" %
(self.class_.__name__, self.inherits.class_.__name__))
if self.non_primary != self.inherits.non_primary:
np = not self.non_primary and "primary" or "non-primary"
raise sa_exc.ArgumentError(
"Inheritance of %s mapper for class '%s' is "
"only allowed from a %s mapper" %
"only allowed from a %s mapper" %
(np, self.class_.__name__, np))
# inherit_condition is optional.
if self.local_table is None:
@@ -472,7 +472,7 @@ class Mapper(object):
self.inherits.local_table,
self.local_table)
self.mapped_table = sql.join(
self.inherits.mapped_table,
self.inherits.mapped_table,
self.local_table,
self.inherit_condition)
@@ -499,7 +499,7 @@ class Mapper(object):
"the inherited versioning column. "
"version_id_col should only be specified on "
"the base-most mapper that includes versioning." %
(self.version_id_col.description,
(self.version_id_col.description,
self.inherits.version_id_col.description)
)
@@ -528,7 +528,7 @@ class Mapper(object):
if self.mapped_table is None:
raise sa_exc.ArgumentError(
"Mapper '%s' does not have a mapped_table specified."
"Mapper '%s' does not have a mapped_table specified."
% self)
def _set_with_polymorphic(self, with_polymorphic):
@@ -580,6 +580,12 @@ class Mapper(object):
self.inherits._inheriting_mappers.add(self)
self.passive_updates = self.inherits.passive_updates
self._all_tables = self.inherits._all_tables
for key, prop in mapper._props.iteritems():
if key not in self._props and \
not self._should_exclude(key, key, local=False,
column=None):
self._adapt_inherited_property(key, prop, False)
def _set_polymorphic_on(self, polymorphic_on):
self.polymorphic_on = polymorphic_on
@@ -589,7 +595,7 @@ class Mapper(object):
if self.inherits:
self.dispatch._update(self.inherits.dispatch)
super_extensions = set(chain(*[m._deprecated_extensions
super_extensions = set(chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
@@ -600,7 +606,7 @@ class Mapper(object):
def _configure_listeners(self):
if self.inherits:
super_extensions = set(chain(*[m._deprecated_extensions
super_extensions = set(chain(*[m._deprecated_extensions
for m in self.inherits.iterate_to_root()]))
else:
super_extensions = set()
@@ -647,8 +653,8 @@ class Mapper(object):
"remove *all* current mappers from all classes." %
self.class_)
#else:
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# a ClassManager may already exist as
# ClassManager.instrument_attribute() creates
# new managers for each subclass if they don't yet exist.
_mapper_registry[self] = True
@@ -662,8 +668,8 @@ class Mapper(object):
manager.mapper = self
manager.deferred_scalar_loader = self._load_scalar_attributes
# The remaining members can be added by any mapper,
# The remaining members can be added by any mapper,
# e_name None or not.
if manager.info.get(_INSTRUMENTOR, False):
return
@@ -678,9 +684,10 @@ class Mapper(object):
self._reconstructor = method
event.listen(manager, 'load', _event_on_load, raw=True)
elif hasattr(method, '__sa_validators__'):
include_removes = getattr(method, "__sa_include_removes__", False)
for name in method.__sa_validators__:
self.validators = self.validators.union(
{name : method}
{name : (method, include_removes)}
)
manager.info[_INSTRUMENTOR] = self
@@ -746,10 +753,10 @@ class Mapper(object):
self._readonly_props = set(
self._columntoproperty[col]
for col in self._columntoproperty
if not hasattr(col, 'table') or
if not hasattr(col, 'table') or
col.table not in self._cols_by_table)
# if explicit PK argument sent, add those columns to the
# if explicit PK argument sent, add those columns to the
# primary key mappings
if self._primary_key_argument:
for k in self._primary_key_argument:
@@ -762,23 +769,23 @@ class Mapper(object):
len(self._pks_by_table[self.mapped_table]) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
elif self.local_table not in self._pks_by_table and \
isinstance(self.local_table, schema.Table):
util.warn("Could not assemble any primary "
"keys for locally mapped table '%s' - "
"no rows will be persisted in this Table."
"no rows will be persisted in this Table."
% self.local_table.description)
if self.inherits and \
not self.concrete and \
not self._primary_key_argument:
# if inheriting, the "primary key" for this mapper is
# if inheriting, the "primary key" for this mapper is
# that of the inheriting (unless concrete or explicit)
self.primary_key = self.inherits.primary_key
else:
# determine primary key from argument or mapped_table pks -
# determine primary key from argument or mapped_table pks -
# reduce to the minimal set of columns
if self._primary_key_argument:
primary_key = sqlutil.reduce_columns(
@@ -793,7 +800,7 @@ class Mapper(object):
if len(primary_key) == 0:
raise sa_exc.ArgumentError(
"Mapper %s could not assemble any primary "
"key columns for mapped table '%s'" %
"key columns for mapped table '%s'" %
(self, self.mapped_table.description))
self.primary_key = tuple(primary_key)
@@ -845,19 +852,19 @@ class Mapper(object):
if column in mapper._columntoproperty:
column_key = mapper._columntoproperty[column].key
self._configure_property(column_key,
column,
init=False,
self._configure_property(column_key,
column,
init=False,
setparent=True)
def _configure_polymorphic_setter(self, init=False):
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
"""Configure an attribute on the mapper representing the
'polymorphic_on' column, if applicable, and not
already generated by _configure_properties (which is typical).
Also create a setter function which will assign this
attribute to the value of the 'polymorphic_identity'
upon instance construction, also if applicable. This
upon instance construction, also if applicable. This
routine will run when an instance is created.
"""
@@ -906,15 +913,15 @@ class Mapper(object):
else:
# polymorphic_on is a Column or SQL expression and doesn't
# appear to be mapped.
# this means it can be 1. only present in the with_polymorphic
# this means it can be 1. only present in the with_polymorphic
# selectable or 2. a totally standalone SQL expression which we'd
# hope is compatible with this mapper's mapped_table
col = self.mapped_table.corresponding_column(self.polymorphic_on)
if col is None:
# polymorphic_on doesn't derive from any column/expression
# polymorphic_on doesn't derive from any column/expression
# isn't present in the mapped table.
# we will make a "hidden" ColumnProperty for it.
# Just check that if it's directly a schema.Column and we
# we will make a "hidden" ColumnProperty for it.
# Just check that if it's directly a schema.Column and we
# have with_polymorphic, it's likely a user error if the
# schema.Column isn't represented somehow in either mapped_table or
# with_polymorphic. Otherwise as of 0.7.4 we just go with it
@@ -932,15 +939,14 @@ class Mapper(object):
"loads will not function properly"
% col.description)
else:
# column/expression that polymorphic_on derives from
# column/expression that polymorphic_on derives from
# is present in our mapped table
# and is probably mapped, but polymorphic_on itself
# is not. This happens when
# the polymorphic_on is only directly present in the
# is not. This happens when
# the polymorphic_on is only directly present in the
# with_polymorphic selectable, as when use polymorphic_union.
# we'll make a separate ColumnProperty for it.
instrument = True
key = getattr(col, 'key', None)
if key:
if self._should_exclude(col.key, col.key, False, col):
@@ -952,7 +958,7 @@ class Mapper(object):
key = col.key
self._configure_property(
key,
key,
properties.ColumnProperty(col, _instrument=instrument),
init=init, setparent=True)
polymorphic_key = key
@@ -998,15 +1004,15 @@ class Mapper(object):
self._configure_property(key, prop, init=False, setparent=False)
elif key not in self._props:
self._configure_property(
key,
properties.ConcreteInheritedProperty(),
key,
properties.ConcreteInheritedProperty(),
init=init, setparent=True)
def _configure_property(self, key, prop, init=True, setparent=True):
self._log("_configure_property(%s, %s)", key, prop.__class__.__name__)
if not isinstance(prop, MapperProperty):
# we were passed a Column or a list of Columns;
# we were passed a Column or a list of Columns;
# generate a properties.ColumnProperty
columns = util.to_list(prop)
column = columns[0]
@@ -1026,7 +1032,7 @@ class Mapper(object):
"explicitly."
% (prop.columns[-1], column, key))
# existing properties.ColumnProperty from an inheriting
# existing properties.ColumnProperty from an inheriting
# mapper. make a copy and append our column to it
prop = prop.copy()
prop.columns.insert(0, column)
@@ -1065,14 +1071,14 @@ class Mapper(object):
"(including its availability as a foreign key), "
"use the 'include_properties' or 'exclude_properties' "
"mapper arguments to control specifically which table "
"columns get mapped." %
"columns get mapped." %
(key, self, column.key, prop))
if isinstance(prop, properties.ColumnProperty):
col = self.mapped_table.corresponding_column(prop.columns[0])
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# if the column is not present in the mapped table,
# test if a column has been added after the fact to the
# parent table (or their parent, etc.) [ticket:1570]
if col is None and self.inherits:
path = [self]
@@ -1086,20 +1092,20 @@ class Mapper(object):
break
path.append(m)
# subquery expression, column not present in the mapped
# subquery expression, column not present in the mapped
# selectable.
if col is None:
col = prop.columns[0]
# column is coming in after _readonly_props was
# column is coming in after _readonly_props was
# initialized; check for 'readonly'
if hasattr(self, '_readonly_props') and \
(not hasattr(col, 'table') or
(not hasattr(col, 'table') or
col.table not in self._cols_by_table):
self._readonly_props.add(prop)
else:
# if column is coming in after _cols_by_table was
# if column is coming in after _cols_by_table was
# initialized, ensure the col is in the right set
if hasattr(self, '_cols_by_table') and \
col.table in self._cols_by_table and \
@@ -1199,10 +1205,10 @@ class Mapper(object):
def _log_desc(self):
return "(" + self.class_.__name__ + \
"|" + \
(self.local_table is not None and
self.local_table.description or
(self.local_table is not None and
self.local_table.description or
str(self.local_table)) +\
(self.non_primary and
(self.non_primary and
"|non-primary" or "") + ")"
def _log(self, msg, *args):
@@ -1223,7 +1229,7 @@ class Mapper(object):
def __str__(self):
return "Mapper|%s|%s%s" % (
self.class_.__name__,
self.local_table is not None and
self.local_table is not None and
self.local_table.description or None,
self.non_primary and "|non-primary" or ""
)
@@ -1288,7 +1294,7 @@ class Mapper(object):
for m in mappers:
if not m.isa(self):
raise sa_exc.InvalidRequestError(
"%r does not inherit from %r" %
"%r does not inherit from %r" %
(m, self))
else:
mappers = []
@@ -1387,7 +1393,7 @@ class Mapper(object):
mappers])
):
if getattr(c, '_is_polymorphic_discriminator', False) and \
(self.polymorphic_on is None or
(self.polymorphic_on is None or
c.columns[0] is not self.polymorphic_on):
continue
yield c
@@ -1452,7 +1458,7 @@ class Mapper(object):
return result
def _is_userland_descriptor(self, obj):
if isinstance(obj, (MapperProperty,
if isinstance(obj, (MapperProperty,
attributes.QueryableAttribute)):
return False
elif not hasattr(obj, '__get__'):
@@ -1505,7 +1511,7 @@ class Mapper(object):
return False
def common_parent(self, other):
"""Return true if the given mapper shares a
"""Return true if the given mapper shares a
common inherited parent as this mapper."""
return self.base_mapper is other.base_mapper
@@ -1634,7 +1640,7 @@ class Mapper(object):
for col in self.primary_key
]
def _get_state_attr_by_column(self, state, dict_, column,
def _get_state_attr_by_column(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
return state.manager[prop.key].impl.get(state, dict_, passive=passive)
@@ -1648,7 +1654,7 @@ class Mapper(object):
dict_ = attributes.instance_dict(obj)
return self._get_committed_state_attr_by_column(state, dict_, column)
def _get_committed_state_attr_by_column(self, state, dict_,
def _get_committed_state_attr_by_column(self, state, dict_,
column, passive=attributes.PASSIVE_OFF):
prop = self._columntoproperty[column]
@@ -1674,8 +1680,8 @@ class Mapper(object):
statement = self._optimized_get_statement(state, attribute_names)
if statement is not None:
result = session.query(self).from_statement(statement).\
_load_on_ident(None,
only_load_props=attribute_names,
_load_on_ident(None,
only_load_props=attribute_names,
refresh_state=state)
if result is False:
@@ -1698,16 +1704,16 @@ class Mapper(object):
_none_set.issuperset(identity_key):
util.warn("Instance %s to be refreshed doesn't "
"contain a full primary key - can't be refreshed "
"(and shouldn't be expired, either)."
"(and shouldn't be expired, either)."
% state_str(state))
return
result = session.query(self)._load_on_ident(
identity_key,
refresh_state=state,
identity_key,
refresh_state=state,
only_load_props=attribute_names)
# if instance is pending, a refresh operation
# if instance is pending, a refresh operation
# may not complete (even if PK attributes are assigned)
if has_key and result is None:
raise orm_exc.ObjectDeletedError(state)
@@ -1716,16 +1722,16 @@ class Mapper(object):
"""assemble a WHERE clause which retrieves a given state by primary
key, using a minimized set of tables.
Applies to a joined-table inheritance mapper where the
Applies to a joined-table inheritance mapper where the
requested attribute names are only present on joined tables,
not the base table. The WHERE clause attempts to include
not the base table. The WHERE clause attempts to include
only those tables to minimize joins.
"""
props = self._props
tables = set(chain(
*[sqlutil.find_tables(c, check_columns=True)
*[sqlutil.find_tables(c, check_columns=True)
for key in attribute_names
for c in props[key].columns]
))
@@ -1744,8 +1750,8 @@ class Mapper(object):
if leftcol.table not in tables:
leftval = self._get_committed_state_attr_by_column(
state, state.dict,
leftcol,
state, state.dict,
leftcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if leftval is attributes.PASSIVE_NO_RESULT or leftval is None:
raise ColumnsNotAvailable()
@@ -1753,8 +1759,8 @@ class Mapper(object):
type_=binary.right.type)
elif rightcol.table not in tables:
rightval = self._get_committed_state_attr_by_column(
state, state.dict,
rightcol,
state, state.dict,
rightcol,
passive=attributes.PASSIVE_NO_INITIALIZE)
if rightval is attributes.PASSIVE_NO_RESULT or rightval is None:
raise ColumnsNotAvailable()
@@ -1770,8 +1776,8 @@ class Mapper(object):
start = True
if start and not mapper.single:
allconds.append(visitors.cloned_traverse(
mapper.inherit_condition,
{},
mapper.inherit_condition,
{},
{'binary':visit_binary}
)
)
@@ -1804,7 +1810,7 @@ class Mapper(object):
visited_states = set()
prp, mpp = object(), object()
visitables = deque([(deque(self._props.values()), prp,
visitables = deque([(deque(self._props.values()), prp,
state, state.dict)])
while visitables:
@@ -1817,7 +1823,7 @@ class Mapper(object):
prop = iterator.popleft()
if type_ not in prop.cascade:
continue
queue = deque(prop.cascade_iterator(type_, parent_state,
queue = deque(prop.cascade_iterator(type_, parent_state,
parent_dict, visited_states, halt_on))
if queue:
visitables.append((queue,mpp, None, None))
@@ -1826,8 +1832,8 @@ class Mapper(object):
corresponding_dict = iterator.popleft()
yield instance, instance_mapper, \
corresponding_state, corresponding_dict
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
visitables.append((deque(instance_mapper._props.values()),
prp, corresponding_state,
corresponding_dict))
@_memoized_configured_property
@@ -1884,7 +1890,7 @@ class Mapper(object):
@util.memoized_property
def _table_to_equated(self):
"""memoized map of tables to collections of columns to be
"""memoized map of tables to collections of columns to be
synchronized upwards to the base mapper."""
result = util.defaultdict(list)
@@ -1900,16 +1906,16 @@ class Mapper(object):
return result
def _instance_processor(self, context, path, reduced_path, adapter,
polymorphic_from=None,
def _instance_processor(self, context, path, reduced_path, adapter,
polymorphic_from=None,
only_load_props=None, refresh_state=None,
polymorphic_discriminator=None):
"""Produce a mapper level row processor callable
"""Produce a mapper level row processor callable
which processes rows into mapped instances."""
# note that this method, most of which exists in a closure
# called _instance(), resists being broken out, as
# called _instance(), resists being broken out, as
# attempts to do so tend to add significant function
# call overhead. _instance() is the most
# performance-critical section in the whole ORM.
@@ -2019,7 +2025,7 @@ class Mapper(object):
identitykey = self._identity_key_from_state(refresh_state)
else:
identitykey = (
identity_class,
identity_class,
tuple([row[column] for column in pk_cols])
)
@@ -2036,22 +2042,22 @@ class Mapper(object):
version_id_col is not None and \
context.version_check and \
self._get_state_attr_by_column(
state,
dict_,
state,
dict_,
self.version_id_col) != \
row[version_id_col]:
raise orm_exc.StaleDataError(
"Instance '%s' has version id '%s' which "
"does not match database-loaded version id '%s'."
% (state_str(state),
"does not match database-loaded version id '%s'."
% (state_str(state),
self._get_state_attr_by_column(
state, dict_,
self.version_id_col),
row[version_id_col]))
elif refresh_state:
# out of band refresh_state detected (i.e. its not in the
# session.identity_map) honor it anyway. this can happen
# session.identity_map) honor it anyway. this can happen
# if a _get() occurs within save_obj(), such as
# when eager_defaults is True.
state = refresh_state
@@ -2072,7 +2078,7 @@ class Mapper(object):
if create_instance:
for fn in create_instance:
instance = fn(self, context,
instance = fn(self, context,
row, self.class_)
if instance is not EXT_CONTINUE:
manager = attributes.manager_of_class(
@@ -2103,8 +2109,8 @@ class Mapper(object):
if populate_instance:
for fn in populate_instance:
ret = fn(self, context, row, state,
only_load_props=only_load_props,
ret = fn(self, context, row, state,
only_load_props=only_load_props,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
@@ -2132,8 +2138,8 @@ class Mapper(object):
if populate_instance:
for fn in populate_instance:
ret = fn(self, context, row, state,
only_load_props=attrs,
ret = fn(self, context, row, state,
only_load_props=attrs,
instancekey=identitykey, isnew=isnew)
if ret is not EXT_CONTINUE:
break
@@ -2153,7 +2159,7 @@ class Mapper(object):
if result is not None:
if append_result:
for fn in append_result:
if fn(self, context, row, state,
if fn(self, context, row, state,
result, instancekey=identitykey,
isnew=isnew) is not EXT_CONTINUE:
break
@@ -2173,7 +2179,7 @@ class Mapper(object):
pops = (new_populators, existing_populators, delayed_populators, eager_populators)
for prop in self._props.itervalues():
for i, pop in enumerate(prop.create_row_processor(
context, path,
context, path,
reduced_path,
self, row, adapter)):
if pop is not None:
@@ -2196,8 +2202,8 @@ class Mapper(object):
if mapper is self:
return None
# replace the tip of the path info with the subclass mapper
# being used. that way accurate "load_path" info is available
# replace the tip of the path info with the subclass mapper
# being used. that way accurate "load_path" info is available
# for options invoked during deferred loads.
# we lose AliasedClass path elements this way, but currently,
# those are not needed at this stage.
@@ -2205,7 +2211,7 @@ class Mapper(object):
# this asserts to true
#assert mapper.isa(_class_to_mapper(path[-1]))
return mapper._instance_processor(context, path[0:-1] + (mapper,),
return mapper._instance_processor(context, path[0:-1] + (mapper,),
reduced_path[0:-1] + (mapper.base_mapper,),
adapter,
polymorphic_from=self)
@@ -2217,14 +2223,14 @@ def configure_mappers():
"""Initialize the inter-mapper relationships of all mappers that
have been constructed thus far.
This function can be called any number of times, but in
This function can be called any number of times, but in
most cases is handled internally.
"""
global _new_mappers
if not _new_mappers:
return
return
_call_configured = None
_COMPILE_MUTEX.acquire()
@@ -2240,8 +2246,8 @@ def configure_mappers():
return
# initialize properties on all mappers
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# note that _mapper_registry is unordered, which
# may randomly conceal/reveal issues related to
# the order of mapper compilation
for mapper in list(_mapper_registry):
if getattr(mapper, '_configure_failed', False):
@@ -2291,7 +2297,7 @@ def reconstructor(fn):
fn.__sa_reconstructor__ = True
return fn
def validates(*names):
def validates(*names, **kw):
"""Decorate a method as a 'validator' for one or more named properties.
Designates a method as a validator, a method which receives the
@@ -2307,9 +2313,18 @@ def validates(*names):
an assertion to avoid recursion overflows. This is a reentrant
condition which is not supported.
:param \*names: list of attribute names to be validated.
:param include_removes: if True, "remove" events will be
sent as well - the validation function must accept an additional
argument "is_remove" which will be a boolean.
.. versionadded:: 0.7.7
"""
include_removes = kw.pop('include_removes', False)
def wrap(fn):
fn.__sa_validators__ = names
fn.__sa_include_removes__ = include_removes
return fn
return wrap
@@ -2320,7 +2335,7 @@ def _event_on_load(state, ctx):
def _event_on_first_init(manager, cls):
"""Initial mapper compilation trigger.
instrumentation calls this one when InstanceState
is first generated, and is needed for legacy mutable
attributes to work.
@@ -2333,11 +2348,11 @@ def _event_on_first_init(manager, cls):
def _event_on_init(state, args, kwargs):
"""Run init_instance hooks.
This also includes mapper compilation, normally not needed
here but helps with some piecemeal configuration
scenarios (such as in the ORM tutorial).
"""
instrumenting_mapper = state.manager.info.get(_INSTRUMENTOR)

View File

@@ -1,5 +1,5 @@
# orm/persistence.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -23,7 +23,7 @@ from sqlalchemy.orm import attributes, sync, \
from sqlalchemy.orm.util import _state_mapper, state_str
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
@@ -40,30 +40,30 @@ def save_obj(base_mapper, states, uowtransaction, single=False):
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.iteritems():
insert = _collect_insert_commands(base_mapper, uowtransaction,
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
@@ -74,18 +74,18 @@ def post_update(base_mapper, states, uowtransaction, post_update_cols):
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.iteritems():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
@@ -99,19 +99,19 @@ def delete_obj(base_mapper, states, uowtransaction):
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(table_to_mapper.keys()):
delete = _collect_delete_commands(base_mapper, uowtransaction,
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
@@ -121,20 +121,20 @@ def delete_obj(base_mapper, states, uowtransaction):
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
@@ -148,9 +148,9 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
else:
mapper.dispatch.before_update(mapper, connection, state)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
@@ -160,14 +160,14 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
@@ -176,55 +176,55 @@ def _organize_states_for_save(base_mapper, states, uowtransaction):
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
@@ -242,7 +242,7 @@ def _collect_insert_commands(base_mapper, uowtransaction, table,
if col is mapper.version_id_col:
params[col.key] = mapper.version_id_generator(None)
else:
# pull straight from the dict for
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
@@ -259,15 +259,15 @@ def _collect_insert_commands(base_mapper, uowtransaction, table,
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
@@ -292,14 +292,14 @@ def _collect_update_commands(base_mapper, uowtransaction,
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
@@ -309,20 +309,20 @@ def _collect_update_commands(base_mapper, uowtransaction,
params[col.key] = mapper.version_id_generator(
params[col._label])
# HACK: check for history, in case the
# HACK: check for history, in case the
# history is only
# in a different table than the one
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.itervalues():
history = attributes.get_state_history(
state, prop.key,
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
@@ -344,7 +344,7 @@ def _collect_update_commands(base_mapper, uowtransaction,
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
@@ -374,12 +374,12 @@ def _collect_update_commands(base_mapper, uowtransaction,
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
@@ -403,20 +403,20 @@ def _collect_post_update_commands(base_mapper, uowtransaction, table,
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
@@ -448,7 +448,7 @@ def _collect_delete_commands(base_mapper, uowtransaction, table,
return delete
def _emit_update_statements(base_mapper, uowtransaction,
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
@@ -466,7 +466,7 @@ def _emit_update_statements(base_mapper, uowtransaction,
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=col.type))
type_=mapper.version_id_col.type))
return table.update(clause)
@@ -486,13 +486,13 @@ def _emit_update_statements(base_mapper, uowtransaction,
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c.context.prefetch_cols,
uowtransaction,
table,
state,
state_dict,
c.context.prefetch_cols,
c.context.postfetch_cols,
c.context.compiled_parameters[0],
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
@@ -505,11 +505,11 @@ def _emit_update_statements(base_mapper, uowtransaction,
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
@@ -517,10 +517,10 @@ def _emit_insert_statements(base_mapper, uowtransaction,
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks), \
records in groupby(insert,
lambda rec: (rec[4],
rec[2].keys(),
bool(rec[5]),
records in groupby(insert,
lambda rec: (rec[4],
rec[2].keys(),
bool(rec[5]),
rec[6])
):
if has_all_pks and not hasvalue:
@@ -529,19 +529,19 @@ def _emit_insert_statements(base_mapper, uowtransaction,
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper,
for (state, state_dict, params, mapper,
conn, value_params, has_all_pks), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper,
uowtransaction,
uowtransaction,
table,
state,
state,
state_dict,
c.context.prefetch_cols,
c.context.postfetch_cols,
last_inserted_params,
last_inserted_params,
value_params)
else:
@@ -561,31 +561,31 @@ def _emit_insert_statements(base_mapper, uowtransaction,
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper._set_state_attr_by_column(
state,
state_dict,
state,
state_dict,
col, pk)
_postfetch(
mapper,
uowtransaction,
table,
state,
uowtransaction,
table,
state,
state_dict,
result.context.prefetch_cols,
result.context.prefetch_cols,
result.context.postfetch_cols,
result.context.compiled_parameters[0],
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
@@ -603,19 +603,19 @@ def _emit_post_update_statements(base_mapper, uowtransaction,
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], rec[2].keys())
):
connection = key[0]
multiparams = [params for state, state_dict,
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
@@ -631,9 +631,9 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
@@ -657,13 +657,13 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
@@ -671,11 +671,11 @@ def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
@@ -683,7 +683,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction,
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
@@ -703,7 +703,7 @@ def _finalize_insert_update_commands(base_mapper, uowtransaction,
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
def _postfetch(mapper, uowtransaction, table,
state, dict_, prefetch_cols, postfetch_cols,
params, value_params):
"""Expire attributes in need of newly persisted database state,
@@ -718,9 +718,9 @@ def _postfetch(mapper, uowtransaction, table,
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state.expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
state.expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
@@ -728,33 +728,35 @@ def _postfetch(mapper, uowtransaction, table,
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = uowtransaction.transaction.connection(
base_mapper)
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)

View File

@@ -1,5 +1,5 @@
# orm/properties.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -33,9 +33,9 @@ from descriptor_props import CompositeProperty, SynonymProperty, \
class ColumnProperty(StrategizedProperty):
"""Describes an object attribute that corresponds to a table column.
Public constructor is the :func:`.orm.column_property` function.
"""
def __init__(self, *columns, **kwargs):
@@ -62,7 +62,7 @@ class ColumnProperty(StrategizedProperty):
"""
self._orig_columns = [expression._labeled(c) for c in columns]
self.columns = [expression._labeled(_orm_deannotate(c))
self.columns = [expression._labeled(_orm_deannotate(c))
for c in columns]
self.group = kwargs.pop('group', None)
self.deferred = kwargs.pop('deferred', False)
@@ -88,7 +88,7 @@ class ColumnProperty(StrategizedProperty):
if kwargs:
raise TypeError(
"%s received unexpected keyword argument(s): %s" % (
self.__class__.__name__,
self.__class__.__name__,
', '.join(sorted(kwargs.keys()))))
util.set_creation_order(self)
@@ -104,9 +104,9 @@ class ColumnProperty(StrategizedProperty):
return
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc
)
@@ -124,19 +124,21 @@ class ColumnProperty(StrategizedProperty):
def copy(self):
return ColumnProperty(
deferred=self.deferred,
group=self.group,
deferred=self.deferred,
group=self.group,
active_history=self.active_history,
*self.columns)
def _getcommitted(self, state, dict_, column,
def _getcommitted(self, state, dict_, column,
passive=attributes.PASSIVE_OFF):
return state.get_impl(self.key).\
get_committed_value(state, dict_, passive=passive)
def merge(self, session, source_state, source_dict, dest_state,
def merge(self, session, source_state, source_dict, dest_state,
dest_dict, load, _recursive):
if self.key in source_dict:
if not self.instrument:
return
elif self.key in source_dict:
value = source_dict[self.key]
if not load:
@@ -144,9 +146,8 @@ class ColumnProperty(StrategizedProperty):
else:
impl = dest_state.get_impl(self.key)
impl.set(dest_state, dest_dict, value, None)
else:
if dest_state.has_identity and self.key not in dest_dict:
dest_state.expire_attributes(dest_dict, [self.key])
elif dest_state.has_identity and self.key not in dest_dict:
dest_state.expire_attributes(dest_dict, [self.key])
class Comparator(PropComparator):
@util.memoized_instancemethod
@@ -176,20 +177,20 @@ log.class_logger(ColumnProperty)
class RelationshipProperty(StrategizedProperty):
"""Describes an object property that holds a single item or list
of items that correspond to a related database table.
Public constructor is the :func:`.orm.relationship` function.
Of note here is the :class:`.RelationshipProperty.Comparator`
class, which implements comparison operations for scalar-
and collection-referencing mapped attributes.
"""
strategy_wildcard_key = 'relationship:*'
def __init__(self, argument,
secondary=None, primaryjoin=None,
secondaryjoin=None,
secondaryjoin=None,
foreign_keys=None,
uselist=None,
order_by=False,
@@ -207,7 +208,7 @@ class RelationshipProperty(StrategizedProperty):
active_history=False,
cascade_backrefs=True,
load_on_pending=False,
strategy_class=None, _local_remote_pairs=None,
strategy_class=None, _local_remote_pairs=None,
query_class=None):
self.uselist = uselist
@@ -256,7 +257,7 @@ class RelationshipProperty(StrategizedProperty):
self.cascade = CascadeOptions("save-update, merge")
if self.passive_deletes == 'all' and \
("delete" in self.cascade or
("delete" in self.cascade or
"delete-orphan" in self.cascade):
raise sa_exc.ArgumentError(
"Can't set passive_deletes='all' in conjunction "
@@ -278,9 +279,9 @@ class RelationshipProperty(StrategizedProperty):
def instrument_class(self, mapper):
attributes.register_descriptor(
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
mapper.class_,
self.key,
comparator=self.comparator_factory(self, mapper),
parententity=mapper,
doc=self.doc,
)
@@ -292,7 +293,7 @@ class RelationshipProperty(StrategizedProperty):
def __init__(self, prop, mapper, of_type=None, adapter=None):
"""Construction of :class:`.RelationshipProperty.Comparator`
is internal to the ORM's attribute mechanics.
"""
self.prop = prop
self.mapper = mapper
@@ -322,29 +323,23 @@ class RelationshipProperty(StrategizedProperty):
else:
return elem
def operate(self, op, *other, **kwargs):
return op(self, *other, **kwargs)
def reverse_operate(self, op, other, **kwargs):
return op(self, *other, **kwargs)
def of_type(self, cls):
"""Produce a construct that represents a particular 'subtype' of
attribute for the parent class.
Currently this is usable in conjunction with :meth:`.Query.join`
and :meth:`.Query.outerjoin`.
"""
return RelationshipProperty.Comparator(
self.property,
self.mapper,
self.property,
self.mapper,
cls, adapter=self.adapter)
def in_(self, other):
"""Produce an IN clause - this is not implemented
"""Produce an IN clause - this is not implemented
for :func:`~.orm.relationship`-based attributes at this time.
"""
raise NotImplementedError('in_() not yet supported for '
'relationships. For a simple many-to-one, use '
@@ -361,20 +356,20 @@ class RelationshipProperty(StrategizedProperty):
this will typically produce a
clause such as::
mytable.related_id == <some id>
Where ``<some id>`` is the primary key of the given
Where ``<some id>`` is the primary key of the given
object.
The ``==`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use :meth:`~.RelationshipProperty.Comparator.contains`.
* Compared to a scalar one-to-many, will produce a
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
@@ -448,9 +443,9 @@ class RelationshipProperty(StrategizedProperty):
# limit this adapter to annotated only?
criterion = target_adapter.traverse(criterion)
# only have the "joined left side" of what we
# only have the "joined left side" of what we
# return be subject to Query adaption. The right
# side of it is used for an exists() subquery and
# side of it is used for an exists() subquery and
# should not correlate or otherwise reach out
# to anything in the enclosing query.
if criterion is not None:
@@ -464,42 +459,42 @@ class RelationshipProperty(StrategizedProperty):
def any(self, criterion=None, **kwargs):
"""Produce an expression that tests a collection against
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.any(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id
AND related.x=2)
Because :meth:`~.RelationshipProperty.Comparator.any` uses
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.any` is particularly
useful for testing for empty collections::
session.query(MyClass).filter(
~MyClass.somereference.any()
)
will produce::
SELECT * FROM my_table WHERE
NOT EXISTS (SELECT 1 FROM related WHERE related.my_id=my_table.id)
:meth:`~.RelationshipProperty.Comparator.any` is only
valid for collections, i.e. a :func:`.relationship`
that has ``uselist=True``. For scalar references,
use :meth:`~.RelationshipProperty.Comparator.has`.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
@@ -514,14 +509,14 @@ class RelationshipProperty(StrategizedProperty):
particular criterion, using EXISTS.
An expression like::
session.query(MyClass).filter(
MyClass.somereference.has(SomeRelated.x==2)
)
Will produce a query like::
SELECT * FROM my_table WHERE
EXISTS (SELECT 1 FROM related WHERE related.id==my_table.related_id
AND related.x=2)
@@ -530,12 +525,12 @@ class RelationshipProperty(StrategizedProperty):
a correlated subquery, its performance is not nearly as
good when compared against large target tables as that of
using a join.
:meth:`~.RelationshipProperty.Comparator.has` is only
valid for scalar references, i.e. a :func:`.relationship`
that has ``uselist=False``. For collection references,
use :meth:`~.RelationshipProperty.Comparator.any`.
"""
if self.property.uselist:
raise sa_exc.InvalidRequestError(
@@ -544,46 +539,46 @@ class RelationshipProperty(StrategizedProperty):
return self._criterion_exists(criterion, **kwargs)
def contains(self, other, **kwargs):
"""Return a simple expression that tests a collection for
"""Return a simple expression that tests a collection for
containment of a particular item.
:meth:`~.RelationshipProperty.Comparator.contains` is
only valid for a collection, i.e. a
:func:`~.orm.relationship` that implements
one-to-many or many-to-many with ``uselist=True``.
When used in a simple one-to-many context, an
When used in a simple one-to-many context, an
expression like::
MyClass.contains(other)
Produces a clause like::
mytable.id == <some id>
Where ``<some id>`` is the value of the foreign key
attribute on ``other`` which refers to the primary
key of its parent object. From this it follows that
:meth:`~.RelationshipProperty.Comparator.contains` is
very useful when used with simple one-to-many
operations.
For many-to-many operations, the behavior of
:meth:`~.RelationshipProperty.Comparator.contains`
has more caveats. The association table will be
rendered in the statement, producing an "implicit"
join, that is, includes multiple tables in the FROM
clause which are equated in the WHERE clause::
query(MyClass).filter(MyClass.contains(other))
Produces a query like::
SELECT * FROM my_table, my_association_table AS
my_association_table_1 WHERE
my_table.id = my_association_table_1.parent_id
AND my_association_table_1.child_id = <some id>
Where ``<some id>`` would be the primary key of
``other``. From the above, it is clear that
:meth:`~.RelationshipProperty.Comparator.contains`
@@ -597,7 +592,7 @@ class RelationshipProperty(StrategizedProperty):
a less-performant alternative using EXISTS, or refer
to :meth:`.Query.outerjoin` as well as :ref:`ormtutorial_joins`
for more details on constructing outer joins.
"""
if not self.property.uselist:
raise sa_exc.InvalidRequestError(
@@ -635,7 +630,7 @@ class RelationshipProperty(StrategizedProperty):
adapt(x) == None)
for (x, y) in self.property.local_remote_pairs])
criterion = sql.and_(*[x==y for (x, y) in
criterion = sql.and_(*[x==y for (x, y) in
zip(
self.property.mapper.primary_key,
self.property.\
@@ -648,26 +643,26 @@ class RelationshipProperty(StrategizedProperty):
"""Implement the ``!=`` operator.
In a many-to-one context, such as::
MyClass.some_prop != <some object>
This will typically produce a clause such as::
mytable.related_id != <some id>
Where ``<some id>`` is the primary key of the
given object.
The ``!=`` operator provides partial functionality for non-
many-to-one comparisons:
* Comparisons against collections are not supported.
Use
:meth:`~.RelationshipProperty.Comparator.contains`
in conjunction with :func:`~.expression.not_`.
* Compared to a scalar one-to-many, will produce a
* Compared to a scalar one-to-many, will produce a
clause that compares the target columns in the parent to
the given target.
the given target.
* Compared to a scalar many-to-many, an alias
of the association table will be rendered as
well, forming a natural join that is part of the
@@ -681,7 +676,7 @@ class RelationshipProperty(StrategizedProperty):
membership tests.
* Comparisons against ``None`` given in a one-to-many
or many-to-many context produce an EXISTS clause.
"""
if isinstance(other, (NoneType, expression._Null)):
if self.property.direction == MANYTOONE:
@@ -702,26 +697,26 @@ class RelationshipProperty(StrategizedProperty):
configure_mappers()
return self.prop
def compare(self, op, value,
value_is_parent=False,
def compare(self, op, value,
value_is_parent=False,
alias_secondary=True):
if op == operators.eq:
if value is None:
if self.uselist:
return ~sql.exists([1], self.primaryjoin)
else:
return self._optimized_compare(None,
return self._optimized_compare(None,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return self._optimized_compare(value,
return self._optimized_compare(value,
value_is_parent=value_is_parent,
alias_secondary=alias_secondary)
else:
return op(self.comparator, value)
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
def _optimized_compare(self, value, value_is_parent=False,
adapt_source=None,
alias_secondary=True):
if value is not None:
value = attributes.instance_state(value)
@@ -733,12 +728,12 @@ class RelationshipProperty(StrategizedProperty):
def __str__(self):
return str(self.parent.class_.__name__) + "." + self.key
def merge(self,
def merge(self,
session,
source_state,
source_dict,
dest_state,
dest_dict,
dest_dict,
load, _recursive):
if load:
@@ -848,8 +843,8 @@ class RelationshipProperty(StrategizedProperty):
raise AssertionError("Attribute '%s' on class '%s' "
"doesn't handle objects "
"of type '%s'" % (
self.key,
self.parent.class_,
self.key,
self.parent.class_,
c.__class__
))
@@ -877,11 +872,11 @@ class RelationshipProperty(StrategizedProperty):
@util.memoized_property
def mapper(self):
"""Return the targeted :class:`.Mapper` for this
"""Return the targeted :class:`.Mapper` for this
:class:`.RelationshipProperty`.
This is a lazy-initializing static attribute.
"""
if isinstance(self.argument, type):
mapper_ = mapper.class_mapper(self.argument,
@@ -905,8 +900,8 @@ class RelationshipProperty(StrategizedProperty):
@util.memoized_property
@util.deprecated("0.7", "Use .target")
def table(self):
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
"""Return the selectable linked to this
:class:`.RelationshipProperty` object's target
:class:`.Mapper`."""
return self.target
@@ -922,7 +917,7 @@ class RelationshipProperty(StrategizedProperty):
super(RelationshipProperty, self).do_init()
def _check_conflicts(self):
"""Test that this relationship is legal, warn about
"""Test that this relationship is legal, warn about
inheritance conflicts."""
if not self.is_primary() \
@@ -949,11 +944,11 @@ class RelationshipProperty(StrategizedProperty):
% (self.key, self.parent, inheriting))
def _process_dependent_arguments(self):
"""Convert incoming configuration arguments to their
"""Convert incoming configuration arguments to their
proper form.
Callables are resolved, ORM annotations removed.
"""
# accept callables for other attributes which may require
# deferred initialization. This technique is used
@@ -983,20 +978,20 @@ class RelationshipProperty(StrategizedProperty):
# remote_side are all columns, not strings.
if self.order_by is not False and self.order_by is not None:
self.order_by = [
expression._only_column_elements(x, "order_by")
expression._only_column_elements(x, "order_by")
for x in
util.to_list(self.order_by)]
self._user_defined_foreign_keys = \
util.column_set(
expression._only_column_elements(x, "foreign_keys")
expression._only_column_elements(x, "foreign_keys")
for x in util.to_column_set(
self._user_defined_foreign_keys
))
self.remote_side = \
util.column_set(
expression._only_column_elements(x, "remote_side")
expression._only_column_elements(x, "remote_side")
for x in
util.to_column_set(self.remote_side))
@@ -1010,10 +1005,10 @@ class RelationshipProperty(StrategizedProperty):
def _determine_joins(self):
"""Determine the 'primaryjoin' and 'secondaryjoin' attributes,
if not passed to the constructor already.
This is based on analysis of the foreign key relationships
between the parent and target mapped selectables.
"""
if self.secondaryjoin is not None and self.secondary is None:
raise sa_exc.ArgumentError("Property '" + self.key
@@ -1029,7 +1024,7 @@ class RelationshipProperty(StrategizedProperty):
# for more specificity, then if not found will try the more
# general mapped table, which in the case of inheritance is
# a join.
return join_condition(mapper.mapped_table, table,
return join_condition(mapper.mapped_table, table,
a_subset=mapper.local_table)
try:
@@ -1053,9 +1048,9 @@ class RelationshipProperty(StrategizedProperty):
% self)
def _columns_are_mapped(self, *cols):
"""Return True if all columns in the given collection are
"""Return True if all columns in the given collection are
mapped by the tables referenced by this :class:`.Relationship`.
"""
for c in cols:
if self.secondary is not None \
@@ -1070,11 +1065,11 @@ class RelationshipProperty(StrategizedProperty):
"""Determine a list of "source"/"destination" column pairs
based on the given join condition, as well as the
foreign keys argument.
"source" would be a column referenced by a foreign key,
and "destination" would be the column who has a foreign key
reference to "source".
"""
fks = self._user_defined_foreign_keys
@@ -1083,7 +1078,7 @@ class RelationshipProperty(StrategizedProperty):
consider_as_foreign_keys=fks,
any_operator=self.viewonly)
# couldn't find any fks, but we have
# couldn't find any fks, but we have
# "secondary" - assume the "secondary" columns
# are the fks
if not eq_pairs and \
@@ -1108,19 +1103,19 @@ class RelationshipProperty(StrategizedProperty):
# Filter out just to columns that are mapped.
# If viewonly, allow pairs where the FK col
# was part of "foreign keys" - the column it references
# may be in an un-mapped table - see
# may be in an un-mapped table - see
# test.orm.test_relationships.ViewOnlyComplexJoin.test_basic
# for an example of this.
eq_pairs = [(l, r) for (l, r) in eq_pairs
if self._columns_are_mapped(l, r)
or self.viewonly and
or self.viewonly and
r in fks]
if eq_pairs:
return eq_pairs
# from here below is just determining the best error message
# to report. Check for a join condition using any operator
# to report. Check for a join condition using any operator
# (not just ==), perhaps they need to turn on "viewonly=True".
if not self.viewonly and criterion_as_pairs(join_condition,
consider_as_foreign_keys=self._user_defined_foreign_keys,
@@ -1130,8 +1125,8 @@ class RelationshipProperty(StrategizedProperty):
"foreign-key-equated, locally mapped column "\
"pairs for %s "\
"condition '%s' on relationship %s." % (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
self
)
@@ -1160,10 +1155,10 @@ class RelationshipProperty(StrategizedProperty):
"have adequate ForeignKey and/or "
"ForeignKeyConstraint objects established "
"(in which case 'foreign_keys' is usually "
"unnecessary)?"
"unnecessary)?"
% (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
join_condition,
self,
primary and 'mapped' or 'secondary'
))
@@ -1174,18 +1169,18 @@ class RelationshipProperty(StrategizedProperty):
"referencing Column objects have a "
"ForeignKey present, or are otherwise part "
"of a ForeignKeyConstraint on their parent "
"Table, or specify the foreign_keys parameter "
"Table, or specify the foreign_keys parameter "
"to this relationship."
% (
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
primary and 'primaryjoin' or 'secondaryjoin',
join_condition,
self
))
def _determine_synchronize_pairs(self):
"""Resolve 'primary'/foreign' column pairs from the primaryjoin
and secondaryjoin arguments.
"""
if self.local_remote_pairs:
if not self._user_defined_foreign_keys:
@@ -1200,7 +1195,7 @@ class RelationshipProperty(StrategizedProperty):
self.synchronize_pairs.append((r, l))
else:
self.synchronize_pairs = self._sync_pairs_from_join(
self.primaryjoin,
self.primaryjoin,
True)
self._calculated_foreign_keys = util.column_set(
@@ -1209,7 +1204,7 @@ class RelationshipProperty(StrategizedProperty):
if self.secondaryjoin is not None:
self.secondary_synchronize_pairs = self._sync_pairs_from_join(
self.secondaryjoin,
self.secondaryjoin,
False)
self._calculated_foreign_keys.update(
r for (l, r) in
@@ -1218,12 +1213,12 @@ class RelationshipProperty(StrategizedProperty):
self.secondary_synchronize_pairs = None
def _determine_direction(self):
"""Determine if this relationship is one to many, many to one,
"""Determine if this relationship is one to many, many to one,
many to many.
This is derived from the primaryjoin, presence of "secondary",
and in the case of self-referential the "remote side".
"""
if self.secondaryjoin is not None:
self.direction = MANYTOMANY
@@ -1300,19 +1295,19 @@ class RelationshipProperty(StrategizedProperty):
% self)
def _determine_local_remote_pairs(self):
"""Determine pairs of columns representing "local" to
"""Determine pairs of columns representing "local" to
"remote", where "local" columns are on the parent mapper,
"remote" are on the target mapper.
These pairs are used on the load side only to generate
lazy loading clauses.
"""
if not self.local_remote_pairs and not self.remote_side:
# the most common, trivial case. Derive
# the most common, trivial case. Derive
# local/remote pairs from the synchronize pairs.
eq_pairs = util.unique_list(
self.synchronize_pairs +
self.synchronize_pairs +
(self.secondary_synchronize_pairs or []))
if self.direction is MANYTOONE:
self.local_remote_pairs = [(r, l) for l, r in eq_pairs]
@@ -1474,8 +1469,8 @@ class RelationshipProperty(StrategizedProperty):
if not self.viewonly and self._dependency_processor:
self._dependency_processor.per_property_preprocessors(uow)
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
def _create_joins(self, source_polymorphic=False,
source_selectable=None, dest_polymorphic=False,
dest_selectable=None, of_type=None):
if source_selectable is None:
if source_polymorphic and self.parent.with_polymorphic:
@@ -1497,10 +1492,10 @@ class RelationshipProperty(StrategizedProperty):
# place a barrier on the destination such that
# replacement traversals won't ever dig into it.
# its internal structure remains fixed
# its internal structure remains fixed
# regardless of context.
dest_selectable = _shallow_annotate(
dest_selectable,
dest_selectable,
{'no_replacement_traverse':True})
aliased = aliased or (source_selectable is not None)

File diff suppressed because it is too large Load Diff

View File

@@ -1,5 +1,5 @@
# orm/scoping.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
@@ -16,10 +16,10 @@ __all__ = ['ScopedSession']
class ScopedSession(object):
"""Provides thread-local management of Sessions.
Typical invocation is via the :func:`.scoped_session`
function::
Session = scoped_session(sessionmaker())
The internal registry is accessible,
@@ -71,7 +71,7 @@ class ScopedSession(object):
self.session_factory.configure(**kwargs)
def query_property(self, query_cls=None):
"""return a class property which produces a `Query` object
"""return a class property which produces a `Query` object
against the class when called.
e.g.::
@@ -122,7 +122,7 @@ def makeprop(name):
def get(self):
return getattr(self.registry(), name)
return property(get, set)
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
'is_active', 'autoflush', 'no_autoflush'):
setattr(ScopedSession, prop, makeprop(prop))

File diff suppressed because it is too large Load Diff

Some files were not shown because too many files have changed in this diff Show More