- postgres cursor option is now server_side_cursors=False; some users get bad results using them
so they're off by default - type system slightly modified to support TypeDecorators that can be overridden by the dialect - added an NVarchar type to mssql (produces NVARCHAR), also MSUnicode which provides Unicode-translation for the NVarchar regardless of dialect convert_unicode setting.
This commit is contained in:
parent
5b4871f436
commit
437f1ce670
19
CHANGES
19
CHANGES
|
@ -16,14 +16,23 @@
|
|||
- the "op()" function is now treated as an "operation", rather than a "comparison".
|
||||
the difference is, an operation produces a BinaryExpression from which further operations
|
||||
can occur whereas comparison produces the more restrictive BooleanExpression
|
||||
- postgres
|
||||
- postgres no longer uses client-side cursors, uses more efficient server side
|
||||
cursors via apparently undocumented psycopg2 behavior recently discovered on the
|
||||
mailing list. disable it via create_engine('postgres://', client_side_cursors=True)
|
||||
- type system slightly modified to support TypeDecorators that can be overridden by the dialect
|
||||
(ok, that's not very clear, it allows the mssql tweak below to be possible)
|
||||
- mssql:
|
||||
- added an NVarchar type (produces NVARCHAR), also MSUnicode which provides Unicode-translation
|
||||
for the NVarchar regardless of dialect convert_unicode setting.
|
||||
- postgres:
|
||||
- postgres has an optional "server_side_cursors=True" flag which will utilize
|
||||
server side cursors. these are appropriate for fetching only partial results
|
||||
and are necessary for working with very large unbounded result sets.
|
||||
While we'd like this to be the default behavior, different environments seem
|
||||
to have different results and the causes have not been isolated so we are leaving
|
||||
the feature off by default for now. Uses an apparently undocumented psycopg2
|
||||
behavior recently discovered on the psycopg mailing list.
|
||||
- added "BIGSERIAL" support for postgres table with PGBigInteger/autoincrement
|
||||
- fixes to postgres reflection to better handle when schema names are present;
|
||||
thanks to jason (at) ncsmags.com [ticket:402]
|
||||
- mysql
|
||||
- mysql:
|
||||
- mysql is inconsistent with what kinds of quotes it uses in foreign keys during a
|
||||
SHOW CREATE TABLE, reflection updated to accommodate for all three styles [ticket:420]
|
||||
- firebird:
|
||||
|
|
|
@ -81,3 +81,8 @@ utility with the "-a" (annotate) option, such as:
|
|||
|
||||
which will create a new annotated file ./lib/sqlalchemy/sql.py,cover . Pretty cool !
|
||||
|
||||
TIPS
|
||||
----
|
||||
When running the tests on postgres, postgres gets slower and slower each time you run the tests.
|
||||
This seems to be related to the constant creation/dropping of tables. Running a "VACUUM FULL"
|
||||
on the database will speed it up again.
|
||||
|
|
|
@ -146,9 +146,13 @@ class MSText(sqltypes.TEXT):
|
|||
class MSString(sqltypes.String):
|
||||
def get_col_spec(self):
|
||||
return "VARCHAR(%(length)s)" % {'length' : self.length}
|
||||
class MSUnicode(sqltypes.Unicode):
|
||||
class MSNVarchar(MSString):
|
||||
"""NVARCHAR string, does unicode conversion if dialect.convert_encoding is true"""
|
||||
def get_col_spec(self):
|
||||
return "NVARCHAR(%(length)s)" % {'length' : self.length}
|
||||
class MSUnicode(sqltypes.Unicode):
|
||||
"""Unicode subclass, does unicode conversion in all cases, uses NVARCHAR impl"""
|
||||
impl = MSNVarchar
|
||||
class MSChar(sqltypes.CHAR):
|
||||
def get_col_spec(self):
|
||||
return "CHAR(%(length)s)" % {'length' : self.length}
|
||||
|
@ -259,8 +263,6 @@ class MSSQLExecutionContext(default.DefaultExecutionContext):
|
|||
self.HASIDENT = False
|
||||
|
||||
|
||||
|
||||
|
||||
class MSSQLDialect(ansisql.ANSIDialect):
|
||||
def __init__(self, module=None, auto_identity_insert=False, **params):
|
||||
self.module = module or dbmodule
|
||||
|
@ -546,7 +548,7 @@ class MSSQLCompiler(ansisql.ANSICompiler):
|
|||
class MSSQLSchemaGenerator(ansisql.ANSISchemaGenerator):
|
||||
def get_column_specification(self, column, **kwargs):
|
||||
colspec = self.preparer.format_column(column) + " " + column.type.engine_impl(self.engine).get_col_spec()
|
||||
|
||||
|
||||
# install a IDENTITY Sequence if we have an implicit IDENTITY column
|
||||
if column.primary_key and column.autoincrement and isinstance(column.type, sqltypes.Integer) and not column.foreign_key:
|
||||
if column.default is None or (isinstance(column.default, schema.Sequence) and column.default.optional):
|
||||
|
@ -583,7 +585,7 @@ class MSSQLIdentifierPreparer(ansisql.ANSIIdentifierPreparer):
|
|||
#TODO: determine MSSQL's case folding rules
|
||||
return value
|
||||
|
||||
if dbmodule.__name__ == 'adodbapi':
|
||||
if dbmodule and dbmodule.__name__ == 'adodbapi':
|
||||
dialect = MSSQLDialect
|
||||
else:
|
||||
dialect = PyMSSQLDialect
|
||||
|
|
|
@ -207,9 +207,9 @@ class PGExecutionContext(default.DefaultExecutionContext):
|
|||
self._last_inserted_ids = [v for v in row]
|
||||
|
||||
class PGDialect(ansisql.ANSIDialect):
|
||||
def __init__(self, module=None, use_oids=False, use_information_schema=False, client_side_cursors=False, **params):
|
||||
def __init__(self, module=None, use_oids=False, use_information_schema=False, server_side_cursors=False, **params):
|
||||
self.use_oids = use_oids
|
||||
self.client_side_cursors = client_side_cursors
|
||||
self.server_side_cursors = server_side_cursors
|
||||
if module is None:
|
||||
#if psycopg is None:
|
||||
# raise exceptions.ArgumentError("Couldnt locate psycopg1 or psycopg2: specify postgres module argument")
|
||||
|
@ -241,13 +241,12 @@ class PGDialect(ansisql.ANSIDialect):
|
|||
return ([], opts)
|
||||
|
||||
def create_cursor(self, connection):
|
||||
if self.client_side_cursors:
|
||||
return connection.cursor()
|
||||
else:
|
||||
if self.server_side_cursors:
|
||||
# use server-side cursors:
|
||||
# http://lists.initd.org/pipermail/psycopg/2007-January/005251.html
|
||||
return connection.cursor('x')
|
||||
|
||||
else:
|
||||
return connection.cursor()
|
||||
|
||||
def create_execution_context(self):
|
||||
return PGExecutionContext(self)
|
||||
|
|
|
@ -82,6 +82,12 @@ class TypeDecorator(AbstractType):
|
|||
try:
|
||||
return self.impl_dict[dialect]
|
||||
except:
|
||||
# see if the dialect has an adaptation of the TypeDecorator itself
|
||||
adapted_decorator = dialect.type_descriptor(self)
|
||||
if adapted_decorator is not self:
|
||||
result = adapted_decorator.dialect_impl(dialect)
|
||||
self.impl_dict[dialect] = result
|
||||
return result
|
||||
typedesc = dialect.type_descriptor(self.impl)
|
||||
tt = self.copy()
|
||||
if not isinstance(tt, self.__class__):
|
||||
|
@ -138,7 +144,8 @@ def adapt_type(typeobj, colspecs):
|
|||
except KeyError:
|
||||
pass
|
||||
else:
|
||||
# couldnt adapt...raise exception ?
|
||||
# couldnt adapt - so just return the type itself
|
||||
# (it may be a user-defined type)
|
||||
return typeobj
|
||||
# if we adapted the given generic type to a database-specific type,
|
||||
# but it turns out the originally given "generic" type
|
||||
|
|
|
@ -60,7 +60,20 @@ class AdaptTest(PersistTest):
|
|||
assert (t1.impl.length == 20)
|
||||
assert isinstance(t2.impl, TEXT)
|
||||
assert t2.impl.length is None
|
||||
|
||||
|
||||
|
||||
def testdialecttypedecorators(self):
|
||||
"""test that a a Dialect can provide a dialect-specific subclass of a TypeDecorator subclass."""
|
||||
import sqlalchemy.databases.mssql as mssql
|
||||
dialect = mssql.MSSQLDialect()
|
||||
# run the test twice to ensure the caching step works too
|
||||
for x in range(0, 1):
|
||||
col = Column('', Unicode(length=10))
|
||||
dialect_type = col.type.dialect_impl(dialect)
|
||||
assert isinstance(dialect_type, mssql.MSUnicode)
|
||||
assert dialect_type.get_col_spec() == 'NVARCHAR(10)'
|
||||
assert isinstance(dialect_type.impl, mssql.MSString)
|
||||
|
||||
class OverrideTest(PersistTest):
|
||||
"""tests user-defined types, including a full type as well as a TypeDecorator"""
|
||||
|
||||
|
@ -166,6 +179,7 @@ class UnicodeTest(AssertMixin):
|
|||
self.assert_(isinstance(x['plain_data'], unicode) and x['plain_data'] == unicodedata)
|
||||
finally:
|
||||
db.engine.dialect.convert_unicode = prev_unicode
|
||||
|
||||
|
||||
|
||||
class BinaryTest(AssertMixin):
|
||||
|
|
Loading…
Reference in New Issue