desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def format_number(self, value):
    """Format *value* as a string honoring this field's max_digits and
    decimal_places settings."""
    from django.db.backends import util
    return util.format_number(value, self.max_digits, self.decimal_places)
def is_hidden(self):
    """Report whether the reverse relation is hidden, i.e. the related_name
    ends with a '+'."""
    return self.related_name and self.related_name[-1] == '+'
def get_related_field(self):
    """Return the Field in the 'to' model that this relation is tied to.

    Raises FieldDoesNotExist when field_name does not name a direct field.
    """
    data = self.to._meta.get_field_by_name(self.field_name)
    if not data[2]:
        # data[2] is the "direct" flag from get_field_by_name
        raise FieldDoesNotExist("No related field named '%s'" % self.field_name)
    return data[0]
def is_hidden(self):
    """True when a trailing '+' in related_name disables the backwards
    relation."""
    name = self.related_name
    return name and name[-1] == '+'
def get_related_field(self):
    """Return the primary key of the target model (always the pk here;
    provided for symmetry with ManyToOneRel)."""
    return self.to._meta.pk
def get_default(self):
    """Return the field default, unwrapping it to the to_field value when the
    default is an instance of the related model."""
    field_default = super(ForeignKey, self).get_default()
    if isinstance(field_default, self.rel.to):
        # A model instance was given; store the referenced column's value.
        return getattr(field_default, self.rel.get_related_field().attname)
    return field_default
def _get_m2m_db_table(self, opts):
    """Curried helper: return the m2m table name for this relation.

    Preference order: explicit `through` model, explicit db_table, then an
    auto-generated `<table>_<field>` name truncated to the backend limit.
    """
    if self.rel.through is not None:
        return self.rel.through._meta.db_table
    elif self.db_table:
        return self.db_table
    else:
        return util.truncate_name('%s_%s' % (opts.db_table, self.name),
                                  connection.ops.max_name_length())
def _get_m2m_attr(self, related, attr):
    """Curried helper: return the source accessor or DB column name for the
    m2m table, caching the lookup on the field instance."""
    cache_attr = '_m2m_%s_cache' % attr
    if hasattr(self, cache_attr):
        return getattr(self, cache_attr)
    for f in self.rel.through._meta.fields:
        if hasattr(f, 'rel') and f.rel and f.rel.to == related.model:
            setattr(self, cache_attr, getattr(f, attr))
    return getattr(self, cache_attr)
def _get_m2m_reverse_attr(self, related, attr):
    """Curried helper: return the related accessor or DB column name for the
    m2m table, caching the lookup on the field instance.

    For self-referential relations (model == parent_model) the *second*
    matching FK on the through model is the reverse side.
    """
    cache_attr = '_m2m_reverse_%s_cache' % attr
    if hasattr(self, cache_attr):
        return getattr(self, cache_attr)
    found = False
    for f in self.rel.through._meta.fields:
        if hasattr(f, 'rel') and f.rel and f.rel.to == related.parent_model:
            if related.model == related.parent_model:
                if found:
                    # Second FK to the same model: this is the reverse one.
                    setattr(self, cache_attr, getattr(f, attr))
                    break
                else:
                    found = True
            else:
                setattr(self, cache_attr, getattr(f, attr))
                break
    return getattr(self, cache_attr)
def value_from_object(self, obj):
    """Return this field's value on *obj*: the full related queryset."""
    manager = getattr(obj, self.attname)
    return manager.all()
def date_interval_sql(self, sql, connector, timedelta):
    """Build Oracle interval arithmetic SQL for expressions, e.g.
    (datefield + INTERVAL '3 00:03:20.000000' DAY(1) TO SECOND(6))."""
    minutes, seconds = divmod(timedelta.seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days = str(timedelta.days)
    # DAY precision must cover every digit of the day count.
    day_precision = len(days)
    fmt = u"(%s %s INTERVAL '%s %02d:%02d:%02d.%06d' DAY(%d) TO SECOND(6))"
    return fmt % (sql, connector, days, hours, minutes, seconds,
                  timedelta.microseconds, day_precision)
def combine_expression(self, connector, sub_expressions):
    """Combine expression fragments, special-casing the %% and & operators
    which Oracle spells as MOD() and BITAND()."""
    if connector == u'%%':
        return u'MOD(%s)' % u','.join(sub_expressions)
    elif connector == u'&':
        return u'BITAND(%s)' % u','.join(sub_expressions)
    elif connector == u'|':
        raise NotImplementedError(u'Bit-wise or is not supported in Oracle.')
    return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)
def check_constraints(self, table_names=None):
    """Force deferred constraints to be checked now by flipping them to
    IMMEDIATE, then restore the DEFERRED state."""
    self.cursor().execute(u'SET CONSTRAINTS ALL IMMEDIATE')
    self.cursor().execute(u'SET CONSTRAINTS ALL DEFERRED')
def _destroy_test_db(self, test_database_name, verbosity=1):
    """Tear down the Oracle test database: restore the saved credentials,
    then drop the test user and/or test tablespaces as configured."""
    TEST_NAME = self._test_database_name()
    TEST_USER = self._test_database_user()
    TEST_PASSWD = self._test_database_passwd()
    TEST_TBLSPACE = self._test_database_tblspace()
    TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
    # Reconnect as the original (non-test) user.
    self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
    self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
    parameters = {
        'dbname': TEST_NAME,
        'user': TEST_USER,
        'password': TEST_PASSWD,
        'tblspace': TEST_TBLSPACE,
        'tblspace_temp': TEST_TBLSPACE_TMP,
    }
    cursor = self.connection.cursor()
    # Give Oracle a moment to release the session before dropping objects.
    time.sleep(1)
    if self._test_user_create():
        if verbosity >= 1:
            print('Destroying test user...')
        self._destroy_test_user(cursor, parameters, verbosity)
    if self._test_database_create():
        if verbosity >= 1:
            print('Destroying test database tables...')
        self._execute_test_db_destruction(cursor, parameters, verbosity)
    self.connection.close()
'We need to return the \'production\' DB name to get the test DB creation machinery to work. This isn\'t a great deal in this case because DB names as handled by Django haven\'t real counterparts in Oracle.'
def _get_test_db_name(self):
return self.connection.settings_dict['NAME']
def get_table_list(self, cursor):
    """Return the lowercased names of all tables in the current database."""
    cursor.execute('SELECT TABLE_NAME FROM USER_TABLES')
    return [row[0].lower() for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
    """Return cursor.description-shaped rows for *table_name* with the
    column names lowercased (Oracle reports them uppercase)."""
    cursor.execute('SELECT * FROM %s WHERE ROWNUM < 2'
                   % self.connection.ops.quote_name(table_name))
    return [(desc[0].lower(),) + desc[1:] for desc in cursor.description]
def table_name_converter(self, name):
    """Lowercase *name*; Oracle table-name comparison is case insensitive."""
    return name.lower()
'Returns a dictionary of {field_name: field_index} for the given table. Indexes are 0-based.'
def _name_to_index(self, cursor, table_name):
return dict([(d[0], i) for (i, d) in enumerate(self.get_table_description(cursor, table_name))])
def get_relations(self, cursor, table_name):
    """Return {field_index: (other_field_index, other_table)} describing all
    FK relationships of *table_name* (0-based indexes)."""
    table_name = table_name.upper()
    cursor.execute('\n        SELECT ta.column_id - 1, tb.table_name, tb.column_id - 1\n        FROM   user_constraints, USER_CONS_COLUMNS ca, USER_CONS_COLUMNS cb,\n               user_tab_cols ta, user_tab_cols tb\n        WHERE  user_constraints.table_name = %s AND\n               ta.table_name = user_constraints.table_name AND\n               ta.column_name = ca.column_name AND\n               ca.table_name = ta.table_name AND\n               user_constraints.constraint_name = ca.constraint_name AND\n               user_constraints.r_constraint_name = cb.constraint_name AND\n               cb.table_name = tb.table_name AND\n               cb.column_name = tb.column_name AND\n               ca.position = cb.position', [table_name])
    relations = {}
    for row in cursor.fetchall():
        relations[row[0]] = (row[2], row[1].lower())
    return relations
def as_sql(self, with_limits=True, with_col_aliases=False):
    """Create the SQL and params for this query, wrapping the base SQL in
    the ROWNUM subquery dance Oracle needs to emulate LIMIT/OFFSET.

    When `with_limits` is False, limit/offset information is omitted.
    """
    if with_limits and self.query.low_mark == self.query.high_mark:
        # Empty slice: no rows can match, skip the round trip.
        return '', ()
    do_offset = with_limits and (self.query.high_mark is not None
                                 or self.query.low_mark)
    if not do_offset:
        sql, params = super(SQLCompiler, self).as_sql(
            with_limits=False, with_col_aliases=with_col_aliases)
    else:
        # Column aliases are required so the wrapper can select "_SUB".*.
        sql, params = super(SQLCompiler, self).as_sql(
            with_limits=False, with_col_aliases=True)
        high_where = ''
        if self.query.high_mark is not None:
            high_where = 'WHERE ROWNUM <= %d' % (self.query.high_mark,)
        sql = ('SELECT * FROM (SELECT ROWNUM AS "_RN", "_SUB".* FROM (%s) "_SUB" %s) WHERE "_RN" > %d'
               % (sql, high_where, self.query.low_mark))
    return sql, params
def check_constraints(self, table_names=None):
    """Check deferred constraints immediately by switching them to
    IMMEDIATE, then return them to DEFERRED."""
    self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
    self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')
'Switch the isolation level when needing transaction support, so that the same transaction is visible across all the queries.'
def _enter_transaction_management(self, managed):
if (self.features.uses_autocommit and managed and (not self.isolation_level)): self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
'If the normal operating mode is "autocommit", switch back to that when leaving transaction management.'
def _leave_transaction_management(self, managed):
if (self.features.uses_autocommit and (not managed) and self.isolation_level): self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
'Do all the related feature configurations for changing isolation levels. This doesn\'t touch the uses_autocommit feature, since that controls the movement *between* isolation levels.'
def _set_isolation_level(self, level):
assert (level in range(5)) try: if (self.connection is not None): self.connection.set_isolation_level(level) finally: self.isolation_level = level self.features.uses_savepoints = bool(level)
def date_interval_sql(self, sql, connector, timedelta):
    """Build PostgreSQL interval arithmetic SQL for expressions, e.g.
    (datefield + interval '3 days 200 seconds 5 microseconds')."""
    modifiers = []
    if timedelta.days:
        modifiers.append(u'%s days' % timedelta.days)
    if timedelta.seconds:
        modifiers.append(u'%s seconds' % timedelta.seconds)
    if timedelta.microseconds:
        modifiers.append(u'%s microseconds' % timedelta.microseconds)
    mods = u' '.join(modifiers)
    conn = u' %s ' % connector
    return u'(%s)' % conn.join([sql, u"interval '%s'" % mods])
def check_aggregate_support(self, aggregate):
    """Raise NotImplementedError for STDDEV_POP/VAR_POP on PostgreSQL
    8.2-8.2.4, whose implementations of those aggregates are faulty."""
    if aggregate.sql_function in (u'STDDEV_POP', u'VAR_POP'):
        pg_version = self.connection.pg_version
        if 80200 <= pg_version <= 80204:
            raise NotImplementedError(
                u'PostgreSQL 8.2 to 8.2.4 is known to have a faulty implementation of %s. Please upgrade your version of PostgreSQL.'
                % aggregate.sql_function)
def max_name_length(self):
    """Return PostgreSQL's identifier length limit.

    The default NAMEDATALEN gives 63; backends built against a recompiled
    server can override this method.
    """
    return 63
def _prepare_for_test_db_ddl(self):
    """Roll back any active transaction and switch to autocommit so test-DB
    DDL (CREATE/DROP DATABASE) can run."""
    raw = self.connection.connection
    raw.rollback()
    raw.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
def get_table_list(self, cursor):
    """Return the names of all visible tables/views in the current DB."""
    cursor.execute(u"\n        SELECT c.relname\n        FROM pg_catalog.pg_class c\n        LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace\n        WHERE c.relkind IN ('r', 'v', '')\n            AND n.nspname NOT IN ('pg_catalog', 'pg_toast')\n            AND pg_catalog.pg_table_is_visible(c.oid)")
    return [row[0] for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
    """Return cursor.description-style rows for *table_name*, with the
    7th element replaced by a real null_ok boolean from the catalog."""
    # information_schema knows nullability; cursor.description does not.
    cursor.execute(u'\n            SELECT column_name, is_nullable\n            FROM information_schema.columns\n            WHERE table_name = %s', [table_name])
    null_map = dict(cursor.fetchall())
    cursor.execute(u'SELECT * FROM %s LIMIT 1'
                   % self.connection.ops.quote_name(table_name))
    return [line[:6] + (null_map[line[0]] == u'YES',)
            for line in cursor.description]
def get_relations(self, cursor, table_name):
    """Return {field_index: (other_field_index, other_table)} for every FK
    constraint on *table_name* (0-based indexes)."""
    cursor.execute(u"\n        SELECT con.conkey, con.confkey, c2.relname\n        FROM pg_constraint con, pg_class c1, pg_class c2\n        WHERE c1.oid = con.conrelid\n            AND c2.oid = con.confrelid\n            AND c1.relname = %s\n            AND con.contype = 'f'", [table_name])
    relations = {}
    for conkey, confkey, other_table in cursor.fetchall():
        # Catalog column numbers are 1-based; Django wants 0-based.
        relations[conkey[0] - 1] = (confkey[0] - 1, other_table)
    return relations
'A hook for backend-specific changes required when entering manual transaction handling.'
def _enter_transaction_management(self, managed):
pass
'A hook for backend-specific changes required when leaving manual transaction handling. Will usually be implemented only when _enter_transaction_management() is also required.'
def _leave_transaction_management(self, managed):
pass
def abort(self):
    """Roll back any pending transaction and unwind the whole transaction
    state stack."""
    if self._dirty:
        self._rollback()
        self._dirty = False
    while self.transaction_state:
        self.leave_transaction_management()
def enter_transaction_management(self, managed=True):
    """Push a transaction-management frame for the running thread.

    Must be balanced with leave_transaction_management(); state is a stack.
    The managed flag is inherited from the surrounding block, or from
    settings when there is none; dirty starts False outside any block.
    """
    if self.transaction_state:
        # Inherit the surrounding block's managed flag.
        self.transaction_state.append(self.transaction_state[-1])
    else:
        self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
    if self._dirty is None:
        self._dirty = False
    self._enter_transaction_management(managed)
def leave_transaction_management(self):
    """Pop a transaction-management frame for the running thread.

    A pending dirty flag at exit is an error: the data is rolled back and
    TransactionManagementError is raised. The dirty flag otherwise carries
    to the surrounding block, since commits are connection-level.
    """
    if self.transaction_state:
        self.transaction_state.pop()
    else:
        raise TransactionManagementError("This code isn't under transaction management")
    self._leave_transaction_management(self.is_managed())
    if self._dirty:
        self.rollback()
        raise TransactionManagementError('Transaction managed block ended with pending COMMIT/ROLLBACK')
    self._dirty = False
def validate_thread_sharing(self):
    """Raise DatabaseError when this connection is used from a thread other
    than its creator, unless allow_thread_sharing authorizes it."""
    if not self.allow_thread_sharing and self._thread_ident != thread.get_ident():
        raise DatabaseError(
            "DatabaseWrapper objects created in a thread can only be used in that same thread. The object with alias '%s' was created in thread id %s and this is thread id %s."
            % (self.alias, self._thread_ident, thread.get_ident()))
def is_dirty(self):
    """True when the current transaction holds uncommitted changes."""
    return self._dirty
def set_dirty(self):
    """Mark the current code streak as having open changes awaiting commit.

    Raises TransactionManagementError outside transaction management
    (signalled by _dirty being None).
    """
    if self._dirty is None:
        raise TransactionManagementError("This code isn't under transaction management")
    self._dirty = True
def set_clean(self):
    """Clear the dirty flag for the current code streak and drop any
    tracked savepoints.

    Raises TransactionManagementError outside transaction management.
    """
    if self._dirty is not None:
        self._dirty = False
    else:
        raise TransactionManagementError("This code isn't under transaction management")
    self.clean_savepoints()
def is_managed(self):
    """Return the managed flag of the innermost transaction block, falling
    back to the settings default when no block is active."""
    if self.transaction_state:
        return self.transaction_state[-1]
    return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
    """Set the managed flag of the innermost transaction block.

    Switching management off with pending changes commits them. Raises
    TransactionManagementError when no block is active.
    """
    top = self.transaction_state
    if not top:
        raise TransactionManagementError("This code isn't under transaction management")
    top[-1] = flag
    if not flag and self.is_dirty():
        self._commit()
        self.set_clean()
def commit_unless_managed(self):
    """Commit immediately outside managed mode; otherwise just mark the
    transaction dirty."""
    self.validate_thread_sharing()
    if not self.is_managed():
        self._commit()
        self.clean_savepoints()
    else:
        self.set_dirty()
def rollback_unless_managed(self):
    """Roll back immediately outside managed mode; otherwise just mark the
    transaction dirty."""
    self.validate_thread_sharing()
    if self.is_managed():
        self.set_dirty()
    else:
        self._rollback()
def commit(self):
    """Perform the commit and reset the dirty flag."""
    self.validate_thread_sharing()
    self._commit()
    self.set_clean()
def rollback(self):
    """Perform the rollback and reset the dirty flag."""
    self.validate_thread_sharing()
    self._rollback()
    self.set_clean()
def savepoint(self):
    """Create a savepoint inside the current transaction (where supported)
    and return its identifier for later rollback/commit."""
    thread_ident = thread.get_ident()
    self.savepoint_state += 1
    # Negative thread ids would produce an invalid identifier.
    tid = str(thread_ident).replace('-', '')
    sid = 's%s_x%d' % (tid, self.savepoint_state)
    self._savepoint(sid)
    return sid
def savepoint_rollback(self, sid):
    """Roll back to savepoint *sid*; no-op when savepoints are unused or
    unsupported."""
    self.validate_thread_sharing()
    if self.savepoint_state:
        self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
    """Commit savepoint *sid*; no-op when savepoints are unused or
    unsupported."""
    self.validate_thread_sharing()
    if self.savepoint_state:
        self._savepoint_commit(sid)
def disable_constraint_checking(self):
    """Hook to temporarily disable FK constraint checking; no-op here."""
    pass
def enable_constraint_checking(self):
    """Hook to re-enable FK constraint checking; no-op here."""
    pass
def check_constraints(self, table_names=None):
    """Hook for backends that can force constraint checking (e.g. SET
    CONSTRAINTS ALL IMMEDIATE); should raise IntegrityError on invalid FK
    references. No-op in the base backend."""
    pass
@cached_property
def supports_transactions(self):
    """Probe the database for real transaction support by rolling back an
    INSERT and checking that it disappeared."""
    try:
        self.connection.enter_transaction_management()
        self.connection.managed(True)
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
        self.connection._commit()
        cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
        self.connection._rollback()
        cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
        count, = cursor.fetchone()
        cursor.execute('DROP TABLE ROLLBACK_TEST')
        self.connection._commit()
        self.connection._dirty = False
    finally:
        self.connection.leave_transaction_management()
    # A zero count means the rollback really discarded the row.
    return count == 0
@cached_property
def supports_stddev(self):
    """Probe whether the backend accepts STDDEV_POP (and related stats
    aggregates) via check_aggregate_support."""
    class StdDevPop(object):
        sql_function = 'STDDEV_POP'
    try:
        self.connection.ops.check_aggregate_support(StdDevPop())
        return True
    except NotImplementedError:
        return False
def autoinc_sql(self, table, column):
    """SQL to support auto-increment PKs, executed at table creation.

    None means no extra SQL is needed (the base-backend default).
    """
    return None
def bulk_batch_size(self, fields, objs):
    """Maximum batch size for bulk inserts of *objs* into *fields*; the
    base backend imposes no limit beyond the object count itself."""
    return len(objs)
def cache_key_culling_sql(self):
    """SQL fetching the first cache key above the n smallest; the 'db'
    cache backend uses it to pick a culling cutoff."""
    return 'SELECT cache_key FROM %s ORDER BY cache_key LIMIT 1 OFFSET %%s'
def date_extract_sql(self, lookup_type, field_name):
    """SQL extracting 'year'/'month'/'day' from a date column; must be
    provided by each concrete backend."""
    raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
    """Date-interval arithmetic for expressions; must be provided by each
    concrete backend."""
    raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
    """SQL truncating a date column to 'year'/'month'/'day' specificity;
    must be provided by each concrete backend."""
    raise NotImplementedError()
def datetime_cast_sql(self):
    """SQL casting a datetime column so the driver yields datetime objects,
    with '%s' as the field-name placeholder; the default is no cast."""
    return '%s'
def deferrable_sql(self):
    """SQL suffix making a constraint initially deferred in CREATE TABLE;
    empty for the base backend."""
    return ''
def distinct_sql(self, fields):
    """Return a DISTINCT clause; per-field DISTINCT ON is unsupported by
    the base backend and raises NotImplementedError."""
    if fields:
        raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
    return 'DISTINCT'
def drop_foreignkey_sql(self):
    """SQL fragment that drops a foreign key constraint."""
    return 'DROP CONSTRAINT'
def drop_sequence_sql(self, table):
    """SQL dropping the sequence for *table*, or None when no sequence
    exists (the base-backend default)."""
    return None
def fetch_returned_insert_id(self, cursor):
    """Read the new ID from a cursor that just ran INSERT...RETURNING."""
    return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
    """SQL casting a column of *db_type* for use in WHERE, with '%s' as the
    column placeholder; the default performs no cast."""
    return '%s'
def force_no_ordering(self):
    """ORDER BY terms forcing no ordering; an empty list includes nothing."""
    return []
def for_update_sql(self, nowait=False):
    """Return the row-locking clause, with NOWAIT when requested."""
    return 'FOR UPDATE NOWAIT' if nowait else 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
    """WHERE clause for full-text search ('%s' is the value placeholder);
    unsupported by the base backend."""
    raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
    """Reconstruct the last query by substituting *params* into the raw
    *sql*; backends with better driver support may override this."""
    from django.utils.encoding import force_text

    def to_unicode(s):
        return force_text(s, strings_only=True, errors='replace')

    if isinstance(params, (list, tuple)):
        u_params = tuple([to_unicode(val) for val in params])
    else:
        u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
    return force_text(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
    """Return the auto-increment ID produced by the cursor's last INSERT;
    table and PK column names are available for backends that need them."""
    return cursor.lastrowid
def lookup_cast(self, lookup_type):
    """Column cast for lookups like 'contains'/'like' ('%s' is the column
    placeholder); the default performs no cast."""
    return '%s'
def max_in_list_size(self):
    """Maximum number of items in one IN (...) list; None means no limit."""
    return None
def max_name_length(self):
    """Maximum table/column name length; None means unlimited."""
    return None
def no_limit_value(self):
    """Value standing for 'LIMIT infinity' (None if the clause can simply
    be omitted); concrete backends must decide."""
    raise NotImplementedError
def pk_default_value(self):
    """INSERT value asking the DB to use the column's default."""
    return 'DEFAULT'
def process_clob(self, value):
    """Unwrap a CLOB column value; the base backend needs no processing."""
    return value
def return_insert_id(self):
    """SQL + params appended to INSERT to return the new ID; None (implicit)
    when the backend has no such feature."""
    pass
def compiler(self, compiler_name):
    """Look up *compiler_name* in the backend's compiler module, importing
    and caching the module on first use."""
    if self._cache is None:
        self._cache = import_module(self.compiler_module)
    return getattr(self._cache, compiler_name)
def quote_name(self, name):
    """Quote a table/index/column name (idempotently); concrete backends
    must implement."""
    raise NotImplementedError()
def random_function_sql(self):
    """SQL expression yielding a random value."""
    return 'RANDOM()'
def regex_lookup(self, lookup_type):
    """SQL for 'regex'/'iregex' lookups ('%s' is the column placeholder);
    raises NotImplementedError when the backend lacks support."""
    raise NotImplementedError
def savepoint_create_sql(self, sid):
    """SQL starting savepoint *sid*; required only when uses_savepoints."""
    raise NotImplementedError
def savepoint_commit_sql(self, sid):
    """SQL committing savepoint *sid*."""
    raise NotImplementedError
def savepoint_rollback_sql(self, sid):
    """SQL rolling back savepoint *sid*."""
    raise NotImplementedError
def set_time_zone_sql(self):
    """SQL setting the connection's time zone; '' when the backend has no
    time-zone support."""
    return ''
def sql_flush(self, style, tables, sequences):
    """Statements emptying *tables* (keeping them) plus sequence resets for
    *sequences*; *style* is a management-color Style object. Concrete
    backends must implement."""
    raise NotImplementedError()
def sequence_reset_by_name_sql(self, style, sequences):
    """Statements resetting the named *sequences*; the base backend needs
    none. *style* is a management-color Style object."""
    return []
def sequence_reset_sql(self, style, model_list):
    """Statements resetting sequences for *model_list*; the base backend
    needs none. *style* is a management-color Style object."""
    return []
def start_transaction_sql(self):
    """SQL statement that starts a transaction."""
    return 'BEGIN;'
def tablespace_sql(self, tablespace, inline=False):
    """SQL naming *tablespace* in a query ('' when unsupported). `inline`
    distinguishes per-row use from whole CREATE TABLE/INDEX statements."""
    return ''
def prep_for_like_query(self, x):
    """Escape LIKE metacharacters (backslash, %, _) in *x* for safe use in
    a LIKE pattern."""
    from django.utils.encoding import force_text
    return force_text(x).replace('\\', '\\\\').replace('%', '\\%').replace('_', '\\_')
def validate_autopk_value(self, value):
    """Validate *value* for a serial field (some backends reject e.g. zero);
    the base backend accepts everything and returns it unchanged."""
    return value
def value_to_db_date(self, value):
    """Convert *value* for a date column; None passes through, everything
    else is stringified."""
    if value is None:
        return None
    return six.text_type(value)
def value_to_db_datetime(self, value):
    """Convert *value* for a datetime column; None passes through,
    everything else is stringified."""
    if value is None:
        return None
    return six.text_type(value)
def value_to_db_time(self, value):
    """Convert *value* for a time column: None passes through, aware times
    are rejected, everything else is stringified."""
    if value is None:
        return None
    if is_aware(value):
        raise ValueError('Django does not support timezone-aware times.')
    return six.text_type(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
    """Convert a Decimal *value* for a numeric column; None passes through,
    otherwise format to the given precision."""
    if value is None:
        return None
    return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
    """Return [lower, upper] datetime-string bounds for a BETWEEN query
    matching the whole year *value* (an int)."""
    lower = '%s-01-01 00:00:00' % value
    upper = '%s-12-31 23:59:59.999999' % value
    return [lower, upper]