'Changes the aliases in change_map (which maps old-alias -> new-alias), relabelling any references to them in select columns and the where clause.'
def change_aliases(self, change_map):
    assert set(change_map.keys()).intersection(set(change_map.values())) == set()
    # 1. Update references in "select", "group by", "where" and "having".
    self.where.relabel_aliases(change_map)
    self.having.relabel_aliases(change_map)
    for columns in [self.select, self.group_by or []]:
        for pos, col in enumerate(columns):
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                columns[pos] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)
    for mapping in [self.aggregates]:
        for key, col in mapping.items():
            if isinstance(col, (list, tuple)):
                old_alias = col[0]
                mapping[key] = (change_map.get(old_alias, old_alias), col[1])
            else:
                col.relabel_aliases(change_map)
    # 2. Rename the aliases in the internal table/alias data structures.
    for k, aliases in self.join_map.items():
        aliases = tuple(change_map.get(a, a) for a in aliases)
        self.join_map[k] = aliases
    for old_alias, new_alias in six.iteritems(change_map):
        alias_data = self.alias_map[old_alias]
        alias_data = alias_data._replace(rhs_alias=new_alias)
        self.alias_refcount[new_alias] = self.alias_refcount[old_alias]
        del self.alias_refcount[old_alias]
        self.alias_map[new_alias] = alias_data
        del self.alias_map[old_alias]
        table_aliases = self.table_map[alias_data.table_name]
        for pos, alias in enumerate(table_aliases):
            if alias == old_alias:
                table_aliases[pos] = new_alias
                break
        for pos, alias in enumerate(self.tables):
            if alias == old_alias:
                self.tables[pos] = new_alias
                break
    for key, alias in self.included_inherited_models.items():
        if alias in change_map:
            self.included_inherited_models[key] = change_map[alias]
    # 3. Update any joins that refer to the old alias as their LHS.
    for alias, data in six.iteritems(self.alias_map):
        lhs = data.lhs_alias
        if lhs in change_map:
            data = data._replace(lhs_alias=change_map[lhs])
            self.alias_map[alias] = data
'Changes the alias prefix to the next letter in the alphabet and relabels all the aliases. Even tables that previously had no alias will get an alias after this call (it\'s mostly used for nested queries and the outer query will already be using the non-aliased table name). Subclasses who create their own prefix should override this method to produce a similar result (a new prefix and relabelled aliases). The \'exceptions\' parameter is a container that holds alias names which should not be changed.'
def bump_prefix(self, exceptions=()):
    current = ord(self.alias_prefix)
    assert current < ord('Z')
    prefix = chr(current + 1)
    self.alias_prefix = prefix
    change_map = SortedDict()
    for pos, alias in enumerate(self.tables):
        if alias in exceptions:
            continue
        new_alias = '%s%d' % (prefix, pos)
        change_map[alias] = new_alias
        self.tables[pos] = new_alias
    self.change_aliases(change_map)
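To make the relabelling concrete, here is a minimal standalone sketch of the change-map construction above (illustrative alias names only; `collections.OrderedDict` stands in for Django's `SortedDict`, and the prefix is assumed to have been bumped from 'T' to 'U'):

import collections

prefix, tables, exceptions = 'U', ['T0', 'T1', 'keep_me'], ('keep_me',)
change_map = collections.OrderedDict()
for pos, alias in enumerate(tables):
    if alias in exceptions:
        continue  # aliases in 'exceptions' keep their old names
    change_map[alias] = '%s%d' % (prefix, pos)

assert change_map == collections.OrderedDict([('T0', 'U0'), ('T1', 'U1')])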
'Returns the first alias for this query, after increasing its reference count.'
def get_initial_alias(self):
    if self.tables:
        alias = self.tables[0]
        self.ref_alias(alias)
    else:
        alias = self.join((None, self.model._meta.db_table, None, None))
    return alias
'Returns the number of tables in this query with a non-zero reference count. Note that after execution, the reference counts are zeroed, so tables added in compiler will not be seen by this method.'
def count_active_tables(self):
return len([1 for count in self.alias_refcount.values() if count])
'Returns an alias for the join in \'connection\', either reusing an existing alias for that join or creating a new one. \'connection\' is a tuple (lhs, table, lhs_col, col) where \'lhs\' is either an existing table alias or a table name. The join corresponds to the SQL equivalent of:: lhs.lhs_col = table.col If \'always_create\' is True and \'reuse\' is None, a new alias is always created, regardless of whether one already exists or not. If \'always_create\' is True and \'reuse\' is a set, an alias in \'reuse\' that matches the connection will be returned, if possible. If \'always_create\' is False, the first existing alias that matches the \'connection\' is returned, if any. Otherwise a new join is created. If \'exclusions\' is specified, it is something satisfying the container protocol ("foo in exclusions" must work) and specifies a list of aliases that should not be returned, even if they satisfy the join. If \'promote\' is True, the join type for the alias will be LOUTER (if the alias previously existed, the join type will be promoted from INNER to LOUTER, if necessary). If \'outer_if_first\' is True and a new join is created, it will have the LOUTER join type. This is used when joining certain types of querysets and Q-objects together. A join is always created as LOUTER if the lhs alias is LOUTER to make sure we do not generate chains like a LOUTER b INNER c. If \'nullable\' is True, the join can potentially involve NULL values and is a candidate for promotion (to "left outer") when combining querysets.'
def join(self, connection, always_create=False, exclusions=(), promote=False, outer_if_first=False, nullable=False, reuse=None):
    lhs, table, lhs_col, col = connection
    if lhs in self.alias_map:
        lhs_table = self.alias_map[lhs].table_name
    else:
        lhs_table = lhs
    if reuse and always_create and table in self.table_map:
        # Convert 'reuse' into "exclude everything but the reusable set,
        # minus exclusions, for this table".
        exclusions = set(self.table_map[table]).difference(reuse).union(set(exclusions))
        always_create = False
    t_ident = (lhs_table, table, lhs_col, col)
    if not always_create:
        for alias in self.join_map.get(t_ident, ()):
            if alias not in exclusions:
                if lhs_table and not self.alias_refcount[self.alias_map[alias].lhs_alias]:
                    # The join that produced this alias is no longer
                    # active in the query, so it cannot be reused.
                    continue
                if self.alias_map[alias].lhs_alias != lhs:
                    continue
                self.ref_alias(alias)
                if promote or (lhs and self.alias_map[lhs].join_type == self.LOUTER):
                    self.promote_joins([alias])
                return alias
    # No reuse is possible, so we need a new alias.
    alias, _ = self.table_alias(table, True)
    if not lhs:
        # Not all tables need to be joined to anything. No join type
        # means the later columns are ignored.
        join_type = None
    elif promote or outer_if_first or self.alias_map[lhs].join_type == self.LOUTER:
        join_type = self.LOUTER
    else:
        join_type = self.INNER
    join = JoinInfo(table, alias, join_type, lhs, lhs_col, col, nullable)
    self.alias_map[alias] = join
    if t_ident in self.join_map:
        self.join_map[t_ident] += (alias,)
    else:
        self.join_map[t_ident] = (alias,)
    return alias
'If the model that is the basis for this QuerySet inherits other models, we need to ensure that those other models have their tables included in the query. We do this as a separate step so that subclasses know which tables are going to be active in the query, without needing to compute all the select columns (this method is called from pre_sql_setup(), whereas column determination is a later part, and side-effect, of as_sql()).'
def setup_inherited_models(self):
    opts = self.model._meta.concrete_model._meta
    root_alias = self.tables[0]
    seen = {None: root_alias}
    for field, model in opts.get_fields_with_model():
        if model not in seen:
            link_field = opts.get_ancestor_link(model)
            seen[model] = self.join((root_alias, model._meta.db_table,
                                     link_field.column, model._meta.pk.column))
    self.included_inherited_models = seen
'Undoes the effects of setup_inherited_models(). Should be called whenever select columns (self.select) are set explicitly.'
def remove_inherited_models(self):
    for key, alias in self.included_inherited_models.items():
        if key:
            self.unref_alias(alias)
    self.included_inherited_models = {}
'Returns whether or not all elements of this q_object need to be put together in the HAVING clause.'
def need_force_having(self, q_object):
    for child in q_object.children:
        if isinstance(child, Node):
            if self.need_force_having(child):
                return True
        elif child[0].split(LOOKUP_SEP)[0] in self.aggregates:
            return True
    return False
'Adds a single aggregate expression to the Query'
def add_aggregate(self, aggregate, model, alias, is_summary):
    opts = model._meta
    field_list = aggregate.lookup.split(LOOKUP_SEP)
    if len(field_list) == 1 and aggregate.lookup in self.aggregates:
        # The aggregate refers to another annotation.
        field_name = field_list[0]
        col = field_name
        source = self.aggregates[field_name]
        if not is_summary:
            raise FieldError("Cannot compute %s('%s'): '%s' is an aggregate"
                             % (aggregate.name, field_name, field_name))
    elif (len(field_list) > 1
            or field_list[0] not in [i.name for i in opts.fields]
            or self.group_by is None or not is_summary):
        # Joins are needed: the aggregate isn't a simple summary of a
        # field on the base model.
        field, source, opts, join_list, last, _ = self.setup_joins(
            field_list, opts, self.get_initial_alias(), False)
        col, _, join_list = self.trim_joins(source, join_list, last, False)
        self.promote_joins(join_list, True)
        col = (join_list[-1], col)
    else:
        # The simplest case: an aggregate over a field on the base model.
        field_name = field_list[0]
        source = opts.get_field(field_name)
        col = field_name
    aggregate.add_to_query(self, alias, col=col, source=source,
                           is_summary=is_summary)
'Add a single filter to the query. The \'filter_expr\' is a pair: (filter_string, value). E.g. (\'name__contains\', \'fred\') If \'negate\' is True, this is an exclude() filter. It\'s important to note that this method does not negate anything in the where-clause object when inserting the filter constraints. This is because negated filters often require multiple calls to add_filter() and the negation should only happen once. So the caller is responsible for this (the caller will normally be add_q(), for example). If \'trim\' is True, we automatically trim the final join group (used internally when constructing nested queries). If \'can_reuse\' is a set, we are processing a component of a multi-component filter (e.g. filter(Q1, Q2)). In this case, \'can_reuse\' will be a set of table aliases that can be reused in this filter, even if we would otherwise force the creation of new aliases for a join (needed for nested Q-filters). The set is updated by this method. If \'process_extras\' is set, any extra filters returned from the table joining process will be processed. This parameter is set to False during the processing of extra filters to avoid infinite recursion.'
def add_filter(self, filter_expr, connector=AND, negate=False, trim=False, can_reuse=None, process_extras=True, force_having=False):
    arg, value = filter_expr
    parts = arg.split(LOOKUP_SEP)
    if not parts:
        raise FieldError("Cannot parse keyword query %r" % arg)
    # Work out the lookup type and remove it from 'parts', if necessary.
    lookup_type = 'exact'
    num_parts = len(parts)
    if (len(parts) > 1 and parts[-1] in self.query_terms
            and arg not in self.aggregates):
        # Traverse the lookup query to distinguish related fields from
        # lookup types.
        lookup_model = self.model
        for counter, field_name in enumerate(parts):
            try:
                lookup_field = lookup_model._meta.get_field(field_name)
            except FieldDoesNotExist:
                # Not a field. Bail out.
                lookup_type = parts.pop()
                break
            if counter + 1 < num_parts:
                try:
                    lookup_model = lookup_field.rel.to
                except AttributeError:
                    # Not a related field. Bail out.
                    lookup_type = parts.pop()
                    break
    having_clause = False
    # Interpret '__exact=None' as the SQL 'is NULL'; otherwise, reject
    # all uses of None as a query value.
    if value is None:
        if lookup_type != 'exact':
            raise ValueError("Cannot use None as a query value")
        lookup_type = 'isnull'
        value = True
    elif callable(value):
        value = value()
    elif isinstance(value, ExpressionNode):
        # If value is a query expression, evaluate it.
        value = SQLEvaluator(value, self, reuse=can_reuse)
        having_clause = value.contains_aggregate
    for alias, aggregate in self.aggregates.items():
        if alias in (parts[0], LOOKUP_SEP.join(parts)):
            entry = self.where_class()
            entry.add((aggregate, lookup_type, value), AND)
            if negate:
                entry.negate()
            self.having.add(entry, connector)
            return
    opts = self.get_meta()
    alias = self.get_initial_alias()
    allow_many = trim or not negate
    try:
        field, target, opts, join_list, last, extra_filters = self.setup_joins(
            parts, opts, alias, True, allow_many, allow_explicit_fk=True,
            can_reuse=can_reuse, negate=negate, process_extras=process_extras)
    except MultiJoin as e:
        self.split_exclude(filter_expr, LOOKUP_SEP.join(parts[:e.level]),
                           can_reuse)
        return
    table_promote = False
    join_promote = False
    if lookup_type == 'isnull' and value is True and not negate and len(join_list) > 1:
        # If the comparison is against NULL, we may need to use some left
        # outer joins when creating the join chain. This is only done when
        # needed, as it's less efficient at the database level.
        self.promote_joins(join_list)
        join_promote = True
    # Process the join list to see if we can remove any inner joins from
    # the far end (fewer tables in a query is better).
    nonnull_comparison = lookup_type == 'isnull' and value is False
    col, alias, join_list = self.trim_joins(target, join_list, last, trim,
                                            nonnull_comparison)
    if connector == OR:
        # Some joins may need to be promoted when adding a new filter to a
        # disjunction. Walk the list of new joins and, where it diverges
        # from any previous joins, promote the new additions to outer joins.
        join_it = iter(join_list)
        table_it = iter(self.tables)
        next(join_it), next(table_it)
        unconditional = False
        for join in join_it:
            table = next(table_it)
            # Once we hit an outer join, all subsequent joins must also be
            # promoted, regardless of whether they have been promoted as a
            # result of this pass through the tables.
            unconditional = (unconditional or
                             self.alias_map[join].join_type == self.LOUTER)
            if join == table and self.alias_refcount[join] > 1:
                continue
            join_promote = join_promote or self.promote_joins([join], unconditional)
            if table != join:
                table_promote = self.promote_joins([table])
            break
        self.promote_joins(join_it, join_promote)
        self.promote_joins(table_it, table_promote or join_promote)
    if having_clause or force_having:
        if (alias, col) not in self.group_by:
            self.group_by.append((alias, col))
        self.having.add((Constraint(alias, col, field), lookup_type, value),
                        connector)
    else:
        self.where.add((Constraint(alias, col, field), lookup_type, value),
                       connector)
    if negate:
        self.promote_joins(join_list)
        if lookup_type != 'isnull':
            if len(join_list) > 1:
                for j_alias in join_list:
                    if self.alias_map[j_alias].join_type == self.LOUTER:
                        j_col = self.alias_map[j_alias].rhs_join_col
                        assert j_col is not None
                        entry = self.where_class()
                        entry.add((Constraint(j_alias, j_col, None),
                                   'isnull', True), AND)
                        entry.negate()
                        self.where.add(entry, AND)
                        break
            if self.is_nullable(field):
                self.where.add((Constraint(alias, col, None),
                                'isnull', False), AND)
    if can_reuse is not None:
        can_reuse.update(join_list)
    if process_extras:
        for filter in extra_filters:
            self.add_filter(filter, negate=negate, can_reuse=can_reuse,
                            process_extras=False)
'Adds a Q-object to the current filter. Can also be used to add anything that has an \'add_to_query()\' method.'
def add_q(self, q_object, used_aliases=None, force_having=False):
    if used_aliases is None:
        used_aliases = self.used_aliases
    if hasattr(q_object, 'add_to_query'):
        # Complex custom objects are responsible for adding themselves.
        q_object.add_to_query(self, used_aliases)
    else:
        if self.where and q_object.connector != AND and len(q_object) > 1:
            self.where.start_subtree(AND)
            subtree = True
        else:
            subtree = False
        connector = AND
        if q_object.connector == OR and not force_having:
            force_having = self.need_force_having(q_object)
        for child in q_object.children:
            if connector == OR:
                refcounts_before = self.alias_refcount.copy()
            if force_having:
                self.having.start_subtree(connector)
            else:
                self.where.start_subtree(connector)
            if isinstance(child, Node):
                self.add_q(child, used_aliases, force_having=force_having)
            else:
                self.add_filter(child, connector, q_object.negated,
                                can_reuse=used_aliases,
                                force_having=force_having)
            if force_having:
                self.having.end_subtree()
            else:
                self.where.end_subtree()
            if connector == OR:
                # Promote joins that were not reused by this disjunct.
                self.promote_unused_aliases(refcounts_before, used_aliases)
            connector = q_object.connector
        if q_object.negated:
            self.where.negate()
        if subtree:
            self.where.end_subtree()
    if self.filter_is_sticky:
        self.used_aliases = used_aliases
'Compute the necessary table joins for the passage through the fields given in \'names\'. \'opts\' is the Options class for the current model (which gives the table we are joining to), \'alias\' is the alias for the table we are joining to. If dupe_multis is True, any many-to-many or many-to-one joins will always create a new alias (necessary for disjunctive filters). If can_reuse is not None, it\'s a list of aliases that can be reused in these joins (nothing else can be reused in this case). Finally, \'negate\' is used in the same sense as for add_filter() -- it indicates an exclude() filter, or something similar. It is only passed in here so that it can be passed to a field\'s extra_filter() for customized behavior. Returns the final field involved in the join, the target database column (used for any \'where\' constraint), the final \'opts\' value and the list of tables joined.'
def setup_joins(self, names, opts, alias, dupe_multis, allow_many=True, allow_explicit_fk=False, can_reuse=None, negate=False, process_extras=True):
    joins = [alias]
    last = [0]
    dupe_set = set()
    exclusions = set()
    extra_filters = []
    int_alias = None
    for pos, name in enumerate(names):
        if int_alias is not None:
            exclusions.add(int_alias)
        exclusions.add(alias)
        last.append(len(joins))
        if name == 'pk':
            name = opts.pk.name
        try:
            field, model, direct, m2m = opts.get_field_by_name(name)
        except FieldDoesNotExist:
            for f in opts.fields:
                if allow_explicit_fk and name == f.attname:
                    # A hack to allow foo_id to work in values() for
                    # backwards compatibility purposes.
                    field, model, direct, m2m = opts.get_field_by_name(f.name)
                    break
            else:
                names = opts.get_all_field_names() + list(self.aggregate_select)
                raise FieldError("Cannot resolve keyword %r into field. "
                                 "Choices are: %s" % (name, ", ".join(names)))
        if not allow_many and (m2m or not direct):
            for alias in joins:
                self.unref_alias(alias)
            raise MultiJoin(pos + 1)
        if model:
            # The field lives on a base class of the current model.
            proxied_model = opts.concrete_model
            for int_model in opts.get_base_chain(model):
                if int_model is proxied_model:
                    opts = int_model._meta
                else:
                    lhs_col = opts.parents[int_model].column
                    dedupe = lhs_col in opts.duplicate_targets
                    if dedupe:
                        exclusions.update(
                            self.dupe_avoidance.get((id(opts), lhs_col), ()))
                        dupe_set.add((opts, lhs_col))
                    opts = int_model._meta
                    alias = self.join((alias, opts.db_table, lhs_col,
                                       opts.pk.column), exclusions=exclusions)
                    joins.append(alias)
                    exclusions.add(alias)
                    for dupe_opts, dupe_col in dupe_set:
                        self.update_dupe_avoidance(dupe_opts, dupe_col, alias)
        cached_data = opts._join_cache.get(name)
        orig_opts = opts
        dupe_col = direct and field.column or field.field.column
        dedupe = dupe_col in opts.duplicate_targets
        if dupe_set or dedupe:
            if dedupe:
                dupe_set.add((opts, dupe_col))
            exclusions.update(self.dupe_avoidance.get((id(opts), dupe_col), ()))
        if process_extras and hasattr(field, 'extra_filters'):
            extra_filters.extend(field.extra_filters(names, pos, negate))
        if direct:
            if m2m:
                # Many-to-many field defined on the current model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2, to_col2,
                        opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                        field.m2m_target_field_name())[0].column
                    to_col1 = field.m2m_column_name()
                    opts = field.rel.to._meta
                    table2 = opts.db_table
                    from_col2 = field.m2m_reverse_name()
                    to_col2 = opts.get_field_by_name(
                        field.m2m_reverse_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1, to_col1,
                                                   table2, from_col2, to_col2,
                                                   opts, target)
                int_alias = self.join((alias, table1, from_col1, to_col1),
                                      dupe_multis, exclusions, nullable=True,
                                      reuse=can_reuse)
                if int_alias == table2 and from_col2 == to_col2:
                    joins.append(int_alias)
                    alias = int_alias
                else:
                    alias = self.join((int_alias, table2, from_col2, to_col2),
                                      dupe_multis, exclusions, nullable=True,
                                      reuse=can_reuse)
                    joins.extend([int_alias, alias])
            elif field.rel:
                # One-to-one or many-to-one field.
                if cached_data:
                    table, from_col, to_col, opts, target = cached_data
                else:
                    opts = field.rel.to._meta
                    target = field.rel.get_related_field()
                    table = opts.db_table
                    from_col = field.column
                    to_col = target.column
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                                                   opts, target)
                alias = self.join((alias, table, from_col, to_col),
                                  exclusions=exclusions,
                                  nullable=self.is_nullable(field))
                joins.append(alias)
            else:
                # Non-relation field: end of the join chain.
                target = field
                break
        else:
            orig_field = field
            field = field.field
            if m2m:
                # Many-to-many field defined on the target model.
                if cached_data:
                    (table1, from_col1, to_col1, table2, from_col2, to_col2,
                        opts, target) = cached_data
                else:
                    table1 = field.m2m_db_table()
                    from_col1 = opts.get_field_by_name(
                        field.m2m_reverse_target_field_name())[0].column
                    to_col1 = field.m2m_reverse_name()
                    opts = orig_field.opts
                    table2 = opts.db_table
                    from_col2 = field.m2m_column_name()
                    to_col2 = opts.get_field_by_name(
                        field.m2m_target_field_name())[0].column
                    target = opts.pk
                    orig_opts._join_cache[name] = (table1, from_col1, to_col1,
                                                   table2, from_col2, to_col2,
                                                   opts, target)
                int_alias = self.join((alias, table1, from_col1, to_col1),
                                      dupe_multis, exclusions, nullable=True,
                                      reuse=can_reuse)
                alias = self.join((int_alias, table2, from_col2, to_col2),
                                  dupe_multis, exclusions, nullable=True,
                                  reuse=can_reuse)
                joins.extend([int_alias, alias])
            else:
                # One-to-many field (ForeignKey defined on the target model).
                if cached_data:
                    table, from_col, to_col, opts, target = cached_data
                else:
                    local_field = opts.get_field_by_name(field.rel.field_name)[0]
                    opts = orig_field.opts
                    table = opts.db_table
                    from_col = local_field.column
                    to_col = field.column
                    # In case of a recursive FK, use the to_field for
                    # reverse lookups as well.
                    if orig_field.model is local_field.model:
                        target = opts.get_field_by_name(field.rel.field_name)[0]
                    else:
                        target = opts.pk
                    orig_opts._join_cache[name] = (table, from_col, to_col,
                                                   opts, target)
                alias = self.join((alias, table, from_col, to_col),
                                  dupe_multis, exclusions, nullable=True,
                                  reuse=can_reuse)
                joins.append(alias)
        for dupe_opts, dupe_col in dupe_set:
            if int_alias is None:
                to_avoid = alias
            else:
                to_avoid = int_alias
            self.update_dupe_avoidance(dupe_opts, dupe_col, to_avoid)
    if pos != len(names) - 1:
        if pos == len(names) - 2:
            raise FieldError("Join on field %r not permitted. Did you "
                             "misspell %r for the lookup type?"
                             % (name, names[pos + 1]))
        else:
            raise FieldError("Join on field %r not permitted." % name)
    return field, target, opts, joins, last, extra_filters
'Sometimes joins at the end of a multi-table sequence can be trimmed. If the final join is against the same column as we are comparing against, and is an inner join, we can go back one step in a join chain and compare against the LHS of the join instead (and then repeat the optimization). The result, potentially, involves fewer table joins. The \'target\' parameter is the final field being joined to, \'join_list\' is the full list of join aliases. The \'last\' list contains offsets into \'join_list\', corresponding to each component of the filter. Many-to-many relations, for example, add two tables to the join list and we want to deal with both tables the same way, so \'last\' has an entry for the first of the two tables and then the table immediately after the second table, in that case. The \'trim\' parameter forces the final piece of the join list to be trimmed before anything. See the documentation of add_filter() for details about this. The \'nonnull_check\' parameter is True when we are using inner joins between tables explicitly to exclude NULL entries. In that case, the tables shouldn\'t be trimmed, because the very action of joining to them alters the result set. Returns the final active column and table alias and the new active join_list.'
def trim_joins(self, target, join_list, last, trim, nonnull_check=False):
    final = len(join_list)
    penultimate = last.pop()
    if penultimate == final:
        penultimate = last.pop()
    if trim and final > 1:
        extra = join_list[penultimate:]
        join_list = join_list[:penultimate]
        final = penultimate
        penultimate = last.pop()
        col = self.alias_map[extra[0]].lhs_join_col
        for alias in extra:
            self.unref_alias(alias)
    else:
        col = target.column
    alias = join_list[-1]
    while final > 1:
        join = self.alias_map[alias]
        if (col != join.rhs_join_col or join.join_type != self.INNER
                or nonnull_check):
            break
        self.unref_alias(alias)
        alias = join.lhs_alias
        col = join.lhs_join_col
        join_list.pop()
        final -= 1
        if final == penultimate:
            penultimate = last.pop()
    return col, alias, join_list
'For a column that is one of multiple pointing to the same table, update the internal data structures to note that this alias shouldn\'t be used for those other columns.'
def update_dupe_avoidance(self, opts, col, alias):
    ident = id(opts)
    for name in opts.duplicate_targets[col]:
        try:
            self.dupe_avoidance[ident, name].add(alias)
        except KeyError:
            self.dupe_avoidance[ident, name] = set([alias])
'When doing an exclude against any kind of N-to-many relation, we need to use a subquery. This method constructs the nested query, given the original exclude filter (filter_expr) and the portion up to the first N-to-many relation field.'
def split_exclude(self, filter_expr, prefix, can_reuse):
    query = Query(self.model)
    query.add_filter(filter_expr)
    query.bump_prefix()
    query.clear_ordering(True)
    query.set_start(prefix)
    # Make sure the selected column is not NULL, so the subquery cannot
    # return rows the outer NOT IN would trip over.
    alias, col = query.select[0]
    query.where.add((Constraint(alias, col, None), 'isnull', False), AND)
    self.add_filter(('%s__in' % prefix, query), negate=True, trim=True,
                    can_reuse=can_reuse)
    # If more than one alias is in use, add the extra isnull filter so
    # nulls at any point in the join chain are excluded as well. Count
    # only the aliases with a non-zero reference count.
    active_positions = len([count for count in query.alias_refcount.values()
                            if count])
    if active_positions > 1:
        self.add_filter(('%s__isnull' % prefix, False), negate=True,
                        trim=True, can_reuse=can_reuse)
'Adjusts the limits on the rows retrieved. We use low/high to set these, as it makes it more Pythonic to read and write. When the SQL query is created, they are converted to the appropriate offset and limit values. Any limits passed in here are applied relative to the existing constraints. So low is added to the current low value and both will be clamped to any existing high value.'
def set_limits(self, low=None, high=None):
    if high is not None:
        if self.high_mark is not None:
            self.high_mark = min(self.high_mark, self.low_mark + high)
        else:
            self.high_mark = self.low_mark + high
    if low is not None:
        if self.high_mark is not None:
            self.low_mark = min(self.high_mark, self.low_mark + low)
        else:
            self.low_mark = self.low_mark + low
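To see how the relative arithmetic composes, here is a minimal standalone sketch of the same clamping logic; the Limits class is a hypothetical stand-in for the relevant Query attributes, not the real class:

class Limits:
    def __init__(self):
        self.low_mark, self.high_mark = 0, None

    def set_limits(self, low=None, high=None):
        # Same logic as above: new limits are relative to existing ones.
        if high is not None:
            if self.high_mark is not None:
                self.high_mark = min(self.high_mark, self.low_mark + high)
            else:
                self.high_mark = self.low_mark + high
        if low is not None:
            if self.high_mark is not None:
                self.low_mark = min(self.high_mark, self.low_mark + low)
            else:
                self.low_mark = self.low_mark + low

# qs[2:10][1:3] composes to rows 3..4 of the unsliced query:
l = Limits()
l.set_limits(2, 10)   # first slice: OFFSET 2, LIMIT 8
l.set_limits(1, 3)    # second slice is applied relative to the first
assert (l.low_mark, l.high_mark) == (3, 5)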
'Clears any existing limits.'
def clear_limits(self):
(self.low_mark, self.high_mark) = (0, None)
'Returns True if adding filters to this instance is still possible. Typically, this means no limits or offsets have been put on the results.'
def can_filter(self):
return ((not self.low_mark) and (self.high_mark is None))
'Removes all fields from the SELECT clause.'
def clear_select_clause(self):
    self.select = []
    self.select_fields = []
    self.default_cols = False
    self.select_related = False
    self.set_extra_mask(())
    self.set_aggregate_mask(())
'Clears the list of fields to select (but not extra_select columns). Some queryset types completely replace any existing list of select columns.'
def clear_select_fields(self):
    self.select = []
    self.select_fields = []
'Adds and resolves the given fields to the query\'s "distinct on" clause.'
def add_distinct_fields(self, *field_names):
    self.distinct_fields = field_names
    self.distinct = True
'Adds the given (model) fields to the select set. The field names are added in the order specified.'
def add_fields(self, field_names, allow_m2m=True):
    alias = self.get_initial_alias()
    opts = self.get_meta()
    try:
        for name in field_names:
            field, target, u2, joins, u3, u4 = self.setup_joins(
                name.split(LOOKUP_SEP), opts, alias, False, allow_m2m, True)
            final_alias = joins[-1]
            col = target.column
            if len(joins) > 1:
                join = self.alias_map[final_alias]
                if col == join.rhs_join_col:
                    self.unref_alias(final_alias)
                    final_alias = join.lhs_alias
                    col = join.lhs_join_col
                    joins = joins[:-1]
            self.promote_joins(joins[1:])
            self.select.append((final_alias, col))
            self.select_fields.append(field)
    except MultiJoin:
        raise FieldError("Invalid field name: '%s'" % name)
    except FieldError:
        if LOOKUP_SEP in name:
            # For lookups spanning a relation, re-raise the error from
            # the model on which the lookup failed.
            raise
        else:
            names = sorted(opts.get_all_field_names() + list(self.extra)
                           + list(self.aggregate_select))
            raise FieldError("Cannot resolve keyword %r into field. "
                             "Choices are: %s" % (name, ", ".join(names)))
    self.remove_inherited_models()
'Adds items from the \'ordering\' sequence to the query\'s "order by" clause. These items are either field names (not column names) -- possibly with a direction prefix (\'-\' or \'?\') -- or ordinals, corresponding to column positions in the \'select\' list. If \'ordering\' is empty, all ordering is cleared from the query.'
def add_ordering(self, *ordering):
    errors = []
    for item in ordering:
        if not ORDER_PATTERN.match(item):
            errors.append(item)
    if errors:
        raise FieldError('Invalid order_by arguments: %s' % errors)
    if ordering:
        self.order_by.extend(ordering)
    else:
        self.default_ordering = False
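For illustration, a quick standalone check of the validation step. The regular expression below is an assumption matching the pattern Django uses for ORDER_PATTERN, not something defined in this document:

import re

ORDER_PATTERN = re.compile(r'\?|[-+]?[.\w]+$')  # assumed definition

for item in ('name', '-name', '?', 'author__name'):
    assert ORDER_PATTERN.match(item)            # all valid orderings
assert not ORDER_PATTERN.match('name; DROP TABLE')  # rejected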
'Removes any ordering settings. If \'force_empty\' is True, there will be no ordering in the resulting query (not even the model\'s default).'
def clear_ordering(self, force_empty=False):
    self.order_by = []
    self.extra_order_by = ()
    if force_empty:
        self.default_ordering = False
'Expands the GROUP BY clause required by the query. This will usually be the set of all non-aggregate fields in the return data. If the database backend supports grouping by the primary key, and the query would be equivalent, the optimization will be made automatically.'
def set_group_by(self):
    self.group_by = []
    for sel in self.select:
        self.group_by.append(sel)
'Converts the query to do count(...) or count(distinct(pk)) in order to get its size.'
def add_count_column(self):
    if not self.distinct:
        if not self.select:
            count = self.aggregates_module.Count('*', is_summary=True)
        else:
            assert len(self.select) == 1, (
                "Cannot add count col with multiple cols in 'select': %r"
                % self.select)
            count = self.aggregates_module.Count(self.select[0])
    else:
        opts = self.model._meta
        if not self.select:
            count = self.aggregates_module.Count(
                (self.join((None, opts.db_table, None, None)), opts.pk.column),
                is_summary=True, distinct=True)
        else:
            # Because of SQL portability issues, multi-column, distinct
            # counts need a sub-query.
            assert len(self.select) == 1, (
                "Cannot add count col with multiple cols in 'select'.")
            count = self.aggregates_module.Count(self.select[0], distinct=True)
        # Distinct handling is done in Count(), so don't do it at this level.
        self.distinct = False
    self.aggregates = {None: count}
    self.set_aggregate_mask(None)
    self.group_by = None
'Sets up the select_related data structure so that we only select certain related models (as opposed to all models, when self.select_related=True).'
def add_select_related(self, fields):
    field_dict = {}
    for field in fields:
        d = field_dict
        for part in field.split(LOOKUP_SEP):
            d = d.setdefault(part, {})
    self.select_related = field_dict
    self.related_select_cols = []
    self.related_select_fields = []
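As a standalone illustration of the nesting loop (assuming LOOKUP_SEP is '__', as in Django), double-underscore paths become a nested dict keyed by relation name:

LOOKUP_SEP = '__'  # assumed value

def nest(fields):
    field_dict = {}
    for field in fields:
        d = field_dict
        for part in field.split(LOOKUP_SEP):
            d = d.setdefault(part, {})
    return field_dict

assert nest(['author__publisher', 'editor']) == \
    {'author': {'publisher': {}}, 'editor': {}}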
'Adds data to the various extra_* attributes for user-created additions to the query.'
def add_extra(self, select, select_params, where, params, tables, order_by):
    if select:
        # Pair any placeholder markers in the 'select' entries with
        # their parameters.
        select_pairs = SortedDict()
        if select_params:
            param_iter = iter(select_params)
        else:
            param_iter = iter([])
        for name, entry in select.items():
            entry = force_text(entry)
            entry_params = []
            pos = entry.find('%s')
            while pos != -1:
                entry_params.append(next(param_iter))
                pos = entry.find('%s', pos + 2)
            select_pairs[name] = (entry, entry_params)
        # This is order preserving, since self.extra is a SortedDict.
        self.extra.update(select_pairs)
    if where or params:
        self.where.add(ExtraWhere(where, params), AND)
    if tables:
        self.extra_tables += tuple(tables)
    if order_by:
        self.extra_order_by = order_by
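The placeholder pairing can be seen in isolation; a minimal sketch with invented values, showing that each extra-select entry consumes exactly as many params as it has '%s' markers:

entry, param_iter = 'age + %s - %s', iter([10, 3])
entry_params = []
pos = entry.find('%s')
while pos != -1:
    entry_params.append(next(param_iter))
    pos = entry.find('%s', pos + 2)
assert entry_params == [10, 3]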
'Remove any fields from the deferred loading set.'
def clear_deferred_loading(self):
self.deferred_loading = (set(), True)
'Add the given list of model field names to the set of fields to exclude from loading from the database when automatic column selection is done. The new field names are added to any existing field names that are deferred (or removed from any existing field names that are marked as the only ones for immediate loading).'
def add_deferred_loading(self, field_names):
    existing, defer = self.deferred_loading
    if defer:
        # Add to the existing deferred names.
        self.deferred_loading = (existing.union(field_names), True)
    else:
        # Remove names from the set of any existing "immediate load" names.
        self.deferred_loading = (existing.difference(field_names), False)
'Add the given list of model field names to the set of fields to retrieve when the SQL is executed ("immediate loading" fields). The field names replace any existing immediate loading field names. If there are field names already specified for deferred loading, those names are removed from the new field_names before storing the new names for immediate loading. (That is, immediate loading overrides any existing immediate values, but respects existing deferrals.)'
def add_immediate_loading(self, field_names):
    existing, defer = self.deferred_loading
    field_names = set(field_names)
    if 'pk' in field_names:
        field_names.remove('pk')
        field_names.add(self.model._meta.pk.name)
    if defer:
        # Remove any existing deferred names from the current set before
        # setting the new names.
        self.deferred_loading = (field_names.difference(existing), False)
    else:
        # Replace any existing "immediate load" field names.
        self.deferred_loading = (field_names, False)
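A standalone sketch of how the deferred-loading state evolves across defer()/only() calls; the (field_names, defer_flag) pair mirrors self.deferred_loading, and the 'pk' aliasing is omitted for brevity:

state = (set(), True)                     # initial state: defer nothing

existing, defer = state                   # .defer('body')
state = (existing | {'body'}, True)
assert state == ({'body'}, True)

existing, defer = state                   # .only('headline') after a defer:
state = ({'headline'} - existing, False)  # drop deferrals, switch modes
assert state == ({'headline'}, False)

existing, defer = state                   # .defer('headline') in only-mode
state = (existing - {'headline'}, False)  # shrinks the immediate-load set
assert state == (set(), False)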
'If any fields are marked to be deferred, returns a dictionary mapping models to a set of names in those fields that will be loaded. If a model is not in the returned dictionary, none of its fields are deferred. If no fields are marked for deferral, returns an empty dictionary.'
def get_loaded_field_names(self):
    try:
        return self._loaded_field_names_cache
    except AttributeError:
        collection = {}
        self.deferred_to_data(collection, self.get_loaded_field_names_cb)
        self._loaded_field_names_cache = collection
        return collection
'Callback used by get_loaded_field_names().'
def get_loaded_field_names_cb(self, target, model, fields):
target[model] = set([f.name for f in fields])
'Set the mask of aggregates that will actually be returned by the SELECT'
def set_aggregate_mask(self, names):
    if names is None:
        self.aggregate_select_mask = None
    else:
        self.aggregate_select_mask = set(names)
    self._aggregate_select_cache = None
'Set the mask of extra select items that will be returned by SELECT, we don\'t actually remove them from the Query since they might be used later'
def set_extra_mask(self, names):
    if names is None:
        self.extra_select_mask = None
    else:
        self.extra_select_mask = set(names)
    self._extra_select_cache = None
'The SortedDict of aggregate columns that are not masked, and should be used in the SELECT clause. This result is cached for optimization purposes.'
def _aggregate_select(self):
    if self._aggregate_select_cache is not None:
        return self._aggregate_select_cache
    elif self.aggregate_select_mask is not None:
        self._aggregate_select_cache = SortedDict([
            (k, v) for k, v in self.aggregates.items()
            if k in self.aggregate_select_mask
        ])
        return self._aggregate_select_cache
    else:
        return self.aggregates
'Sets the table from which to start joining. The start position is specified by the related attribute from the base model. This will automatically set the select column to be the column linked from the previous table. This method is primarily for internal use and the error checking isn\'t as friendly as add_filter(). Mostly useful for querying directly against the join table of a many-to-many relation in a subquery.'
def set_start(self, start):
    opts = self.model._meta
    alias = self.get_initial_alias()
    field, col, opts, joins, last, extra = self.setup_joins(
        start.split(LOOKUP_SEP), opts, alias, False)
    select_col = self.alias_map[joins[1]].lhs_join_col
    select_alias = alias
    # The call to setup_joins added an extra reference to everything in
    # joins. Reverse that.
    for alias in joins:
        self.unref_alias(alias)
    # We might be able to trim joins from the start of the inner query.
    for alias in joins[1:]:
        join_info = self.alias_map[alias]
        if (join_info.lhs_join_col != select_col
                or join_info.join_type != self.INNER):
            break
        self.unref_alias(select_alias)
        select_alias = join_info.rhs_alias
        select_col = join_info.rhs_join_col
    self.select = [(select_alias, select_col)]
    self.remove_inherited_models()
'A helper to check if the given field should be treated as nullable. Some backends treat \'\' as null and Django treats such fields as nullable for those backends. In such situations field.null can be False even if we should treat the field as nullable.'
def is_nullable(self, field):
    if (connections[DEFAULT_DB_ALIAS].features.interprets_empty_strings_as_nulls
            and field.empty_strings_allowed):
        return True
    else:
        return field.null
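A minimal standalone sketch of this rule, using a hypothetical field class rather than a real Django field; interprets_empty_strings_as_nulls is True on Oracle-like backends:

class FakeCharField:            # hypothetical stand-in for a model field
    null = False                # declared NOT NULL on the model
    empty_strings_allowed = True

def is_nullable(field, interprets_empty_strings_as_nulls):
    if interprets_empty_strings_as_nulls and field.empty_strings_allowed:
        return True
    return field.null

assert is_nullable(FakeCharField(), True) is True    # Oracle-like backend
assert is_nullable(FakeCharField(), False) is False  # e.g. PostgreSQL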
'Instantiate an SQL aggregate * col is a column reference describing the subject field of the aggregate. It can be an alias, or a tuple describing a table and column name. * source is the underlying field or aggregate definition for the column reference. If the aggregate is not an ordinal or computed type, this reference is used to determine the coerced output type of the aggregate. * extra is a dictionary of additional data to provide for the aggregate definition Also utilizes the class variables: * sql_function, the name of the SQL function that implements the aggregate. * sql_template, a template string that is used to render the aggregate into SQL. * is_ordinal, a boolean indicating if the output of this aggregate is an integer (e.g., a count) * is_computed, a boolean indicating if the output of this aggregate is a computed float (e.g., an average), regardless of the input type.'
def __init__(self, col, source=None, is_summary=False, **extra):
    self.col = col
    self.source = source
    self.is_summary = is_summary
    self.extra = extra
    # Follow the chain of aggregate sources back until a field type
    # other than an aggregate is found.
    tmp = self
    while tmp and isinstance(tmp, Aggregate):
        if getattr(tmp, 'is_ordinal', False):
            tmp = ordinal_aggregate_field
        elif getattr(tmp, 'is_computed', False):
            tmp = computed_aggregate_field
        else:
            tmp = tmp.source
    self.field = tmp
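A compressed standalone sketch of the source-chain walk, using hypothetical stand-in classes rather than Django's real field and aggregate types:

class FakeField:                         # stand-in for a model field
    pass

ordinal_aggregate_field = FakeField()    # IntegerField() in Django
computed_aggregate_field = FakeField()   # FloatField() in Django

class Agg:
    is_ordinal = False
    is_computed = False
    def __init__(self, source=None):
        self.source = source
        tmp = self
        while tmp and isinstance(tmp, Agg):
            if getattr(tmp, 'is_ordinal', False):
                tmp = ordinal_aggregate_field
            elif getattr(tmp, 'is_computed', False):
                tmp = computed_aggregate_field
            else:
                tmp = tmp.source
        self.field = tmp

class CountLike(Agg):
    is_ordinal = True                    # counts always coerce to integer

decimal_field = FakeField()
assert Agg(source=decimal_field).field is decimal_field
assert CountLike(source=decimal_field).field is ordinal_aggregate_field
assert Agg(source=CountLike()).field is ordinal_aggregate_field  # nested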
'Return the aggregate, rendered as SQL.'
def as_sql(self, qn, connection):
    if hasattr(self.col, 'as_sql'):
        field_name = self.col.as_sql(qn, connection)
    elif isinstance(self.col, (list, tuple)):
        field_name = '.'.join([qn(c) for c in self.col])
    else:
        field_name = self.col
    params = {'function': self.sql_function, 'field': field_name}
    params.update(self.extra)
    return self.sql_template % params
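For example, with the defaults Django's aggregate classes use (sql_template '%(function)s(%(field)s)' and, for Count, sql_function 'COUNT'), the rendering reduces to a simple string interpolation; the quoted column reference below is an invented value:

params = {'function': 'COUNT', 'field': '"T1"."id"'}
assert '%(function)s(%(field)s)' % params == 'COUNT("T1"."id")'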
'Set up and execute delete queries for all the objects in pk_list. More than one physical query may be executed if there are a lot of values in pk_list.'
def delete_batch(self, pk_list, using, field=None):
    if not field:
        field = self.model._meta.pk
    for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
        where = self.where_class()
        where.add((Constraint(None, field.column, field), 'in',
                   pk_list[offset:offset + GET_ITERATOR_CHUNK_SIZE]), AND)
        self.do_query(self.model._meta.db_table, where, using=using)
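The chunking is plain slicing; a quick standalone check (the chunk size value here is chosen for illustration, not taken from Django's settings):

GET_ITERATOR_CHUNK_SIZE = 100            # illustrative value
pk_list = list(range(250))
chunks = [pk_list[i:i + GET_ITERATOR_CHUNK_SIZE]
          for i in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE)]
assert [len(c) for c in chunks] == [100, 100, 50]   # three DELETE queries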
'Delete the queryset in one SQL query (if possible). For simple queries this is done by copying query.query.where to self.query; for complex queries, by using a subquery.'
def delete_qs(self, query, using):
    innerq = query.query
    # Make sure the inner query has at least one table in use.
    innerq.get_initial_alias()
    # The same for our new query.
    self.get_initial_alias()
    innerq_used_tables = [t for t in innerq.tables if innerq.alias_refcount[t]]
    if ((not innerq_used_tables or innerq_used_tables == self.tables)
            and not len(innerq.having)):
        # Only the base table is in use and there is no aggregate
        # filtering going on.
        self.where = innerq.where
    else:
        pk = query.model._meta.pk
        if not connections[using].features.update_can_self_select:
            # We can't do the delete using a subquery.
            values = list(query.values_list('pk', flat=True))
            if not values:
                return
            self.delete_batch(values, using)
            return
        else:
            innerq.clear_select_clause()
            innerq.select = [(self.get_initial_alias(), pk.column)]
            innerq.select_fields = [None]
            values = innerq
        where = self.where_class()
        where.add((Constraint(None, pk.column, pk), 'in', values), AND)
        self.where = where
    self.get_compiler(using).execute_sql(None)
'Runs on initialization and after cloning. Any attributes that would normally be set in __init__ should go in here, instead, so that they are also set up after a clone() call.'
def _setup_query(self):
    self.values = []
    self.related_ids = None
    if not hasattr(self, 'related_updates'):
        self.related_updates = {}
'Convert a dictionary of field name to value mappings into an update query. This is the entry point for the public update() method on querysets.'
def add_update_values(self, values):
    values_seq = []
    for name, val in six.iteritems(values):
        field, model, direct, m2m = self.model._meta.get_field_by_name(name)
        if not direct or m2m:
            raise FieldError('Cannot update model field %r (only '
                             'non-relations and foreign keys permitted).'
                             % field)
        if model:
            self.add_related_update(model, field, val)
            continue
        values_seq.append((field, model, val))
    return self.add_update_fields(values_seq)
'Turn a sequence of (field, model, value) triples into an update query. Used by add_update_values() as well as the "fast" update path when saving models.'
def add_update_fields(self, values_seq):
    # Resolve any lazy translation objects (Promise) to text before they
    # reach the database layer.
    values_seq = [
        (value[0], value[1], force_text(value[2]))
        if isinstance(value[2], Promise) else value
        for value in values_seq
    ]
    self.values.extend(values_seq)
'Adds (name, value) to an update query for an ancestor model. Updates are coalesced so that we only run one update query per ancestor.'
def add_related_update(self, model, field, value):
    try:
        self.related_updates[model].append((field, None, value))
    except KeyError:
        self.related_updates[model] = [(field, None, value)]
'Returns a list of query objects: one for each update required to an ancestor model. Each query will have the same filtering conditions as the current query but will only update a single table.'
def get_related_updates(self):
    if not self.related_updates:
        return []
    result = []
    for model, values in six.iteritems(self.related_updates):
        query = UpdateQuery(model)
        query.values = values
        if self.related_ids is not None:
            query.add_filter(('pk__in', self.related_ids))
        result.append(query)
    return result
'Set up the insert query from the \'fields\' and \'objs\' parameters, which give the model fields and the object instances supplying the target values. If \'raw\' is True, the values are inserted directly into the query, rather than passed as SQL parameters. This provides a way to insert NULL and DEFAULT keywords into the query, for example.'
def insert_values(self, fields, objs, raw=False):
    self.fields = fields
    # Resolve any lazy translation objects (Promise) on the objects to
    # concrete text before insertion.
    for field in fields:
        for obj in objs:
            value = getattr(obj, field.attname)
            if isinstance(value, Promise):
                setattr(obj, field.attname, force_text(value))
    self.objs = objs
    self.raw = raw
'Converts the query into a date extraction query.'
def add_date_select(self, field_name, lookup_type, order='ASC'):
    try:
        result = self.setup_joins(
            field_name.split(LOOKUP_SEP), self.get_meta(),
            self.get_initial_alias(), False)
    except FieldError:
        raise FieldDoesNotExist("%s has no field named '%s'"
                                % (self.model._meta.object_name, field_name))
    field = result[0]
    assert isinstance(field, DateField), "%r isn't a DateField." % field.name
    alias = result[3][-1]
    select = Date((alias, field.column), lookup_type)
    self.clear_select_clause()
    self.select, self.select_fields = [select], [None]
    self.distinct = True
    self.order_by = [1] if order == 'ASC' else [-1]
    if field.null:
        self.add_filter(('%s__isnull' % field_name, False))
'Does any necessary class setup immediately prior to producing SQL. This is for things that can\'t necessarily be done in __init__ because we might not have all the pieces in place at that time. TODO: after the query has been executed, the altered state should be cleaned; we are not using a clone() of the query here.'
def pre_sql_setup(self):
    if not self.query.tables:
        self.query.join((None, self.query.model._meta.db_table, None, None))
    if (not self.query.select and self.query.default_cols
            and not self.query.included_inherited_models):
        self.query.setup_inherited_models()
    if self.query.select_related and not self.query.related_select_cols:
        self.fill_related_selections()
'A wrapper around connection.ops.quote_name that doesn\'t quote aliases for table names. This avoids problems with some SQL dialects that treat quoted strings specially (e.g. PostgreSQL).'
def quote_name_unless_alias(self, name):
    if name in self.quote_cache:
        return self.quote_cache[name]
    if ((name in self.query.alias_map and name not in self.query.table_map)
            or name in self.query.extra_select):
        self.quote_cache[name] = name
        return name
    r = self.connection.ops.quote_name(name)
    self.quote_cache[name] = r
    return r
'Creates the SQL for this query. Returns the SQL string and list of parameters. If \'with_limits\' is False, any limit/offset information is not included in the query.'
def as_sql(self, with_limits=True, with_col_aliases=False):
    if with_limits and self.query.low_mark == self.query.high_mark:
        return '', ()
    self.pre_sql_setup()
    # After executing the query, we must get rid of any joins the query
    # setup created, so take note of alias counts before the query ran.
    # However we do not want to get rid of stuff done in pre_sql_setup(),
    # as that will modify query state in a way that forbids another run.
    self.refcounts_before = self.query.alias_refcount.copy()
    out_cols = self.get_columns(with_col_aliases)
    ordering, ordering_group_by = self.get_ordering()
    distinct_fields = self.get_distinct()
    # This must come after 'select', 'ordering' and 'distinct' -- see
    # the docstring of get_from_clause() for details.
    from_, f_params = self.get_from_clause()
    qn = self.quote_name_unless_alias
    where, w_params = self.query.where.as_sql(qn=qn,
                                              connection=self.connection)
    having, h_params = self.query.having.as_sql(qn=qn,
                                                connection=self.connection)
    params = []
    for val in six.itervalues(self.query.extra_select):
        params.extend(val[1])
    result = ['SELECT']
    if self.query.distinct:
        result.append(self.connection.ops.distinct_sql(distinct_fields))
    result.append(', '.join(out_cols + self.query.ordering_aliases))
    result.append('FROM')
    result.extend(from_)
    params.extend(f_params)
    if where:
        result.append('WHERE %s' % where)
        params.extend(w_params)
    grouping, gb_params = self.get_grouping(ordering_group_by)
    if grouping:
        if distinct_fields:
            raise NotImplementedError(
                'annotate() + distinct(fields) not implemented.')
        if not ordering:
            ordering = self.connection.ops.force_no_ordering()
        result.append('GROUP BY %s' % ', '.join(grouping))
        params.extend(gb_params)
    if having:
        result.append('HAVING %s' % having)
        params.extend(h_params)
    if ordering:
        result.append('ORDER BY %s' % ', '.join(ordering))
    if with_limits:
        if self.query.high_mark is not None:
            result.append('LIMIT %d'
                          % (self.query.high_mark - self.query.low_mark))
        if self.query.low_mark:
            if self.query.high_mark is None:
                val = self.connection.ops.no_limit_value()
                if val:
                    result.append('LIMIT %d' % val)
            result.append('OFFSET %d' % self.query.low_mark)
    if (self.query.select_for_update
            and self.connection.features.has_select_for_update):
        nowait = self.query.select_for_update_nowait
        if nowait and not self.connection.features.has_select_for_update_nowait:
            raise DatabaseError(
                'NOWAIT is not supported on this database backend.')
        result.append(self.connection.ops.for_update_sql(nowait=nowait))
    # Finally do the cleanup: get rid of the joins we created above.
    self.query.reset_refcounts(self.refcounts_before)
    return ' '.join(result), tuple(params)
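A toy illustration of the assembly strategy: clauses are collected as a flat list of strings with a parallel params list, then joined with spaces. The table and column names below are invented for the example:

result = ['SELECT', '"book"."id", "book"."title"',
          'FROM', '"book"',
          'WHERE "book"."pub_year" > %s',
          'ORDER BY "book"."title" ASC',
          'LIMIT 10', 'OFFSET 5']
params = [1990]
sql = ' '.join(result)
assert sql == ('SELECT "book"."id", "book"."title" FROM "book" '
               'WHERE "book"."pub_year" > %s '
               'ORDER BY "book"."title" ASC LIMIT 10 OFFSET 5')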
'Perform the same functionality as the as_sql() method, returning an SQL string and parameters. However, the alias prefixes are bumped beforehand (in a copy -- the current query isn\'t changed), and any ordering is removed if the query is unsliced. Used when nesting this query inside another.'
def as_nested_sql(self):
    obj = self.query.clone()
    if obj.low_mark == 0 and obj.high_mark is None:
        # If there is no slicing in use, we can safely drop all ordering.
        obj.clear_ordering(True)
    obj.bump_prefix()
    return obj.get_compiler(connection=self.connection).as_sql()
'Returns the list of columns to use in the select statement. If no columns have been specified, returns all columns relating to fields in the model. If \'with_aliases\' is true, any column names that are duplicated (without the table names) are given unique aliases. This is needed in some cases to avoid ambiguity with nested queries.'
def get_columns(self, with_aliases=False):
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    result = ['(%s) AS %s' % (col[0], qn2(alias))
              for alias, col in six.iteritems(self.query.extra_select)]
    aliases = set(self.query.extra_select.keys())
    if with_aliases:
        col_aliases = aliases.copy()
    else:
        col_aliases = set()
    if self.query.select:
        only_load = self.deferred_to_columns()
        for col in self.query.select:
            if isinstance(col, (list, tuple)):
                alias, column = col
                table = self.query.alias_map[alias].table_name
                if table in only_load and column not in only_load[table]:
                    continue
                r = '%s.%s' % (qn(alias), qn(column))
                if with_aliases:
                    if col[1] in col_aliases:
                        c_alias = 'Col%d' % len(col_aliases)
                        result.append('%s AS %s' % (r, c_alias))
                        aliases.add(c_alias)
                        col_aliases.add(c_alias)
                    else:
                        result.append('%s AS %s' % (r, qn2(col[1])))
                        aliases.add(r)
                        col_aliases.add(col[1])
                else:
                    result.append(r)
                    aliases.add(r)
                    col_aliases.add(col[1])
            else:
                result.append(col.as_sql(qn, self.connection))
                if hasattr(col, 'alias'):
                    aliases.add(col.alias)
                    col_aliases.add(col.alias)
    elif self.query.default_cols:
        cols, new_aliases = self.get_default_columns(with_aliases, col_aliases)
        result.extend(cols)
        aliases.update(new_aliases)
    max_name_length = self.connection.ops.max_name_length()
    result.extend([
        '%s%s' % (
            aggregate.as_sql(qn, self.connection),
            ' AS %s' % qn(truncate_name(alias, max_name_length))
                if alias is not None else ''
        )
        for alias, aggregate in self.query.aggregate_select.items()
    ])
    for table, col in self.query.related_select_cols:
        r = '%s.%s' % (qn(table), qn(col))
        if with_aliases and col in col_aliases:
            c_alias = 'Col%d' % len(col_aliases)
            result.append('%s AS %s' % (r, c_alias))
            aliases.add(c_alias)
            col_aliases.add(c_alias)
        else:
            result.append(r)
            aliases.add(r)
            col_aliases.add(col)
    self._select_aliases = aliases
    return result
'Computes the default columns for selecting every field in the base model. Will sometimes be called to pull in related models (e.g. via select_related), in which case "opts" and "start_alias" will be given to provide a starting point for the traversal. Returns a list of strings, quoted appropriately for use in SQL directly, as well as a set of aliases used in the select statement (if \'as_pairs\' is True, returns a list of (alias, col_name) pairs instead of strings as the first component and None as the second component).'
def get_default_columns(self, with_aliases=False, col_aliases=None, start_alias=None, opts=None, as_pairs=False, local_only=False):
    result = []
    if opts is None:
        opts = self.query.model._meta
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    aliases = set()
    only_load = self.deferred_to_columns()
    if start_alias:
        seen = {None: start_alias}
    for field, model in opts.get_fields_with_model():
        if model == opts.concrete_model:
            model = None
        if local_only and model is not None:
            continue
        if start_alias:
            try:
                alias = seen[model]
            except KeyError:
                link_field = opts.get_ancestor_link(model)
                alias = self.query.join((start_alias, model._meta.db_table,
                                         link_field.column,
                                         model._meta.pk.column))
                seen[model] = alias
        else:
            # If we're starting from the base model of the queryset, the
            # aliases will have already been set up in pre_sql_setup(),
            # so we can save time by not running them again.
            alias = self.query.included_inherited_models[model]
        table = self.query.alias_map[alias].table_name
        if table in only_load and field.column not in only_load[table]:
            continue
        if as_pairs:
            result.append((alias, field.column))
            aliases.add(alias)
            continue
        if with_aliases and field.column in col_aliases:
            c_alias = 'Col%d' % len(col_aliases)
            result.append('%s.%s AS %s'
                          % (qn(alias), qn2(field.column), c_alias))
            col_aliases.add(c_alias)
            aliases.add(c_alias)
        else:
            r = '%s.%s' % (qn(alias), qn2(field.column))
            result.append(r)
            aliases.add(r)
            if with_aliases:
                col_aliases.add(field.column)
    return result, aliases
'Returns a quoted list of fields to use in DISTINCT ON part of the query. Note that this method can alter the tables in the query, and thus it must be called before get_from_clause().'
def get_distinct(self):
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    result = []
    opts = self.query.model._meta
    for name in self.query.distinct_fields:
        parts = name.split(LOOKUP_SEP)
        field, col, alias, _, _ = self._setup_joins(parts, opts, None)
        col, alias = self._final_join_removal(col, alias)
        result.append('%s.%s' % (qn(alias), qn2(col)))
    return result
'Returns a tuple containing a list representing the SQL elements in the "order by" clause, and the list of SQL elements that need to be added to the GROUP BY clause as a result of the ordering. Also sets the ordering_aliases attribute on this instance to a list of extra aliases needed in the select. Determining the ordering SQL can change the tables we need to include, so this should be run *before* get_from_clause().'
def get_ordering(self):
    if self.query.extra_order_by:
        ordering = self.query.extra_order_by
    elif not self.query.default_ordering:
        ordering = self.query.order_by
    else:
        ordering = (self.query.order_by
                    or self.query.model._meta.ordering or [])
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    distinct = self.query.distinct
    select_aliases = self._select_aliases
    result = []
    group_by = []
    ordering_aliases = []
    if self.query.standard_ordering:
        asc, desc = ORDER_DIR['ASC']
    else:
        asc, desc = ORDER_DIR['DESC']
    # It's possible, due to model inheritance, that normal usage might
    # try to include the same field more than once in the ordering. We
    # track the table/column pairs we use and discard any after the
    # first use.
    processed_pairs = set()
    for field in ordering:
        if field == '?':
            result.append(self.connection.ops.random_function_sql())
            continue
        if isinstance(field, int):
            if field < 0:
                order = desc
                field = -field
            else:
                order = asc
            result.append('%s %s' % (field, order))
            group_by.append((str(field), []))
            continue
        col, order = get_order_dir(field, asc)
        if col in self.query.aggregate_select:
            result.append('%s %s' % (qn(col), order))
            continue
        if '.' in field:
            # This came in through an extra(order_by=...) addition.
            # Pass it on verbatim.
            table, col = col.split('.', 1)
            if (table, col) not in processed_pairs:
                elt = '%s.%s' % (qn(table), col)
                processed_pairs.add((table, col))
                if not distinct or elt in select_aliases:
                    result.append('%s %s' % (elt, order))
                    group_by.append((elt, []))
        elif get_order_dir(field)[0] not in self.query.extra_select:
            # 'col' is of the form 'field' or 'field1__field2' or
            # '-field1__field2__field', etc.
            for table, col, order in self.find_ordering_name(
                    field, self.query.model._meta, default_order=asc):
                if (table, col) not in processed_pairs:
                    elt = '%s.%s' % (qn(table), qn2(col))
                    processed_pairs.add((table, col))
                    if distinct and elt not in select_aliases:
                        ordering_aliases.append(elt)
                    result.append('%s %s' % (elt, order))
                    group_by.append((elt, []))
        else:
            elt = qn2(col)
            if distinct and col not in select_aliases:
                ordering_aliases.append(elt)
            result.append('%s %s' % (elt, order))
            group_by.append(self.query.extra_select[col])
    self.query.ordering_aliases = ordering_aliases
    return result, group_by
'Returns the table alias (the name might be ambiguous, the alias will not be) and column name for ordering by the given \'name\' parameter. The \'name\' is of the form \'field1__field2__...__fieldN\'.'
def find_ordering_name(self, name, opts, alias=None, default_order='ASC', already_seen=None):
    name, order = get_order_dir(name, default_order)
    pieces = name.split(LOOKUP_SEP)
    field, col, alias, joins, opts = self._setup_joins(pieces, opts, alias)
    # If we get to this point and the field is a relation to another
    # model, append the default ordering for that model.
    if field.rel and len(joins) > 1 and opts.ordering:
        # Firstly, avoid infinite loops.
        if not already_seen:
            already_seen = set()
        join_tuple = tuple(self.query.alias_map[j].table_name for j in joins)
        if join_tuple in already_seen:
            raise FieldError('Infinite loop caused by ordering.')
        already_seen.add(join_tuple)
        results = []
        for item in opts.ordering:
            results.extend(self.find_ordering_name(item, opts, alias,
                                                   order, already_seen))
        return results
    col, alias = self._final_join_removal(col, alias)
    return [(alias, col, order)]
'A helper method for get_ordering and get_distinct. This method will call query.setup_joins, handle refcounts and then promote the joins. Note that get_ordering and get_distinct must produce the same target columns for the same input, as the distinct columns must form a prefix of the ordering columns. Executing SQL where this is not true is an error.'
def _setup_joins(self, pieces, opts, alias):
    if not alias:
        alias = self.query.get_initial_alias()
    field, target, opts, joins, _, _ = self.query.setup_joins(
        pieces, opts, alias, False)
    # We will later on need to promote those joins that were added to
    # the query afresh above.
    joins_to_promote = [j for j in joins
                        if self.query.alias_refcount[j] < 2]
    alias = joins[-1]
    col = target.column
    if not field.rel:
        # To avoid inadvertent trimming of a necessary alias, use the
        # refcount to show that we are referencing a non-relation field
        # on the model.
        self.query.ref_alias(alias)
    # Must use left outer joins for nullable fields and their relations.
    self.query.promote_joins(joins_to_promote)
    return field, col, alias, joins, opts
'A helper method for get_distinct and get_ordering. This method will trim any unneeded joins from the tail of the join chain. This is very similar to what is done in trim_joins(), but here we will also trim LEFT JOINs. It would be a good idea to consolidate this method with query.trim_joins().'
def _final_join_removal(self, col, alias):
    if alias:
        while 1:
            join = self.query.alias_map[alias]
            if col != join.rhs_join_col:
                break
            self.query.unref_alias(alias)
            alias = join.lhs_alias
            col = join.lhs_join_col
    return col, alias
'Returns a list of strings that are joined together to go after the "FROM" part of the query, as well as a list of any extra parameters that need to be included. Subclasses can override this to create a from-clause via a "select". This should only be called after any SQL construction methods that might change the tables we need. This means the select columns, ordering and distinct must be done first.'
def get_from_clause(self):
    result = []
    qn = self.quote_name_unless_alias
    qn2 = self.connection.ops.quote_name
    first = True
    for alias in self.query.tables:
        if not self.query.alias_refcount[alias]:
            continue
        try:
            name, alias, join_type, lhs, lhs_col, col, nullable = \
                self.query.alias_map[alias]
        except KeyError:
            # Extra tables can end up in self.tables, but not in the
            # alias_map if they aren't in a join. That's OK; skip them.
            continue
        alias_str = ' %s' % alias if alias != name else ''
        if join_type and not first:
            result.append('%s %s%s ON (%s.%s = %s.%s)'
                          % (join_type, qn(name), alias_str, qn(lhs),
                             qn2(lhs_col), qn(alias), qn2(col)))
        else:
            connector = ', ' if not first else ''
            result.append('%s%s%s' % (connector, qn(name), alias_str))
        first = False
    for t in self.query.extra_tables:
        alias, unused = self.query.table_alias(t)
        # Only add the alias if it's not already present (the
        # table_alias() call increments the refcount, so an alias
        # refcount of one means this is the only reference).
        if (alias not in self.query.alias_map
                or self.query.alias_refcount[alias] == 1):
            connector = ', ' if not first else ''
            result.append('%s%s' % (connector, qn(alias)))
            first = False
    return result, []
'Returns a tuple representing the SQL elements in the "group by" clause.'
def get_grouping(self, ordering_group_by):
    qn = self.quote_name_unless_alias
    result, params = [], []
    if self.query.group_by is not None:
        select_cols = self.query.select + self.query.related_select_cols
        if (len(self.query.model._meta.fields) == len(self.query.select)
                and self.connection.features.allows_group_by_pk):
            # Grouping by the primary key is equivalent here, and the
            # backend supports it, so use that optimization.
            self.query.group_by = [(self.query.model._meta.db_table,
                                    self.query.model._meta.pk.column)]
            select_cols = []
        seen = set()
        cols = self.query.group_by + select_cols
        for col in cols:
            if isinstance(col, (list, tuple)):
                sql = '%s.%s' % (qn(col[0]), qn(col[1]))
            elif hasattr(col, 'as_sql'):
                sql = col.as_sql(qn, self.connection)
            else:
                sql = '(%s)' % str(col)
            if sql not in seen:
                result.append(sql)
                seen.add(sql)
        if ordering_group_by and not self.connection.features.allows_group_by_pk:
            for order, order_params in ordering_group_by:
                # Even if we have seen the same SQL string, it might have
                # different params, so we add the same SQL in the
                # "has params" case.
                if order not in seen or params:
                    result.append(order)
                    params.extend(order_params)
                    seen.add(order)
        for extra_select, extra_params in self.query.extra_select.values():
            sql = '(%s)' % str(extra_select)
            result.append(sql)
            params.extend(extra_params)
    return result, params
'Fill in the information needed for a select_related query. The current depth is measured as the number of connections away from the root model (for example, cur_depth=1 means we are looking at models with direct connections to the root model).'
def fill_related_selections(self, opts=None, root_alias=None, cur_depth=1, used=None, requested=None, restricted=None, nullable=None, dupe_set=None, avoid_set=None):
if ((not restricted) and self.query.max_depth and (cur_depth > self.query.max_depth)): return if (not opts): opts = self.query.get_meta() root_alias = self.query.get_initial_alias() self.query.related_select_cols = [] self.query.related_select_fields = [] if (not used): used = set() if (dupe_set is None): dupe_set = set() if (avoid_set is None): avoid_set = set() orig_dupe_set = dupe_set only_load = self.query.get_loaded_field_names() if (requested is None): if isinstance(self.query.select_related, dict): requested = self.query.select_related restricted = True else: restricted = False for (f, model) in opts.get_fields_with_model(): field_model = (model or f.model) if (not select_related_descend(f, restricted, requested, only_load.get(field_model))): continue avoid = avoid_set.copy() dupe_set = orig_dupe_set.copy() table = f.rel.to._meta.db_table promote = (nullable or f.null) if model: int_opts = opts alias = root_alias alias_chain = [] for int_model in opts.get_base_chain(model): if (not int_opts.parents[int_model]): int_opts = int_model._meta continue lhs_col = int_opts.parents[int_model].column dedupe = (lhs_col in opts.duplicate_targets) if dedupe: avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col), ())) dupe_set.add((opts, lhs_col)) int_opts = int_model._meta alias = self.query.join((alias, int_opts.db_table, lhs_col, int_opts.pk.column), exclusions=used, promote=promote) alias_chain.append(alias) for (dupe_opts, dupe_col) in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) else: alias = root_alias dedupe = (f.column in opts.duplicate_targets) if (dupe_set or dedupe): avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ())) if dedupe: dupe_set.add((opts, f.column)) alias = self.query.join((alias, table, f.column, f.rel.get_related_field().column), exclusions=used.union(avoid), promote=promote) used.add(alias) (columns, aliases) = self.get_default_columns(start_alias=alias, opts=f.rel.to._meta, as_pairs=True) self.query.related_select_cols.extend(columns) self.query.related_select_fields.extend(f.rel.to._meta.fields) if restricted: next = requested.get(f.name, {}) else: next = False new_nullable = (f.null or promote) for (dupe_opts, dupe_col) in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) self.fill_related_selections(f.rel.to._meta, alias, (cur_depth + 1), used, next, restricted, new_nullable, dupe_set, avoid) if restricted: related_fields = [(o.field, o.model) for o in opts.get_all_related_objects() if o.field.unique] for (f, model) in related_fields: if (not select_related_descend(f, restricted, requested, only_load.get(model), reverse=True)): continue avoid = avoid_set.copy() dupe_set = orig_dupe_set.copy() table = model._meta.db_table int_opts = opts alias = root_alias alias_chain = [] chain = opts.get_base_chain(f.rel.to) if (chain is not None): for int_model in chain: if (not int_opts.parents[int_model]): int_opts = int_model._meta continue lhs_col = int_opts.parents[int_model].column dedupe = (lhs_col in opts.duplicate_targets) if dedupe: avoid.update(self.query.dupe_avoidance.get((id(opts), lhs_col), ())) dupe_set.add((opts, lhs_col)) int_opts = int_model._meta alias = self.query.join((alias, int_opts.db_table, lhs_col, int_opts.pk.column), exclusions=used, promote=True, reuse=used) alias_chain.append(alias) for (dupe_opts, dupe_col) in dupe_set: self.query.update_dupe_avoidance(dupe_opts, dupe_col, alias) dedupe = (f.column in opts.duplicate_targets) if (dupe_set or dedupe): avoid.update(self.query.dupe_avoidance.get((id(opts), f.column), ())) if dedupe: dupe_set.add((opts, f.column)) alias = self.query.join((alias, table, f.rel.get_related_field().column, f.column), exclusions=used.union(avoid), promote=True) used.add(alias) (columns, aliases) = self.get_default_columns(start_alias=alias, opts=model._meta, as_pairs=True, local_only=True) self.query.related_select_cols.extend(columns) self.query.related_select_fields.extend(model._meta.fields) next = requested.get(f.related_query_name(), {}) new_nullable = True self.fill_related_selections(model._meta, table, (cur_depth + 1), used, next, restricted, new_nullable)
'Converts the self.deferred_loading data structure to mapping of table names to sets of column names which are to be loaded. Returns the dictionary.'
def deferred_to_columns(self):
columns = {} self.query.deferred_to_data(columns, self.query.deferred_to_columns_cb) return columns
'Returns an iterator over the results from executing this query.'
def results_iter(self):
resolve_columns = hasattr(self, 'resolve_columns') fields = None has_aggregate_select = bool(self.query.aggregate_select) if (self.query.select_for_update and transaction.is_managed(self.using)): transaction.set_dirty(self.using) for rows in self.execute_sql(MULTI): for row in rows: if resolve_columns: if (fields is None): if self.query.select_fields: fields = self.query.select_fields else: fields = self.query.model._meta.fields fields = (fields + self.query.related_select_fields) only_load = self.deferred_to_columns() if only_load: db_table = self.query.model._meta.db_table fields = [f for f in fields if ((db_table in only_load) and (f.column in only_load[db_table]))] row = self.resolve_columns(row, fields) if has_aggregate_select: aggregate_start = (len(self.query.extra_select) + len(self.query.select)) aggregate_end = (aggregate_start + len(self.query.aggregate_select)) row = ((tuple(row[:aggregate_start]) + tuple([self.query.resolve_aggregate(value, aggregate, self.connection) for ((alias, aggregate), value) in zip(self.query.aggregate_select.items(), row[aggregate_start:aggregate_end])])) + tuple(row[aggregate_end:])) (yield row)
'Runs the query against the database and returns the result(s). The return value is a single data item if result_type is SINGLE, or an iterator over the results if the result_type is MULTI. result_type is either MULTI (use fetchmany() to retrieve all rows), SINGLE (only retrieve a single row), or None. In this last case, the cursor is returned if any query is executed, since it\'s used by subclasses such as InsertQuery. It\'s possible, however, that no query is needed, as the filters describe an empty set. In that case, None is returned, to avoid any unnecessary database interaction.'
def execute_sql(self, result_type=MULTI):
try: (sql, params) = self.as_sql() if (not sql): raise EmptyResultSet except EmptyResultSet: if (result_type == MULTI): return iter([]) else: return cursor = self.connection.cursor() cursor.execute(sql, params) if (not result_type): return cursor if (result_type == SINGLE): if self.query.ordering_aliases: return cursor.fetchone()[:(- len(self.query.ordering_aliases))] return cursor.fetchone() if self.query.ordering_aliases: result = order_modified_iter(cursor, len(self.query.ordering_aliases), self.connection.features.empty_fetchmany_value) else: result = iter((lambda : cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)), self.connection.features.empty_fetchmany_value) if (not self.connection.features.can_use_chunked_reads): return list(result) return result
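The MULTI branch above leans on Python's two-argument iter(callable, sentinel): the callable is invoked repeatedly until it returns the sentinel. A standalone illustration using sqlite3, where an exhausted cursor's fetchmany() returns an empty list, playing the role of empty_fetchmany_value, and CHUNK stands in for GET_ITERATOR_CHUNK_SIZE:

import sqlite3

conn = sqlite3.connect(':memory:')
cur = conn.cursor()
cur.execute('CREATE TABLE t (n INTEGER)')
cur.executemany('INSERT INTO t VALUES (?)', [(i,) for i in range(10)])
cur.execute('SELECT n FROM t')

CHUNK = 3
# The lambda is called until it returns [] (the sentinel), so rows
# stream out of the database in CHUNK-sized batches.
for rows in iter(lambda: cur.fetchmany(CHUNK), []):
    print(rows)  # [(0,), (1,), (2,)], then [(3,), (4,), (5,)], ...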
'Creates the SQL for this query. Returns the SQL string and list of parameters.'
def as_sql(self):
assert (len(self.query.tables) == 1), 'Can only delete from one table at a time.' qn = self.quote_name_unless_alias result = [('DELETE FROM %s' % qn(self.query.tables[0]))] (where, params) = self.query.where.as_sql(qn=qn, connection=self.connection) if where: result.append(('WHERE %s' % where)) return (' '.join(result), tuple(params))
'Creates the SQL for this query. Returns the SQL string and list of parameters.'
def as_sql(self):
self.pre_sql_setup() if (not self.query.values): return ('', ()) table = self.query.tables[0] qn = self.quote_name_unless_alias result = [('UPDATE %s' % qn(table))] result.append('SET') (values, update_params) = ([], []) for (field, model, val) in self.query.values: if hasattr(val, 'prepare_database_save'): val = val.prepare_database_save(field) else: val = field.get_db_prep_save(val, connection=self.connection) if hasattr(field, 'get_placeholder'): placeholder = field.get_placeholder(val, self.connection) else: placeholder = '%s' if hasattr(val, 'evaluate'): val = SQLEvaluator(val, self.query, allow_joins=False) name = field.column if hasattr(val, 'as_sql'): (sql, params) = val.as_sql(qn, self.connection) values.append(('%s = %s' % (qn(name), sql))) update_params.extend(params) elif (val is not None): values.append(('%s = %s' % (qn(name), placeholder))) update_params.append(val) else: values.append(('%s = NULL' % qn(name))) if (not values): return ('', ()) result.append(', '.join(values)) (where, params) = self.query.where.as_sql(qn=qn, connection=self.connection) if where: result.append(('WHERE %s' % where)) return (' '.join(result), tuple((update_params + params)))
'Execute the specified update. Returns the number of rows affected by the primary update query. The "primary update query" is the first non-empty query that is executed. Row counts for any subsequent, related queries are not available.'
def execute_sql(self, result_type):
cursor = super(SQLUpdateCompiler, self).execute_sql(result_type) rows = ((cursor and cursor.rowcount) or 0) is_empty = (cursor is None) del cursor for query in self.query.get_related_updates(): aux_rows = query.get_compiler(self.using).execute_sql(result_type) if is_empty: rows = aux_rows is_empty = False return rows
'If the update depends on results from other tables, we need to do some munging of the "where" conditions to match the format required for (portable) SQL updates. That is done here. Further, if we are going to be running multiple updates, we pull out the id values to update at this point so that they don\'t change as a result of the progressive updates.'
def pre_sql_setup(self):
self.query.select_related = False self.query.clear_ordering(True) super(SQLUpdateCompiler, self).pre_sql_setup() count = self.query.count_active_tables() if ((not self.query.related_updates) and (count == 1)): return query = self.query.clone(klass=Query) query.bump_prefix() query.extra = {} query.select = [] query.add_fields([query.model._meta.pk.name]) count = query.count_active_tables() if ((not self.query.related_updates) and (count == 1)): return must_pre_select = ((count > 1) and (not self.connection.features.update_can_self_select)) self.query.where = self.query.where_class() if (self.query.related_updates or must_pre_select): idents = [] for rows in query.get_compiler(self.using).execute_sql(MULTI): idents.extend([r[0] for r in rows]) self.query.add_filter(('pk__in', idents)) self.query.related_ids = idents else: self.query.add_filter(('pk__in', query)) for alias in self.query.tables[1:]: self.query.alias_refcount[alias] = 0
'Creates the SQL for this query. Returns the SQL string and list of parameters.'
def as_sql(self, qn=None):
if (qn is None): qn = self.quote_name_unless_alias sql = ('SELECT %s FROM (%s) subquery' % (', '.join([aggregate.as_sql(qn, self.connection) for aggregate in self.query.aggregate_select.values()]), self.query.subquery)) params = self.query.sub_params return (sql, params)
'Returns an iterator over the results from executing this query.'
def results_iter(self):
resolve_columns = hasattr(self, 'resolve_columns') if resolve_columns: from django.db.models.fields import DateTimeField fields = [DateTimeField()] else: from django.db.backends.util import typecast_timestamp needs_string_cast = self.connection.features.needs_datetime_string_cast offset = len(self.query.extra_select) for rows in self.execute_sql(MULTI): for row in rows: date = row[offset] if resolve_columns: date = self.resolve_columns(row, fields)[offset] elif needs_string_cast: date = typecast_timestamp(str(date)) (yield date)
'Adds \'objs\' to the collection of objects to be deleted. If the call is the result of a cascade, \'source\' should be the model that caused it, and \'nullable\' should be set to True if the relation can be null. Returns a list of all objects that were not already collected.'
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
if (not objs): return [] new_objs = [] model = objs[0].__class__ instances = self.data.setdefault(model, set()) for obj in objs: if (obj not in instances): new_objs.append(obj) instances.update(new_objs) if ((source is not None) and (not nullable)): if reverse_dependency: (source, model) = (model, source) self.dependencies.setdefault(source._meta.concrete_model, set()).add(model._meta.concrete_model) return new_objs
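A minimal sketch of the same bookkeeping, stripped of the Django specifics: a per-model set of already-seen instances plus a source -> dependent-models map. The module-level dicts and the add function are hypothetical stand-ins for the Collector's attributes:

data, dependencies = {}, {}

def add(objs, source=None, nullable=False):
    if not objs:
        return []
    model = type(objs[0])
    instances = data.setdefault(model, set())
    # Only instances not seen before are newly collected.
    new_objs = [obj for obj in objs if obj not in instances]
    instances.update(new_objs)
    # A non-nullable relation means source rows must go first.
    if source is not None and not nullable:
        dependencies.setdefault(source, set()).add(model)
    return new_objs

class Book(object):
    pass

b = Book()
print(add([b]) == [b])  # True: newly collected
print(add([b]))         # []:  second add is a no-op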
'Schedules a batch delete. Every instance of \'model\' that is related to an instance of \'obj\' through \'field\' will be deleted.'
def add_batch(self, model, field, objs):
self.batches.setdefault(model, {}).setdefault(field, set()).update(objs)
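The one-liner above is a chained dict.setdefault; spelled out, with hypothetical string stand-ins for the model and field objects:

batches = {}

def add_batch(model, field, objs):
    per_model = batches.setdefault(model, {})     # model -> {field: set(objs)}
    per_model.setdefault(field, set()).update(objs)

add_batch('Book', 'author', {1, 2})
add_batch('Book', 'author', {2, 3})
print(batches)  # {'Book': {'author': {1, 2, 3}}}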
'Schedules a field update. \'objs\' must be a homogeneous iterable collection of model instances (e.g. a QuerySet).'
def add_field_update(self, field, value, objs):
if (not objs): return model = objs[0].__class__ self.field_updates.setdefault(model, {}).setdefault((field, value), set()).update(objs)
'Determines if the objects in the given queryset-like object can be fast-deleted. This can be done if there are no cascades, no parents and no signal listeners for the object class. The \'from_field\' tells where we are coming from - we need this to determine if the objects are in fact to be deleted. It also allows skipping a parent -> child -> parent chain that would otherwise prevent fast-deleting the child.'
def can_fast_delete(self, objs, from_field=None):
if (from_field and (from_field.rel.on_delete is not CASCADE)): return False if (not (hasattr(objs, 'model') and hasattr(objs, '_raw_delete'))): return False model = objs.model if (signals.pre_delete.has_listeners(model) or signals.post_delete.has_listeners(model) or signals.m2m_changed.has_listeners(model)): return False opts = model._meta if any(((link != from_field) for link in opts.concrete_model._meta.parents.values())): return False for related in opts.get_all_related_objects(include_hidden=True, include_proxy_eq=True): if (related.field.rel.on_delete is not DO_NOTHING): return False for relation in opts.many_to_many: if (not relation.rel.through): return False return True
'Adds \'objs\' to the collection of objects to be deleted as well as all parent instances. \'objs\' must be a homogeneous iterable collection of model instances (e.g. a QuerySet). If \'collect_related\' is True, related objects will be handled by their respective on_delete handler. If the call is the result of a cascade, \'source\' should be the model that caused it and \'nullable\' should be set to True if the relation can be null. If \'reverse_dependency\' is True, \'source\' will be deleted before the current model, rather than after. (Needed for cascading to parent models, the one case in which the cascade follows the forwards direction of an FK rather than the reverse direction.)'
def collect(self, objs, source=None, nullable=False, collect_related=True, source_attr=None, reverse_dependency=False):
if self.can_fast_delete(objs): self.fast_deletes.append(objs) return new_objs = self.add(objs, source, nullable, reverse_dependency=reverse_dependency) if (not new_objs): return model = new_objs[0].__class__ concrete_model = model._meta.concrete_model for ptr in six.itervalues(concrete_model._meta.parents): if ptr: parent_objs = [getattr(obj, ptr.name) for obj in new_objs] self.collect(parent_objs, source=model, source_attr=ptr.rel.related_name, collect_related=False, reverse_dependency=True) if collect_related: for related in model._meta.get_all_related_objects(include_hidden=True, include_proxy_eq=True): field = related.field if (field.rel.on_delete == DO_NOTHING): continue sub_objs = self.related_objects(related, new_objs) if self.can_fast_delete(sub_objs, from_field=field): self.fast_deletes.append(sub_objs) elif sub_objs: field.rel.on_delete(self, field, sub_objs, self.using) for relation in model._meta.many_to_many: if (not relation.rel.through): sub_objs = relation.bulk_related_objects(new_objs, self.using) self.collect(sub_objs, source=model, source_attr=relation.rel.related_name, nullable=True)
'Gets a QuerySet of objects related to ``objs`` via the relation ``related``.'
def related_objects(self, related, objs):
return related.model._base_manager.using(self.using).filter(**{('%s__in' % related.field.name): objs})
'Returns field\'s value prepared for saving into a database.'
def get_prep_value(self, value):
if (value is None): return None return six.text_type(value)
'Returns field\'s value just before saving.'
def pre_save(self, model_instance, add):
file = super(FileField, self).pre_save(model_instance, add) if (file and (not file._committed)): file.save(file.name, file, save=False) return file
'Updates field\'s width and height fields, if defined. This method is hooked up to model\'s post_init signal to update dimensions after instantiating a model instance. However, dimensions won\'t be updated if the dimensions fields are already populated. This avoids unnecessary recalculation when loading an object from the database. Dimensions can be forced to update with force=True, which is how ImageFileDescriptor.__set__ calls this method.'
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
has_dimension_fields = (self.width_field or self.height_field) if (not has_dimension_fields): return file = getattr(instance, self.attname) if ((not file) and (not force)): return dimension_fields_filled = (not ((self.width_field and (not getattr(instance, self.width_field))) or (self.height_field and (not getattr(instance, self.height_field))))) if (dimension_fields_filled and (not force)): return if file: width = file.width height = file.height else: width = None height = None if self.width_field: setattr(instance, self.width_field, width) if self.height_field: setattr(instance, self.height_field, height)
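For context, this is the standard way a model opts into the dimension caching this method maintains (requires a configured Django project with Pillow installed):

from django.db import models

class Photo(models.Model):
    # width_field/height_field name sibling columns that cache the
    # image dimensions, so reading them never reopens the file.
    image = models.ImageField(upload_to='photos/',
                              width_field='image_width',
                              height_field='image_height')
    image_width = models.PositiveIntegerField(null=True, blank=True)
    image_height = models.PositiveIntegerField(null=True, blank=True)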
'Converts the input value into the expected Python data type, raising django.core.exceptions.ValidationError if the data can\'t be converted. Returns the converted value. Subclasses should override this.'
def to_python(self, value):
return value
'Validates value and throws ValidationError. Subclasses should override this to provide validation logic.'
def validate(self, value, model_instance):
if (not self.editable): return if (self._choices and (value not in validators.EMPTY_VALUES)): for (option_key, option_value) in self.choices: if isinstance(option_value, (list, tuple)): for (optgroup_key, optgroup_value) in option_value: if (value == optgroup_key): return elif (value == option_key): return msg = (self.error_messages[u'invalid_choice'] % value) raise exceptions.ValidationError(msg) if ((value is None) and (not self.null)): raise exceptions.ValidationError(self.error_messages[u'null']) if ((not self.blank) and (value in validators.EMPTY_VALUES)): raise exceptions.ValidationError(self.error_messages[u'blank'])
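The nested loop exists to support grouped ("optgroup") choices, where an option's value is itself a sequence of (key, label) pairs. A self-contained sketch of the same walk, with a hypothetical is_valid_choice helper:

MEDIA_CHOICES = (
    ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
    ('Video', (('vhs', 'VHS Tape'), ('dvd', 'DVD'))),
    ('unknown', 'Unknown'),
)

def is_valid_choice(value, choices):
    for option_key, option_value in choices:
        if isinstance(option_value, (list, tuple)):  # an optgroup
            if any(value == key for key, _ in option_value):
                return True
        elif value == option_key:  # a plain choice
            return True
    return False

print(is_valid_choice('cd', MEDIA_CHOICES))       # True
print(is_valid_choice('betamax', MEDIA_CHOICES))  # False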
'Convert the value\'s type and run validation. Validation errors from to_python and validate are propagated. The correct value is returned if no error is raised.'
def clean(self, value, model_instance):
value = self.to_python(value) self.validate(value, model_instance) self.run_validators(value) return value
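The three-step pipeline in miniature, using a hypothetical Integerish field rather than a real Django class:

class Integerish(object):
    def to_python(self, value):
        return int(value)              # conversion; may raise on bad input
    def validate(self, value, instance):
        if value < 0:
            raise ValueError('must be non-negative')
    def run_validators(self, value):
        pass                           # external validators would run here
    def clean(self, value, instance):
        value = self.to_python(value)  # 1. convert
        self.validate(value, instance) # 2. field-level validation
        self.run_validators(value)     # 3. attached validators
        return value

print(Integerish().clean('42', None))  # 42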
'Returns the database column data type for this field, for the provided connection.'
def db_type(self, connection):
data = DictWrapper(self.__dict__, connection.ops.quote_name, u'qn_') try: return (connection.creation.data_types[self.get_internal_type()] % data) except KeyError: return None
'Returns field\'s value just before saving.'
def pre_save(self, model_instance, add):
return getattr(model_instance, self.attname)
'Perform preliminary non-db specific value checks and conversions.'
def get_prep_value(self, value):
return value
'Returns field\'s value prepared for interacting with the database backend. Used by the default implementations of ``get_db_prep_save`` and ``get_db_prep_lookup``.'
def get_db_prep_value(self, value, connection, prepared=False):
if (not prepared): value = self.get_prep_value(value) return value
'Returns field\'s value prepared for saving into a database.'
def get_db_prep_save(self, value, connection):
return self.get_db_prep_value(value, connection=connection, prepared=False)
'Perform preliminary non-db specific lookup checks and conversions.'
def get_prep_lookup(self, lookup_type, value):
if hasattr(value, u'prepare'): return value.prepare() if hasattr(value, u'_prepare'): return value._prepare() if (lookup_type in (u'regex', u'iregex', u'month', u'day', u'week_day', u'search', u'contains', u'icontains', u'iexact', u'startswith', u'istartswith', u'endswith', u'iendswith', u'isnull')): return value elif (lookup_type in (u'exact', u'gt', u'gte', u'lt', u'lte')): return self.get_prep_value(value) elif (lookup_type in (u'range', u'in')): return [self.get_prep_value(v) for v in value] elif (lookup_type == u'year'): try: return int(value) except ValueError: raise ValueError(u'The __year lookup type requires an integer argument') raise TypeError((u'Field has invalid lookup: %s' % lookup_type))
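The dispatch reduces to: pass some lookups through untouched, run scalar comparisons through get_prep_value, map 'range'/'in' over a sequence, and coerce 'year' to int. A simplified stand-in, where prep plays the role of get_prep_value:

PASS_THROUGH = {'regex', 'iregex', 'contains', 'icontains', 'isnull'}
PREPPED = {'exact', 'gt', 'gte', 'lt', 'lte'}

def prep_lookup(lookup_type, value, prep=str):
    if lookup_type in PASS_THROUGH:
        return value                     # backend handles the raw value
    if lookup_type in PREPPED:
        return prep(value)               # scalar comparison
    if lookup_type in ('range', 'in'):
        return [prep(v) for v in value]  # element-wise prep
    if lookup_type == 'year':
        return int(value)
    raise TypeError('Field has invalid lookup: %s' % lookup_type)

print(prep_lookup('in', [1, 2]))    # ['1', '2']
print(prep_lookup('year', '2012'))  # 2012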
'Returns field\'s value prepared for database lookup.'
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
if (not prepared): value = self.get_prep_lookup(lookup_type, value) if hasattr(value, u'get_compiler'): value = value.get_compiler(connection=connection) if (hasattr(value, u'as_sql') or hasattr(value, u'_as_sql')): if hasattr(value, u'relabel_aliases'): return value if hasattr(value, u'as_sql'): (sql, params) = value.as_sql() else: (sql, params) = value._as_sql(connection=connection) return QueryWrapper((u'(%s)' % sql), params) if (lookup_type in (u'regex', u'iregex', u'month', u'day', u'week_day', u'search')): return [value] elif (lookup_type in (u'exact', u'gt', u'gte', u'lt', u'lte')): return [self.get_db_prep_value(value, connection=connection, prepared=prepared)] elif (lookup_type in (u'range', u'in')): return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value] elif (lookup_type in (u'contains', u'icontains')): return [(u'%%%s%%' % connection.ops.prep_for_like_query(value))] elif (lookup_type == u'iexact'): return [connection.ops.prep_for_iexact_query(value)] elif (lookup_type in (u'startswith', u'istartswith')): return [(u'%s%%' % connection.ops.prep_for_like_query(value))] elif (lookup_type in (u'endswith', u'iendswith')): return [(u'%%%s' % connection.ops.prep_for_like_query(value))] elif (lookup_type == u'isnull'): return [] elif (lookup_type == u'year'): if (self.get_internal_type() == u'DateField'): return connection.ops.year_lookup_bounds_for_date_field(value) else: return connection.ops.year_lookup_bounds(value)
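For the LIKE-style lookups, the value is escaped and then wrapped with SQL wildcards. A sketch of just that wrapping; the escape argument is a toy stand-in for connection.ops.prep_for_like_query, which would escape %, _ and \ in the user-supplied value:

def like_params(lookup_type, value, escape=lambda v: v):
    v = escape(value)
    return {
        'contains':   ['%%%s%%' % v],  # %value%
        'startswith': ['%s%%' % v],    # value%
        'endswith':   ['%%%s' % v],    # %value
    }[lookup_type]

print(like_params('contains', 'abc'))    # ['%abc%']
print(like_params('startswith', 'abc'))  # ['abc%']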
'Returns a boolean of whether this field has a default value.'
def has_default(self):
return (self.default is not NOT_PROVIDED)
'Returns the default value for this field.'
def get_default(self):
if self.has_default(): if callable(self.default): return self.default() return force_text(self.default, strings_only=True) if ((not self.empty_strings_allowed) or (self.null and (not connection.features.interprets_empty_strings_as_nulls))): return None return u''
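The callable-default branch is why passing a function (not its result) as default gives each row a fresh value; standard Django usage, shown here with uuid.uuid4:

import uuid
from django.db import models

class Ticket(models.Model):
    # uuid.uuid4 (the function) is called each time a default is needed;
    # uuid.uuid4() would compute one value at class-definition time and
    # reuse it for every row.
    code = models.CharField(max_length=36, default=uuid.uuid4)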
'Returns choices with a default blank choice included, for use as SelectField choices for this field.'
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
first_choice = ((include_blank and blank_choice) or []) if self.choices: return (first_choice + list(self.choices)) rel_model = self.rel.to if hasattr(self.rel, u'get_related_field'): lst = [(getattr(x, self.rel.get_related_field().attname), smart_text(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)] else: lst = [(x._get_pk_val(), smart_text(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)] return (first_choice + lst)
'Returns flattened choices with a default blank choice included.'
def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
first_choice = ((include_blank and blank_choice) or []) return (first_choice + list(self.flatchoices))
'Returns a string value of this field from the passed obj. This is used by the serialization framework.'
def value_to_string(self, obj):
return smart_text(self._get_val_from_obj(obj))
'Flattened version of choices tuple.'
def _get_flatchoices(self):
flat = [] for (choice, value) in self.choices: if isinstance(value, (list, tuple)): flat.extend(value) else: flat.append((choice, value)) return flat
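The flattening in isolation: members of an optgroup are hoisted to the top level while plain pairs pass through unchanged:

choices = (
    ('Audio', (('vinyl', 'Vinyl'), ('cd', 'CD'))),
    ('unknown', 'Unknown'),
)
flat = []
for choice, value in choices:
    if isinstance(value, (list, tuple)):
        flat.extend(value)           # hoist the optgroup members
    else:
        flat.append((choice, value)) # plain (key, label) pair
print(flat)  # [('vinyl', 'Vinyl'), ('cd', 'CD'), ('unknown', 'Unknown')]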
'Returns a django.forms.Field instance for this database Field.'
def formfield(self, form_class=forms.CharField, **kwargs):
defaults = {u'required': (not self.blank), u'label': capfirst(self.verbose_name), u'help_text': self.help_text} if self.has_default(): if callable(self.default): defaults[u'initial'] = self.default defaults[u'show_hidden_initial'] = True else: defaults[u'initial'] = self.get_default() if self.choices: include_blank = (self.blank or (not (self.has_default() or (u'initial' in kwargs)))) defaults[u'choices'] = self.get_choices(include_blank=include_blank) defaults[u'coerce'] = self.to_python if self.null: defaults[u'empty_value'] = None form_class = forms.TypedChoiceField for k in list(kwargs): if (k not in (u'coerce', u'empty_value', u'choices', u'required', u'widget', u'label', u'initial', u'help_text', u'error_messages', u'show_hidden_initial')): del kwargs[k] defaults.update(kwargs) return form_class(**defaults)
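The defaults-then-kwargs merge is what lets per-call keyword arguments override the field's own preferences; a custom field conventionally hooks in the same way (HandField here is illustrative):

from django import forms
from django.db import models

class HandField(models.CharField):
    def formfield(self, **kwargs):
        defaults = {'form_class': forms.CharField}  # our preference...
        defaults.update(kwargs)                     # ...but the caller wins
        return super(HandField, self).formfield(**defaults)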
'Returns the value of this field in the given model instance.'
def value_from_object(self, obj):
return getattr(obj, self.attname)
'Displays the module, class and name of the field.'
def __repr__(self):
path = (u'%s.%s' % (self.__class__.__module__, self.__class__.__name__)) name = getattr(self, u'name', None) if (name is not None): return (u'<%s: %s>' % (path, name)) return (u'<%s>' % path)