desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def ordered(self):
    """
    Returns True if the QuerySet is ordered -- i.e. has an order_by()
    clause or a default ordering on the model.
    """
    if self.query.extra_order_by or self.query.order_by:
        return True
    if self.query.default_ordering and self.query.model._meta.ordering:
        return True
    return False
@property
def db(self):
    """Return the database that will be used if this query is executed now"""
    # Writes and reads may be routed to different databases; an explicitly
    # pinned _db always wins over the router's choice.
    if self._for_write:
        return self._db or router.db_for_write(self.model)
    return self._db or router.db_for_read(self.model)
def _batched_insert(self, objs, fields, batch_size):
    """
    A little helper method for bulk_insert to insert the bulk one batch
    at a time. Inserts a batch from the front of the bulk and then the
    remaining objects, batch by batch.
    """
    if not objs:
        return
    ops = connections[self.db].ops
    # Fall back to the backend's maximum batch size, never below 1.
    batch_size = batch_size or max(ops.bulk_batch_size(fields, objs), 1)
    batches = [objs[i:i + batch_size] for i in range(0, len(objs), batch_size)]
    for batch in batches:
        self.model._base_manager._insert(batch, fields=fields, using=self.db)
def _fill_cache(self, num=None):
    """
    Fills the result cache with 'num' more entries (or until the results
    iterator is exhausted).
    """
    if self._iter:
        try:
            for i in range(num or ITER_CHUNK_SIZE):
                self._result_cache.append(next(self._iter))
        except StopIteration:
            # Iterator exhausted; drop it so later calls become no-ops.
            self._iter = None
def _next_is_sticky(self):
    """
    Indicates that the next filter call and the one following that should
    be treated as a single filter. This is only important when it comes to
    determining when to reuse tables for many-to-many filters. Required so
    that we can filter naturally on the results of related managers.

    This doesn't return a clone of the current QuerySet (it returns "self").
    The method is only used internally and should be immediately followed
    by a filter() that does create a clone.
    """
    self._sticky_filter = True
    return self
def _merge_sanity_check(self, other):
    """
    Checks that we are merging two comparable QuerySet classes. By default
    this does nothing, but see the ValuesQuerySet for an example of where
    it's useful.
    """
    pass
def _merge_known_related_objects(self, other):
    """Keep track of all known related objects from either QuerySet instance."""
    for field, objects in other._known_related_objects.items():
        self._known_related_objects.setdefault(field, {}).update(objects)
def _setup_aggregate_query(self, aggregates):
    """Prepare the query for computing a result that contains aggregate annotations."""
    opts = self.model._meta
    if self.query.group_by is None:
        # No explicit grouping yet: group over all concrete model fields.
        field_names = [f.attname for f in opts.fields]
        self.query.add_fields(field_names, False)
        self.query.set_group_by()
def _as_sql(self, connection):
    """Returns the internal query's SQL and parameters (as a tuple)."""
    obj = self.values('pk')
    if obj._db is None or connection == connections[obj._db]:
        return obj.query.get_compiler(connection=connection).as_nested_sql()
    raise ValueError("Can't do subqueries with queries on different DBs.")
def _setup_query(self):
    """
    Constructs the field_names list that the values query will be
    retrieving. Called by the _clone() method after initializing the rest
    of the instance.
    """
    self.query.clear_deferred_loading()
    self.query.clear_select_fields()
    if self._fields:
        self.extra_names = []
        self.aggregate_names = []
        if not self.query.extra and not self.query.aggregates:
            # Short-cut: no extra()/annotate() columns to disentangle.
            self.field_names = list(self._fields)
        else:
            self.query.default_cols = False
            self.field_names = []
            for f in self._fields:
                # Partition the requested names into extra/aggregate/plain.
                if f in self.query.extra:
                    self.extra_names.append(f)
                elif f in self.query.aggregate_select:
                    self.aggregate_names.append(f)
                else:
                    self.field_names.append(f)
    else:
        # Default to all model fields; None means "no mask applied".
        self.extra_names = None
        self.field_names = [f.attname for f in self.model._meta.fields]
        self.aggregate_names = None
    self.query.select = []
    if self.extra_names is not None:
        self.query.set_extra_mask(self.extra_names)
    self.query.add_fields(self.field_names, True)
    if self.aggregate_names is not None:
        self.query.set_aggregate_mask(self.aggregate_names)
def _clone(self, klass=None, setup=False, **kwargs):
    """Cloning a ValuesQuerySet preserves the current fields."""
    c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
    if not hasattr(c, '_fields'):
        # Only clone self._fields if the clone didn't already get its own.
        c._fields = self._fields[:]
    c.field_names = self.field_names
    c.extra_names = self.extra_names
    c.aggregate_names = self.aggregate_names
    if setup and hasattr(c, '_setup_query'):
        c._setup_query()
    return c
def _setup_aggregate_query(self, aggregates):
    """Prepare the query for computing a result that contains aggregate annotations."""
    self.query.set_group_by()
    if self.aggregate_names is not None:
        self.aggregate_names.extend(aggregates)
        self.query.set_aggregate_mask(self.aggregate_names)
    super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
def _as_sql(self, connection):
    """
    For ValueQuerySet (and subclasses like ValuesListQuerySet), they can
    only be used as nested queries if they're already set up to select
    only a single field (in which case, that is the field column that is
    returned). This differs from QuerySet.as_sql(), where the column to
    select is set up by Django.
    """
    multi_explicit = self._fields and len(self._fields) > 1
    multi_implicit = not self._fields and len(self.model._meta.fields) > 1
    if multi_explicit or multi_implicit:
        raise TypeError('Cannot use a multi-field %s as a filter value.'
                        % self.__class__.__name__)
    obj = self._clone()
    if obj._db is None or connection == connections[obj._db]:
        return obj.query.get_compiler(connection=connection).as_nested_sql()
    raise ValueError("Can't do subqueries with queries on different DBs.")
def _prepare(self):
    """
    Validates that we aren't trying to do a query like
    value__in=qs.values('value1', 'value2'), which isn't valid.
    """
    multi_explicit = self._fields and len(self._fields) > 1
    multi_implicit = not self._fields and len(self.model._meta.fields) > 1
    if multi_explicit or multi_implicit:
        raise TypeError('Cannot use a multi-field %s as a filter value.'
                        % self.__class__.__name__)
    return self
def _setup_query(self):
    """
    Sets up any special features of the query attribute. Called by the
    _clone() method after initializing the rest of the instance.
    """
    self.query.clear_deferred_loading()
    self.query = self.query.clone(klass=sql.DateQuery, setup=True)
    self.query.select = []
    self.query.add_date_select(self._field_name, self._kind, self._order)
def all(self):
    """Always returns EmptyQuerySet."""
    return self
def filter(self, *args, **kwargs):
    """Always returns EmptyQuerySet."""
    return self
def exclude(self, *args, **kwargs):
    """Always returns EmptyQuerySet."""
    return self
def complex_filter(self, filter_obj):
    """Always returns EmptyQuerySet."""
    return self
def select_related(self, *fields, **kwargs):
    """Always returns EmptyQuerySet."""
    return self
def annotate(self, *args, **kwargs):
    """Always returns EmptyQuerySet."""
    return self
def order_by(self, *field_names):
    """Always returns EmptyQuerySet."""
    return self
def distinct(self, *field_names):
    """Always returns EmptyQuerySet."""
    return self
def extra(self, select=None, where=None, params=None, tables=None,
          order_by=None, select_params=None):
    """Always returns EmptyQuerySet."""
    # Guard kept for parity with the real QuerySet: slicing freezes a query.
    assert self.query.can_filter(), \
        'Cannot change a query once a slice has been taken'
    return self
def reverse(self):
    """Always returns EmptyQuerySet."""
    return self
def defer(self, *fields):
    """Always returns EmptyQuerySet."""
    return self
def only(self, *fields):
    """Always returns EmptyQuerySet."""
    return self
def update(self, **kwargs):
    """Don't update anything."""
    return 0
def aggregate(self, *args, **kwargs):
    """Return a dict mapping the aggregate names to None"""
    for arg in args:
        # Positional aggregates are keyed by their default alias.
        kwargs[arg.default_alias] = arg
    return dict.fromkeys(kwargs)
def values(self, *fields):
    """Always returns EmptyQuerySet."""
    return self
def values_list(self, *fields, **kwargs):
    """Always returns EmptyQuerySet."""
    return self
@property
def db(self):
    """Return the database that will be used if this query is executed now"""
    return self._db or router.db_for_read(self.model)
def using(self, alias):
    """Selects which database this Raw QuerySet should execute its query against."""
    return RawQuerySet(self.raw_query,
                       model=self.model,
                       query=self.query.clone(using=alias),
                       params=self.params,
                       translations=self.translations,
                       using=alias)
@property
def columns(self):
    """
    A list of model field names in the order they'll appear in the
    query results.
    """
    if not hasattr(self, '_columns'):
        # Compute once, then cache on the instance.
        self._columns = self.query.get_columns()
        for query_name, model_name in self.translations.items():
            try:
                index = self._columns.index(query_name)
                self._columns[index] = model_name
            except ValueError:
                # Ignore translations for non-existent column names.
                pass
    return self._columns
@property
def model_fields(self):
    """A dict mapping column names to model field names."""
    if not hasattr(self, '_model_fields'):
        converter = connections[self.db].introspection.table_name_converter
        self._model_fields = {}
        for field in self.model._meta.fields:
            name, column = field.get_attname_column()
            self._model_fields[converter(column)] = field
    return self._model_fields
def __init__(self, lookup, **extra):
    """
    Instantiate a new aggregate.

    * lookup is the field on which the aggregate operates.
    * extra is a dictionary of additional data to provide for the
      aggregate definition.

    Also utilizes the class variables:
    * name, the identifier for this aggregate function.
    """
    self.lookup = lookup
    self.extra = extra
def add_to_query(self, query, alias, col, source, is_summary):
    """
    Add the aggregate to the nominated query.

    This method is used to convert the generic Aggregate definition into a
    backend-specific definition.

    * query is the backend-specific query instance to which the aggregate
      is to be added.
    * col is a column reference describing the subject field of the
      aggregate. It can be an alias, or a tuple describing a table and
      column name.
    * source is the underlying field or aggregate definition for the
      column reference. If the aggregate is not an ordinal or computed
      type, this reference is used to determine the coerced output type
      of the aggregate.
    * is_summary is a boolean that is set True if the aggregate is a
      summary value rather than an annotation.
    """
    klass = getattr(query.aggregates_module, self.name)
    query.aggregates[alias] = klass(col, source=source,
                                    is_summary=is_summary, **self.extra)
def __get__(self, instance, owner):
    """
    Retrieves and caches the value from the datastore on the first lookup.
    Returns the cached value.
    """
    from django.db.models.fields import FieldDoesNotExist
    non_deferred_model = instance._meta.proxy_for_model
    opts = non_deferred_model._meta
    assert instance is not None
    data = instance.__dict__
    if data.get(self.field_name, self) is self:
        # Not yet loaded: resolve the field, then fetch its value.
        try:
            f = opts.get_field_by_name(self.field_name)[0]
        except FieldDoesNotExist:
            # field_name is an attname, not a field name; search by attname.
            f = [f for f in opts.fields if f.attname == self.field_name][0]
        name = f.name
        # A parent link may already hold the value (e.g. the primary key).
        val = self._check_parent_chain(instance, name)
        if val is None:
            # Fall back to a targeted database fetch of just this field.
            val = getattr(
                non_deferred_model._base_manager.only(name).using(
                    instance._state.db).get(pk=instance.pk),
                self.field_name)
        data[self.field_name] = val
    return data[self.field_name]
def __set__(self, instance, value):
    """
    Deferred loading attributes can be set normally (which means there
    will never be a database lookup involved).
    """
    instance.__dict__[self.field_name] = value
def _check_parent_chain(self, instance, name):
    """
    Check if the field value can be fetched from a parent field already
    loaded in the instance. This can be done if the to-be fetched field is
    a primary key field.
    """
    opts = instance._meta
    f = opts.get_field_by_name(name)[0]
    link_field = opts.get_ancestor_link(f.model)
    if f.primary_key and f != link_field:
        return getattr(instance, link_field.attname)
    return None
def _set_creation_counter(self):
    """
    Sets the creation counter value for this instance and increments the
    class-level copy.
    """
    self.creation_counter = Manager.creation_counter
    Manager.creation_counter += 1
def _copy_to_model(self, model):
    """
    Makes a copy of the manager and assigns it to 'model', which should be
    a child of the existing model (used when inheriting a manager from an
    abstract base class).
    """
    assert issubclass(model, self.model)
    mgr = copy.copy(self)
    mgr._set_creation_counter()
    mgr.model = model
    mgr._inherited = True
    return mgr
def get_query_set(self):
    """
    Returns a new QuerySet object. Subclasses can override this method to
    easily customize the behavior of the Manager.
    """
    return QuerySet(self.model, using=self._db)
def pk_index(self):
    """Returns the index of the primary key field in the self.fields list."""
    return self.fields.index(self.pk)
def setup_proxy(self, target):
    """Does the internal setup so that the current model is a proxy for "target"."""
    # A proxy shares its target's primary key and database table.
    self.pk = target._meta.pk
    self.proxy_for_model = target
    self.db_table = target._meta.db_table
def verbose_name_raw(self):
    """
    There are a few places where the untranslated verbose name is needed
    (so that we get the same value regardless of currently active locale).
    """
    lang = get_language()
    deactivate_all()
    raw = force_text(self.verbose_name)
    # Restore the previously active translation.
    activate(lang)
    return raw
def _swapped(self):
    """
    Has this model been swapped out for another? If so, return the model
    name of the replacement; otherwise, return None.

    For historical reasons, model name lookups using get_model() are
    case insensitive, so we make sure we are case insensitive here.
    """
    if self.swappable:
        model_label = '%s.%s' % (self.app_label, self.object_name.lower())
        swapped_for = getattr(settings, self.swappable, None)
        if swapped_for:
            try:
                swapped_label, swapped_object = swapped_for.split('.')
            except ValueError:
                # Setting not in the "app_label.model_name" format; report
                # it as swapped anyway.
                return swapped_for
            if '%s.%s' % (swapped_label, swapped_object.lower()) not in (None, model_label):
                return swapped_for
    return None
def _fields(self):
    """
    The getter for self.fields. This returns the list of field objects
    available to this model (including through parent models).

    Callers are not permitted to modify this list, since it's a reference
    to this instance (not a copy).
    """
    try:
        self._field_name_cache
    except AttributeError:
        self._fill_fields_cache()
    return self._field_name_cache
def get_fields_with_model(self):
    """
    Returns a sequence of (field, model) pairs for all fields. The "model"
    element is None for fields on the current model. Mostly of use when
    constructing queries so that we know which model a field belongs to.
    """
    try:
        self._field_cache
    except AttributeError:
        self._fill_fields_cache()
    return self._field_cache
def get_m2m_with_model(self):
    """The many-to-many version of get_fields_with_model()."""
    try:
        self._m2m_cache
    except AttributeError:
        self._fill_m2m_cache()
    return list(six.iteritems(self._m2m_cache))
def get_field(self, name, many_to_many=True):
    """Returns the requested field by name. Raises FieldDoesNotExist on error."""
    # Optionally widen the search to include many-to-many fields.
    to_search = (many_to_many and (self.fields + self.many_to_many)) or self.fields
    for f in to_search:
        if f.name == name:
            return f
    raise FieldDoesNotExist('%s has no field named %r' % (self.object_name, name))
def get_field_by_name(self, name):
    """
    Returns the (field_object, model, direct, m2m), where field_object is
    the Field instance for the given name, model is the model containing
    this field (None for local fields), direct is True if the field exists
    on this model, and m2m is True for many-to-many relations. When
    'direct' is False, 'field_object' is the corresponding RelatedObject
    for this field (since the field doesn't have an instance associated
    with it).

    Uses a cache internally, so after the first access, this is very fast.
    """
    try:
        try:
            return self._name_map[name]
        except AttributeError:
            # Cache not built yet; build it and retry the lookup.
            cache = self.init_name_map()
            return cache[name]
    except KeyError:
        raise FieldDoesNotExist('%s has no field named %r'
                                % (self.object_name, name))
def get_all_field_names(self):
    """
    Returns a list of all field names that are possible for this model
    (including reverse relation names). This is used for pretty printing
    debugging output (a list of choices), so any internal-only field
    names are not included.
    """
    try:
        cache = self._name_map
    except AttributeError:
        cache = self.init_name_map()
    # Hidden reverse relations end in '+' and are excluded from output.
    return [name for name in sorted(cache) if not name.endswith('+')]
def init_name_map(self):
    """Initialises the field name -> field object mapping."""
    cache = {}
    # Later insertions win, so locals/m2m override reverse relations.
    for f, model in self.get_all_related_m2m_objects_with_model():
        cache[f.field.related_query_name()] = (f, model, False, True)
    for f, model in self.get_all_related_objects_with_model():
        cache[f.field.related_query_name()] = (f, model, False, False)
    for f, model in self.get_m2m_with_model():
        cache[f.name] = (f, model, True, True)
    for f, model in self.get_fields_with_model():
        cache[f.name] = (f, model, True, False)
    if app_cache_ready():
        # Only memoize once the app cache is fully populated.
        self._name_map = cache
    return cache
def get_all_related_objects_with_model(self, local_only=False,
                                       include_hidden=False,
                                       include_proxy_eq=False):
    """
    Returns a list of (related-object, model) pairs. Similar to
    get_fields_with_model().
    """
    try:
        self._related_objects_cache
    except AttributeError:
        self._fill_related_objects_cache()
    predicates = []
    if local_only:
        # Keep only pairs whose model element is falsy (local fields).
        predicates.append(lambda k, v: not v)
    if not include_hidden:
        predicates.append(lambda k, v: not k.field.rel.is_hidden())
    cache = (self._related_objects_proxy_cache if include_proxy_eq
             else self._related_objects_cache)
    return [t for t in cache.items() if all(p(*t) for p in predicates)]
def get_all_related_m2m_objects_with_model(self):
    """
    Returns a list of (related-m2m-object, model) pairs. Similar to
    get_fields_with_model().
    """
    try:
        cache = self._related_many_to_many_cache
    except AttributeError:
        cache = self._fill_related_many_to_many_cache()
    return list(six.iteritems(cache))
def get_base_chain(self, model):
    """
    Returns a list of parent classes leading to 'model' (ordered from
    closest to most distant ancestor). This has to handle the case where
    'model' is a grandparent or even more distant relation.
    """
    if not self.parents:
        return
    if model in self.parents:
        return [model]
    for parent in self.parents:
        res = parent._meta.get_base_chain(model)
        if res:
            res.insert(0, parent)
            return res
    raise TypeError('%r is not an ancestor of this model'
                    % model._meta.module_name)
def get_parent_list(self):
    """
    Returns all the ancestors of this model as a set. Useful for
    determining if something is an ancestor, regardless of lineage.
    """
    result = set()
    for parent in self.parents:
        result.add(parent)
        # Recurse into each parent's own ancestors.
        result.update(parent._meta.get_parent_list())
    return result
def get_ancestor_link(self, ancestor):
    """
    Returns the field on the current model which points to the given
    "ancestor". This is possibly an indirect link (a pointer to a parent
    model, which points, eventually, to the ancestor). Used when
    constructing table joins for model inheritance.

    Returns None if the model isn't an ancestor of this one.
    """
    if ancestor in self.parents:
        return self.parents[ancestor]
    for parent in self.parents:
        # Walk each parent chain until a link is found.
        parent_link = parent._meta.get_ancestor_link(ancestor)
        if parent_link:
            # A local link to the intermediate parent is preferred.
            return self.parents[parent] or parent_link
def get_ordered_objects(self):
    """
    Returns a list of Options objects that are ordered with respect to
    this object.
    """
    if not hasattr(self, '_ordered_objects'):
        self._ordered_objects = []
    return self._ordered_objects
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH,
                limit_to_currently_related=False):
    """
    Returns choices with a default blank choice included, for use as
    SelectField choices for this field.

    Analogue of django.db.models.fields.Field.get_choices, provided
    initially for utilisation by RelatedFieldListFilter.
    """
    first_choice = (include_blank and blank_choice) or []
    queryset = self.model._default_manager.all()
    if limit_to_currently_related:
        queryset = queryset.complex_filter(
            {'%s__isnull' % self.parent_model._meta.module_name: False})
    lst = [(x._get_pk_val(), smart_text(x)) for x in queryset]
    return first_choice + lst
def editable_fields(self):
    """Get the fields in this class that should be edited inline."""
    all_fields = self.opts.fields + self.opts.many_to_many
    return [f for f in all_fields if f.editable and f != self.field]
def _populate(self):
    """
    Fill in all the cache information. This method is threadsafe, in the
    sense that every caller will see the same state upon return, and if
    the cache is already initialised, it does no work.
    """
    if self.loaded:
        return
    imp.acquire_lock()
    try:
        # Re-check under the lock (double-checked locking pattern).
        if self.loaded:
            return
        for app_name in settings.INSTALLED_APPS:
            if app_name in self.handled:
                continue
            self.load_app(app_name, True)
        if not self.nesting_level:
            for app_name in self.postponed:
                self.load_app(app_name)
            self.loaded = True
    finally:
        imp.release_lock()
def _label_for(self, app_mod):
    """Return app_label for given models module."""
    # "project.myapp.models" -> "myapp"
    return app_mod.__name__.split('.')[-2]
def load_app(self, app_name, can_postpone=False):
    """
    Loads the app with the provided fully qualified name, and returns the
    model module.
    """
    self.handled[app_name] = None
    self.nesting_level += 1
    app_module = import_module(app_name)
    try:
        models = import_module('.models', app_name)
    except ImportError:
        self.nesting_level -= 1
        if not module_has_submodule(app_module, 'models'):
            # App has no models module at all; nothing to load.
            return None
        elif can_postpone:
            # The models module exists but failed to import, possibly due
            # to an import ordering issue; retry it later.
            self.postponed.append(app_name)
            return None
        else:
            raise
    self.nesting_level -= 1
    if models not in self.app_store:
        self.app_store[models] = len(self.app_store)
        self.app_labels[self._label_for(models)] = models
    return models
def app_cache_ready(self):
    """
    Returns true if the model cache is fully populated.

    Useful for code that wants to cache the results of get_models() for
    themselves once it is safe to do so.
    """
    return self.loaded
def get_apps(self):
    """Returns a list of all installed modules that contain models."""
    self._populate()
    # Sort by the registration order recorded in app_store's values.
    apps = sorted((order, module) for module, order in self.app_store.items())
    return [module for order, module in apps]
def get_app(self, app_label, emptyOK=False):
    """
    Returns the module containing the models for the given app_label. If
    the app has no models in it and 'emptyOK' is True, returns None.
    """
    self._populate()
    imp.acquire_lock()
    try:
        for app_name in settings.INSTALLED_APPS:
            if app_label == app_name.split('.')[-1]:
                mod = self.load_app(app_name, False)
                if mod is None:
                    if emptyOK:
                        return None
                    raise ImproperlyConfigured(
                        'App with label %s is missing a models.py module.' % app_label)
                else:
                    return mod
        raise ImproperlyConfigured(
            'App with label %s could not be found' % app_label)
    finally:
        imp.release_lock()
def get_app_errors(self):
    """Returns the map of known problems with the INSTALLED_APPS."""
    self._populate()
    return self.app_errors
def get_models(self, app_mod=None, include_auto_created=False,
               include_deferred=False, only_installed=True,
               include_swapped=False):
    """
    Given a module containing models, returns a list of the models.
    Otherwise returns a list of all installed models.

    By default, auto-created models (i.e., m2m models without an explicit
    intermediate table) are not included. However, if you specify
    include_auto_created=True, they will be.

    By default, models created to satisfy deferred attribute queries are
    *not* included in the list of models. However, if you specify
    include_deferred, they will be.

    By default, models that aren't part of installed apps will *not* be
    included in the list of models. However, if you specify
    only_installed=False, they will be.

    By default, models that have been swapped out will *not* be included
    in the list of models. However, if you specify include_swapped, they
    will be.
    """
    cache_key = (app_mod, include_auto_created, include_deferred,
                 only_installed, include_swapped)
    try:
        return self._get_models_cache[cache_key]
    except KeyError:
        pass
    self._populate()
    if app_mod:
        if app_mod in self.app_store:
            app_list = [self.app_models.get(self._label_for(app_mod), SortedDict())]
        else:
            app_list = []
    elif only_installed:
        app_list = [self.app_models.get(app_label, SortedDict())
                    for app_label in six.iterkeys(self.app_labels)]
    else:
        app_list = six.itervalues(self.app_models)
    model_list = []
    for app in app_list:
        model_list.extend(
            model for model in app.values()
            if ((not model._deferred or include_deferred) and
                (not model._meta.auto_created or include_auto_created) and
                (not model._meta.swapped or include_swapped))
        )
    self._get_models_cache[cache_key] = model_list
    return model_list
def get_model(self, app_label, model_name, seed_cache=True, only_installed=True):
    """
    Returns the model matching the given app_label and case-insensitive
    model_name.

    Returns None if no model is found.
    """
    if seed_cache:
        self._populate()
    if only_installed and app_label not in self.app_labels:
        return None
    return self.app_models.get(app_label, SortedDict()).get(model_name.lower())
def register_models(self, app_label, *models):
    """Register a set of models as belonging to an app."""
    for model in models:
        model_name = model._meta.object_name.lower()
        model_dict = self.app_models.setdefault(app_label, SortedDict())
        if model_name in model_dict:
            # The same model may be imported via two different paths (e.g.
            # source file vs. byte-compiled file); skip duplicates that
            # resolve to the same file stem.
            fname1 = os.path.abspath(upath(sys.modules[model.__module__].__file__))
            fname2 = os.path.abspath(upath(sys.modules[model_dict[model_name].__module__].__file__))
            if os.path.splitext(fname1)[0] == os.path.splitext(fname2)[0]:
                continue
        model_dict[model_name] = model
    self._get_models_cache.clear()
def add(self, data, connector):
    """
    Add a node to the where-tree. If the data is a list or tuple, it is
    expected to be of the form (obj, lookup_type, value), where obj is a
    Constraint object, and is then slightly munged before being stored (to
    avoid storing any reference to field objects). Otherwise, the 'data'
    is stored unchanged and can be any class with an 'as_sql()' method.
    """
    if not isinstance(data, (list, tuple)):
        super(WhereNode, self).add(data, connector)
        return
    obj, lookup_type, value = data
    if isinstance(value, collections.Iterator):
        # Consume iterators now so the node can be evaluated repeatedly.
        value = list(value)
    if isinstance(value, datetime.datetime):
        value_annotation = datetime.datetime
    elif hasattr(value, 'value_annotation'):
        value_annotation = value.value_annotation
    else:
        value_annotation = bool(value)
    if hasattr(obj, 'prepare'):
        value = obj.prepare(lookup_type, value)
    super(WhereNode, self).add(
        (obj, lookup_type, value_annotation, value), connector)
def as_sql(self, qn, connection):
    """
    Returns the SQL version of the where clause and the value to be
    substituted in. Returns '', [] if this node matches everything,
    None, [] if this node is empty, and raises EmptyResultSet if this
    node can't match anything.
    """
    result = []
    result_params = []
    everything_childs, nothing_childs = 0, 0
    non_empty_childs = len(self.children)
    for child in self.children:
        try:
            if hasattr(child, 'as_sql'):
                sql, params = child.as_sql(qn=qn, connection=connection)
            else:
                # A leaf node in the tree.
                sql, params = self.make_atom(child, qn, connection)
        except EmptyResultSet:
            nothing_childs += 1
        else:
            if sql:
                result.append(sql)
                result_params.extend(params)
            else:
                if sql is None:
                    # Skip empty childs totally.
                    non_empty_childs -= 1
                    continue
                everything_childs += 1
    # Note that if the connector is AND - then we can't return anything if
    # a single child matches nothing; mirror logic for OR / full matches.
    if self.connector == AND:
        full_needed, empty_needed = non_empty_childs, 1
    else:
        full_needed, empty_needed = 1, non_empty_childs
    if empty_needed - nothing_childs <= 0:
        if self.negated:
            return '', []
        else:
            raise EmptyResultSet
    if full_needed - everything_childs <= 0:
        if self.negated:
            raise EmptyResultSet
        else:
            return '', []
    if non_empty_childs == 0:
        # All the child nodes were empty.
        return None, []
    conn = ' %s ' % self.connector
    sql_string = conn.join(result)
    if sql_string:
        if self.negated:
            sql_string = 'NOT (%s)' % sql_string
        elif len(result) > 1:
            sql_string = '(%s)' % sql_string
    return sql_string, result_params
def make_atom(self, child, qn, connection):
    """
    Turn a tuple (Constraint(table_alias, column_name, db_type),
    lookup_type, value_annotation, params) into valid SQL.

    The first item of the tuple may also be an Aggregate.

    Returns the string for the SQL fragment and the parameters to use for
    it.
    """
    lvalue, lookup_type, value_annotation, params_or_value = child
    if isinstance(lvalue, Constraint):
        try:
            lvalue, params = lvalue.process(lookup_type, params_or_value, connection)
        except EmptyShortCircuit:
            raise EmptyResultSet
    elif isinstance(lvalue, Aggregate):
        params = lvalue.field.get_db_prep_lookup(lookup_type, params_or_value, connection)
    else:
        raise TypeError("'make_atom' expects a Constraint or an Aggregate as the first item of its 'child' argument.")
    if isinstance(lvalue, tuple):
        # A direct database column lookup.
        field_sql = self.sql_for_columns(lvalue, qn, connection)
    else:
        # A smart object with an as_sql() method.
        field_sql = lvalue.as_sql(qn, connection)
    if value_annotation is datetime.datetime:
        cast_sql = connection.ops.datetime_cast_sql()
    else:
        cast_sql = '%s'
    if hasattr(params, 'as_sql'):
        extra, params = params.as_sql(qn, connection)
        cast_sql = ''
    else:
        extra = ''
    if (len(params) == 1 and params[0] == '' and lookup_type == 'exact'
            and connection.features.interprets_empty_strings_as_nulls):
        lookup_type = 'isnull'
        value_annotation = True
    if lookup_type in connection.operators:
        format = '%s %%s %%s' % (connection.ops.lookup_cast(lookup_type),)
        return (format % (field_sql, connection.operators[lookup_type] % cast_sql, extra), params)
    if lookup_type == 'in':
        if not value_annotation:
            raise EmptyResultSet
        if extra:
            return ('%s IN %s' % (field_sql, extra), params)
        max_in_list_size = connection.ops.max_in_list_size()
        if max_in_list_size and len(params) > max_in_list_size:
            # Break up the params list into chunks the backend can handle,
            # OR-ing the per-chunk IN clauses together.
            in_clause_elements = ['(']
            for offset in xrange(0, len(params), max_in_list_size):
                if offset > 0:
                    in_clause_elements.append(' OR ')
                in_clause_elements.append('%s IN (' % field_sql)
                group_size = min(len(params) - offset, max_in_list_size)
                param_group = ', '.join(repeat('%s', group_size))
                in_clause_elements.append(param_group)
                in_clause_elements.append(')')
            in_clause_elements.append(')')
            return ''.join(in_clause_elements), params
        else:
            return ('%s IN (%s)' % (field_sql, ', '.join(repeat('%s', len(params)))), params)
    elif lookup_type in ('range', 'year'):
        return ('%s BETWEEN %%s and %%s' % field_sql, params)
    elif lookup_type in ('month', 'day', 'week_day'):
        return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type, field_sql), params)
    elif lookup_type == 'isnull':
        return ('%s IS %sNULL' % (field_sql, (not value_annotation and 'NOT ') or ''), ())
    elif lookup_type == 'search':
        return (connection.ops.fulltext_search_sql(field_sql), params)
    elif lookup_type in ('regex', 'iregex'):
        return (connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params)
    raise TypeError('Invalid lookup_type: %r' % lookup_type)
def sql_for_columns(self, data, qn, connection):
    """
    Returns the SQL fragment used for the left-hand side of a column
    constraint (for example, the "T1.foo" portion in the clause
    "WHERE ... T1.foo = 6").
    """
    table_alias, name, db_type = data
    if table_alias:
        lhs = '%s.%s' % (qn(table_alias), qn(name))
    else:
        lhs = qn(name)
    return connection.ops.field_cast_sql(db_type) % lhs
def relabel_aliases(self, change_map, node=None):
    """
    Relabels the alias values of any children. 'change_map' is a
    dictionary mapping old (current) alias values to the new values.
    """
    if not node:
        node = self
    for pos, child in enumerate(node.children):
        if hasattr(child, 'relabel_aliases'):
            child.relabel_aliases(change_map)
        elif isinstance(child, tree.Node):
            # Recurse into plain tree nodes.
            self.relabel_aliases(change_map, child)
        elif isinstance(child, (list, tuple)):
            if isinstance(child[0], (list, tuple)):
                elt = list(child[0])
                if elt[0] in change_map:
                    elt[0] = change_map[elt[0]]
                    node.children[pos] = (tuple(elt),) + child[1:]
            else:
                child[0].relabel_aliases(change_map)
            # Check if the query value also requires relabelling.
            if hasattr(child[3], 'relabel_aliases'):
                child[3].relabel_aliases(change_map)
def __getstate__(self):
    """
    Save the state of the Constraint for pickling.

    Fields aren't necessarily pickleable, because they can have callable
    default values. So, instead of pickling the field store a reference
    so we can restore it manually.
    """
    obj_dict = self.__dict__.copy()
    if self.field:
        obj_dict['model'] = self.field.model
        obj_dict['field_name'] = self.field.name
    del obj_dict['field']
    return obj_dict
def __setstate__(self, data):
    """Restore the constraint"""
    model = data.pop('model', None)
    field_name = data.pop('field_name', None)
    self.__dict__.update(data)
    if model is not None:
        # Re-resolve the field from the model, since the Field object
        # itself was not pickled.
        self.field = model._meta.get_field(field_name)
    else:
        self.field = None
def process(self, lookup_type, value, connection):
    """Returns a tuple of data suitable for inclusion in a WhereNode instance."""
    from django.db.models.base import ObjectDoesNotExist
    try:
        if self.field:
            params = self.field.get_db_prep_lookup(
                lookup_type, value, connection=connection, prepared=True)
            db_type = self.field.db_type(connection=connection)
        else:
            # This branch is used at times when we cannot determine the
            # underlying field; fall back to a bare Field's preparation.
            params = Field().get_db_prep_lookup(
                lookup_type, value, connection=connection, prepared=True)
            db_type = None
    except ObjectDoesNotExist:
        raise EmptyShortCircuit
    return (self.alias, self.col, db_type), params
def convert_values(self, value, field, connection):
    """
    Convert the database-returned value into a type that is consistent
    across database backends.  By default this defers to the underlying
    backend operations; subclasses may override it per backend.
    """
    backend_ops = connection.ops
    return backend_ops.convert_values(value, field)
def __str__(self):
    """
    Returns the query as a string of SQL with the parameter values
    substituted in (use sql_with_params() to see the unsubstituted
    string).  Parameter values won't necessarily be quoted correctly,
    since that is done by the database interface at execution time.
    """
    query_sql, query_params = self.sql_with_params()
    return query_sql % query_params
def sql_with_params(self):
    """
    Returns the query as an SQL string plus the parameters that will be
    substituted into the query at execution time.
    """
    # Compile against the default database alias and hand back its
    # (sql, params) pair.
    compiler = self.get_compiler(DEFAULT_DB_ALIAS)
    return compiler.as_sql()
def __getstate__(self):
    """
    Pickling support.

    Drops the compiled related-select caches (they are rebuilt on
    demand) and reduces select_fields to plain field names, which are
    resolved back to field objects in __setstate__.
    """
    obj_dict = self.__dict__.copy()
    obj_dict['related_select_fields'] = []
    obj_dict['related_select_cols'] = []
    # A conditional expression instead of the fragile
    # "(f is not None and f.name) or None" idiom, which would map a
    # field whose name is falsy to None.
    obj_dict['select_fields'] = [
        f.name if f is not None else None
        for f in obj_dict['select_fields']
    ]
    return obj_dict
def __setstate__(self, obj_dict):
    """
    Unpickling support.

    Re-resolves the field names stored by __getstate__ back into field
    objects against the model's options, then restores the instance
    dict.
    """
    opts = obj_dict['model']._meta
    # Conditional expression instead of the fragile
    # "(name and opts.get_field(name)) or None" idiom, which would turn
    # any falsy lookup result into None.
    obj_dict['select_fields'] = [
        opts.get_field(name) if name is not None else None
        for name in obj_dict['select_fields']
    ]
    self.__dict__.update(obj_dict)
def get_meta(self):
    """
    Returns the Options instance (the model._meta) from which to start
    processing.  Normally this is self.model._meta, but subclasses may
    change it.
    """
    model = self.model
    return model._meta
def clone(self, klass=None, memo=None, **kwargs):
    """
    Creates a copy of the current instance.

    'klass' lets the copy be created as a different Query subclass;
    'memo' is passed through to copy.deepcopy() for the structures that
    need a deep copy.  The 'kwargs' parameter can be used by clients to
    update attributes after copying has taken place.
    """
    obj = Empty()
    # Rebind the empty shell to the requested class (or this one).
    obj.__class__ = (klass or self.__class__)
    obj.model = self.model
    # Shallow-copy the per-alias bookkeeping containers.
    obj.alias_refcount = self.alias_refcount.copy()
    obj.alias_map = self.alias_map.copy()
    obj.table_map = self.table_map.copy()
    obj.join_map = self.join_map.copy()
    obj.default_cols = self.default_cols
    obj.default_ordering = self.default_ordering
    obj.standard_ordering = self.standard_ordering
    obj.included_inherited_models = self.included_inherited_models.copy()
    # Compiler-side ordering state starts empty on the clone.
    obj.ordering_aliases = []
    obj.select_fields = self.select_fields[:]
    obj.related_select_fields = self.related_select_fields[:]
    obj.dupe_avoidance = self.dupe_avoidance.copy()
    obj.select = self.select[:]
    obj.tables = self.tables[:]
    # WHERE/HAVING trees are mutable node structures: deep-copy them.
    obj.where = copy.deepcopy(self.where, memo=memo)
    obj.where_class = self.where_class
    if (self.group_by is None):
        obj.group_by = None
    else:
        obj.group_by = self.group_by[:]
    obj.having = copy.deepcopy(self.having, memo=memo)
    obj.order_by = self.order_by[:]
    (obj.low_mark, obj.high_mark) = (self.low_mark, self.high_mark)
    obj.distinct = self.distinct
    obj.distinct_fields = self.distinct_fields[:]
    obj.select_for_update = self.select_for_update
    obj.select_for_update_nowait = self.select_for_update_nowait
    obj.select_related = self.select_related
    # Related columns are reset; presumably rebuilt during compilation.
    obj.related_select_cols = []
    obj.aggregates = copy.deepcopy(self.aggregates, memo=memo)
    if (self.aggregate_select_mask is None):
        obj.aggregate_select_mask = None
    else:
        obj.aggregate_select_mask = self.aggregate_select_mask.copy()
    # Cache is dropped so it is recomputed against the copied mask.
    obj._aggregate_select_cache = None
    obj.max_depth = self.max_depth
    obj.extra = self.extra.copy()
    if (self.extra_select_mask is None):
        obj.extra_select_mask = None
    else:
        obj.extra_select_mask = self.extra_select_mask.copy()
    if (self._extra_select_cache is None):
        obj._extra_select_cache = None
    else:
        obj._extra_select_cache = self._extra_select_cache.copy()
    obj.extra_tables = self.extra_tables
    obj.extra_order_by = self.extra_order_by
    obj.deferred_loading = copy.deepcopy(self.deferred_loading, memo=memo)
    # Sticky-filter aliases only carry over while stickiness is active.
    if (self.filter_is_sticky and self.used_aliases):
        obj.used_aliases = self.used_aliases.copy()
    else:
        obj.used_aliases = set()
    obj.filter_is_sticky = False
    # Caller-requested attribute overrides are applied last.
    obj.__dict__.update(kwargs)
    if hasattr(obj, '_setup_query'):
        obj._setup_query()
    return obj
def convert_values(self, value, field, connection):
    """
    Convert the database-returned value into a type that is consistent
    across database backends.  By default this defers to the backend's
    operations object; Query subclasses for specific backends may
    override it.
    """
    ops = connection.ops
    converted = ops.convert_values(value, field)
    return converted
def resolve_aggregate(self, value, aggregate, connection):
    """
    Resolve the value of aggregates returned by the database to
    consistent (and reasonable) types: ordinal aggregates become ints
    (0 for NULL), computed ones become floats, and everything else is
    passed through convert_values().
    """
    if aggregate.is_ordinal:
        # COUNT-style aggregates are always integral; NULL means zero.
        return 0 if value is None else int(value)
    if value is None:
        return None
    if aggregate.is_computed:
        return float(value)
    return self.convert_values(value, aggregate.field, connection)
def get_aggregation(self, using):
    """
    Returns the dictionary with the values of the existing aggregations,
    executed against the 'using' database alias.
    """
    if (not self.aggregate_select):
        return {}
    if (self.group_by is not None):
        # Grouped query: push the current query into a subquery and
        # aggregate over it from an outer AggregateQuery.
        from django.db.models.sql.subqueries import AggregateQuery
        query = AggregateQuery(self.model)
        obj = self.clone()
        # Summary aggregates move to the outer query.
        for (alias, aggregate) in self.aggregate_select.items():
            if aggregate.is_summary:
                query.aggregate_select[alias] = aggregate
                del obj.aggregate_select[alias]
        try:
            query.add_subquery(obj, using)
        except EmptyResultSet:
            # Inner query can match nothing: every aggregate is None.
            return dict(((alias, None) for alias in query.aggregate_select))
    else:
        # No grouping: aggregate in place over this query, stripping
        # the regular select list.
        query = self
        self.select = []
        self.default_cols = False
        self.extra = {}
        self.remove_inherited_models()
    # Ordering, limits, locking and related selection are meaningless
    # for a pure aggregate query; clear them before execution.
    query.clear_ordering(True)
    query.clear_limits()
    query.select_for_update = False
    query.select_related = False
    query.related_select_cols = []
    query.related_select_fields = []
    result = query.get_compiler(using).execute_sql(SINGLE)
    if (result is None):
        result = [None for q in query.aggregate_select.items()]
    # Coerce each returned value to a consistent Python type.
    return dict([(alias, self.resolve_aggregate(val, aggregate, connection=connections[using]))
                 for ((alias, aggregate), val) in zip(query.aggregate_select.items(), result)])
def get_count(self, using):
    """
    Performs a COUNT() query using the current filter constraints and
    returns the number of rows, adjusted for any slicing in effect.
    """
    obj = self.clone()
    if ((len(self.select) > 1) or self.aggregate_select or (self.distinct and self.distinct_fields)):
        # Complex selects (multiple columns, aggregates, DISTINCT ON
        # fields) cannot be counted directly: wrap the query in a
        # subquery and count that instead.
        from django.db.models.sql.subqueries import AggregateQuery
        subquery = obj
        subquery.clear_ordering(True)
        subquery.clear_limits()
        obj = AggregateQuery(obj.model)
        try:
            obj.add_subquery(subquery, using=using)
        except EmptyResultSet:
            # The subquery is known to match nothing.
            return 0
    obj.add_count_column()
    number = obj.get_aggregation(using=using)[None]
    # Apply the current slice's offset and limit to the raw count.
    number = max(0, (number - self.low_mark))
    if (self.high_mark is not None):
        number = min(number, (self.high_mark - self.low_mark))
    return number
def combine(self, rhs, connector):
    """
    Merge the 'rhs' query into the current one (with any 'rhs' effects
    being applied *after*, that is "to the right of", anything in the
    current query).  'rhs' is not modified during a call to this
    function.  The 'connector' parameter describes how to connect
    filters from the 'rhs' query.
    """
    assert (self.model == rhs.model), 'Cannot combine queries on two different base models.'
    assert self.can_filter(), 'Cannot combine queries once a slice has been taken.'
    assert (self.distinct == rhs.distinct), 'Cannot combine a unique query with a non-unique query.'
    assert (self.distinct_fields == rhs.distinct_fields), 'Cannot combine queries with different distinct fields.'
    self.remove_inherited_models()
    # Maps each rhs alias to its alias in the combined query.
    change_map = {}
    used = set()
    conjunction = (connector == AND)
    first = True
    # Recreate every live rhs join in this query, recording the alias
    # translation as we go.
    for alias in rhs.tables:
        if (not rhs.alias_refcount[alias]):
            continue
        (table, _, join_type, lhs, lhs_col, col, _) = rhs.alias_map[alias]
        promote = (join_type == self.LOUTER)
        # Joins must chain off already-translated aliases.
        lhs = change_map.get(lhs, lhs)
        new_alias = self.join((lhs, table, lhs_col, col), (conjunction and (not first)), used, promote, (not conjunction))
        used.add(new_alias)
        change_map[alias] = new_alias
        first = False
    if (not conjunction):
        # For OR, tables appearing on only one side must be promoted to
        # outer joins so the other side's rows are not filtered away.
        l_tables = set(self.tables)
        r_tables = set(rhs.tables)
        for alias in change_map:
            if (alias in r_tables):
                if rhs.alias_refcount[alias]:
                    r_tables.remove(alias)
                    r_tables.add(change_map[alias])
        outer_tables = ((l_tables | r_tables) - (l_tables & r_tables))
        for alias in outer_tables:
            if (self.alias_refcount.get(alias) or rhs.alias_refcount.get(alias)):
                self.promote_joins([alias], True)
    # Combine the WHERE trees.  An empty side is represented with an
    # EverythingNode so the connector keeps its meaning.
    if rhs.where:
        w = copy.deepcopy(rhs.where)
        w.relabel_aliases(change_map)
        if (not self.where):
            self.where.add(EverythingNode(), AND)
    elif self.where:
        w = self.where_class()
        w.add(EverythingNode(), AND)
    else:
        w = self.where_class()
    self.where.add(w, connector)
    # The rhs select list (with aliases translated) replaces ours.
    self.select = []
    for col in rhs.select:
        if isinstance(col, (list, tuple)):
            self.select.append((change_map.get(col[0], col[0]), col[1]))
        else:
            item = copy.deepcopy(col)
            item.relabel_aliases(change_map)
            self.select.append(item)
    self.select_fields = rhs.select_fields[:]
    if (connector == OR):
        # extra(select=...) on both sides of an OR cannot be merged
        # into a single consistent select.
        if (self.extra and rhs.extra):
            raise ValueError("When merging querysets using 'or', you cannot have extra(select=...) on both sides.")
    self.extra.update(rhs.extra)
    extra_select_mask = set()
    if (self.extra_select_mask is not None):
        extra_select_mask.update(self.extra_select_mask)
    if (rhs.extra_select_mask is not None):
        extra_select_mask.update(rhs.extra_select_mask)
    if extra_select_mask:
        self.set_extra_mask(extra_select_mask)
    self.extra_tables += rhs.extra_tables
    # Ordering from rhs wins when present; otherwise keep our own.
    self.order_by = ((rhs.order_by and rhs.order_by[:]) or self.order_by)
    self.extra_order_by = (rhs.extra_order_by or self.extra_order_by)
def deferred_to_data(self, target, callback):
    """
    Converts the self.deferred_loading data structure to an alternate
    data structure describing the fields that *will* be loaded.

    Used to compute the columns to select from the database, and by the
    QuerySet class to work out which fields are being initialised on
    each model.  Models whose fields are all loaded aren't mentioned in
    the result, only those with field restrictions.

    'target' is the instance populated in place.  'callback' is called
    as callback(target, model, fields) for every (model, field-set)
    pair that has to be added to 'target'.
    """
    (field_names, defer) = self.deferred_loading
    if (not field_names):
        return
    orig_opts = self.model._meta
    seen = {}
    # The starting model's primary key is always loaded.
    must_include = {orig_opts.concrete_model: set([orig_opts.pk])}
    for field_name in field_names:
        parts = field_name.split(LOOKUP_SEP)
        cur_model = self.model
        opts = orig_opts
        # Walk the relation path: each link field and each intermediate
        # model's pk must be loaded regardless of deferral.
        for name in parts[:(-1)]:
            old_model = cur_model
            source = opts.get_field_by_name(name)[0]
            if is_reverse_o2o(source):
                cur_model = source.model
            else:
                cur_model = source.rel.to
            opts = cur_model._meta
            # A reverse one-to-one has no local column on old_model.
            if (not is_reverse_o2o(source)):
                must_include[old_model].add(source)
            add_to_dict(must_include, cur_model, opts.pk)
        (field, model, _, _) = opts.get_field_by_name(parts[(-1)])
        if (model is None):
            model = cur_model
        if (not is_reverse_o2o(field)):
            add_to_dict(seen, model, field)
    if defer:
        # 'seen' holds the deferred fields; invert it into the fields
        # that *will* be loaded for each model.
        workset = {}
        for (model, values) in six.iteritems(seen):
            for (field, m) in model._meta.get_fields_with_model():
                if (field in values):
                    continue
                add_to_dict(workset, (m or model), field)
        # Fields that must always be loaded override any deferral.
        for (model, values) in six.iteritems(must_include):
            if (model in workset):
                workset[model].update(values)
        for (model, values) in six.iteritems(workset):
            callback(target, model, values)
    else:
        # 'seen' holds the only()-style field set; fold in the
        # mandatory fields.
        for (model, values) in six.iteritems(must_include):
            if (model in seen):
                seen[model].update(values)
            else:
                seen[model] = values
        # Parent models not mentioned at all still get an (empty) entry
        # so they are reported with field restrictions.
        for model in orig_opts.get_parent_list():
            if (model not in seen):
                seen[model] = set()
        for (model, values) in six.iteritems(seen):
            callback(target, model, values)
def deferred_to_columns_cb(self, target, model, fields):
    """
    Callback used by deferred_to_columns().  Adds each field's column
    to the set stored under the model's table name in 'target' (a dict
    of table name -> set of column names).
    """
    columns = target.setdefault(model._meta.db_table, set())
    for field in fields:
        columns.add(field.column)
def table_alias(self, table_name, create=False):
    """
    Returns (alias, is_new) for the given table_name.  If 'create' is
    True a new alias is always created; otherwise an existing alias for
    the table (when there is one) is reused.
    """
    existing = self.table_map.get(table_name)
    if existing and not create:
        # Reuse: bump the refcount on the first recorded alias.
        alias = existing[0]
        self.alias_refcount[alias] += 1
        return alias, False
    if existing:
        # Additional aliases for an already-seen table get generated
        # names based on the alias prefix.
        alias = '%s%d' % (self.alias_prefix, len(self.alias_map) + 1)
        existing.append(alias)
    else:
        # First occurrence: the table name doubles as its alias.
        alias = table_name
        self.table_map[alias] = [alias]
    self.alias_refcount[alias] = 1
    self.tables.append(alias)
    return alias, True
def ref_alias(self, alias):
    """Increases the reference count for this alias by one."""
    self.alias_refcount[alias] = self.alias_refcount[alias] + 1
def unref_alias(self, alias, amount=1):
    """Decreases the reference count for this alias by 'amount' (default 1)."""
    current = self.alias_refcount[alias]
    self.alias_refcount[alias] = current - amount
def promote_joins(self, aliases, unconditional=False):
    """
    Promotes recursively the join type of the given aliases and their
    children to an outer join.

    If 'unconditional' is False, a join is only promoted when it is
    nullable or its parent join is already an outer join.  Whenever an
    alias is promoted, every join hanging off it is queued too, so a
    LOUTER join is never followed by an INNER join in the same chain
    (the INNER would cancel the LOUTER's effect).
    """
    # Worklist of aliases still to consider.
    aliases = list(aliases)
    while aliases:
        alias = aliases.pop(0)
        if (self.alias_map[alias].rhs_join_col is None):
            # Base table, not a join: nothing to promote.
            continue
        parent_alias = self.alias_map[alias].lhs_alias
        parent_louter = (parent_alias and (self.alias_map[parent_alias].join_type == self.LOUTER))
        already_louter = (self.alias_map[alias].join_type == self.LOUTER)
        if ((unconditional or self.alias_map[alias].nullable or parent_louter) and (not already_louter)):
            # Rebuild the (immutable) join record with LOUTER type.
            data = self.alias_map[alias]._replace(join_type=self.LOUTER)
            self.alias_map[alias] = data
            # Queue every join that starts from this alias so the whole
            # subtree becomes LOUTER as well.
            aliases.extend((join for join in self.alias_map.keys()
                            if ((self.alias_map[join].lhs_alias == alias) and (join not in aliases))))
def reset_refcounts(self, to_counts):
    """
    Resets alias reference counts so they match the values passed in
    'to_counts' (aliases missing from 'to_counts' are taken down to a
    target of zero) by unreferencing the difference.
    """
    # Snapshot first: unref_alias mutates alias_refcount while we walk.
    snapshot = dict(self.alias_refcount)
    for alias, current in snapshot.items():
        self.unref_alias(alias, current - to_counts.get(alias, 0))
def promote_unused_aliases(self, initial_refcounts, used_aliases):
    """
    Given a "before" copy of the alias refcounts ('initial_refcounts')
    and the aliases touched since then ('used_aliases'), promotes to
    outer joins every touched alias that either did not exist before or
    whose refcount has not changed — plus their children, via
    promote_joins().
    """
    for alias in self.tables:
        if alias not in used_aliases:
            continue
        unchanged = (alias not in initial_refcounts
                     or self.alias_refcount[alias] == initial_refcounts[alias])
        if unchanged:
            self.promote_joins([alias])