desc | decl | bodies
---|---|---|
'Returns a list of the columns that have been updated since instantiation or save'
| def get_changed_columns(self):
| return [k for (k, v) in self._values.items() if v.changed]
|
'Returns an IN operator,
used where you\'d typically want to use Python\'s `in` operator'
| def in_(self, item):
| return WhereClause(six.text_type(self), InOperator(), item)
|
'Returns a CONTAINS operator'
| def contains_(self, item):
| return WhereClause(six.text_type(self), ContainsOperator(), item)
|
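Both helpers build WhereClause objects that filter() accepts positionally; the equivalent keyword forms use the `__in` and `__contains` suffixes. A minimal sketch, assuming a hypothetical `Post` model whose `tags` collection column is indexed:

# IN on a key column
Post.objects.filter(id__in=[1, 2, 3])
# CONTAINS on an indexed collection column
Post.objects.filter(tags__contains='cassandra')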
':param batch_type: (optional) One of batch type values available through BatchType enum
:type batch_type: str or None
:param timestamp: (optional) A datetime or timedelta object with desired timestamp to be applied
to the batch conditional.
:type timestamp: datetime or timedelta or None
:param consistency: (optional) The :class:`.ConsistencyLevel` to be used for the batch query ("ANY", "ONE", "QUORUM", etc.)
:type consistency: :class:`.ConsistencyLevel` or None
:param execute_on_exception: (Defaults to False) Indicates that when the BatchQuery instance is used
as a context manager the queries accumulated within the context must be executed despite
encountering an error within the context. By default, any exception raised from within
the context scope will cause the batched queries not to be executed.
:type execute_on_exception: bool
:param timeout: (optional) Timeout for the entire batch (in seconds); if not specified, falls back
to the default session timeout
:type timeout: float or None
:param str connection: Connection name to use for the batch execution'
| def __init__(self, batch_type=None, timestamp=None, consistency=None, execute_on_exception=False, timeout=conn.NOT_SET, connection=None):
| self.queries = []
self.batch_type = batch_type
if ((timestamp is not None) and (not isinstance(timestamp, (datetime, timedelta)))):
raise CQLEngineException('timestamp object must be an instance of datetime or timedelta')
self.timestamp = timestamp
self._consistency = consistency
self._execute_on_exception = execute_on_exception
self._timeout = timeout
self._callbacks = []
self._executed = False
self._context_entered = False
self._connection = connection
if connection:
self._connection_explicit = True
|
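A minimal usage sketch for BatchQuery as a context manager (the `User` model here is hypothetical); the queries accumulated inside the block are sent as a single batch when the context exits:

from cassandra.cqlengine.query import BatchQuery

with BatchQuery() as b:
    # both creates are queued on the batch, not executed immediately
    User.batch(b).create(id=1, name='alice')
    User.batch(b).create(id=2, name='bob')
# on clean exit the batch executes; on an exception it does not
# (unless execute_on_exception=True was passed)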
'Adds a function (and any arguments to pass to it) to be executed after the batch executes.
A batch can support multiple callbacks.
Note that if the batch does not execute, the callbacks are not executed either;
a callback is thus an "on batch success" handler.
:param fn: Callable object
:type fn: callable
:param \*args: Positional arguments to be passed to the callback at the time of execution
:param \*\*kwargs: Named arguments to be passed to the callback at the time of execution'
| def add_callback(self, fn, *args, **kwargs):
| if (not callable(fn)):
raise ValueError("Value for argument 'fn' is {0} and is not a callable object.".format(type(fn)))
self._callbacks.append((fn, args, kwargs))
|
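A short sketch of add_callback, assuming a hypothetical notify function; since callbacks fire only after a successful execute, they serve as "on batch success" hooks:

def notify(message, count=None):
    # runs only after the batch has executed successfully
    print(message, count)

b = BatchQuery()
b.add_callback(notify, 'batch applied', count=2)
# ... queue model operations on b ...
b.execute()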
'returns the fields to select'
| def _select_fields(self):
| return []
|
'Returns a select clause based on the given filter args'
| def _select_query(self):
| if self._where:
self._validate_select_where()
return SelectStatement(self.column_family_name, fields=self._select_fields(), where=self._where, order_by=self._order, limit=self._limit, allow_filtering=self._allow_filtering, distinct_fields=self._distinct_fields, fetch_size=self._fetch_size)
|
'Fill the result cache with all results.'
| def _fill_result_cache(self):
| idx = 0
try:
while True:
idx += 1000
self._fill_result_cache_to_idx(idx)
except StopIteration:
pass
self._count = len(self._result_cache)
|
'Returns a function that will be used to instantiate query results'
| def _get_result_constructor(self):
| raise NotImplementedError
|
'Set a batch object to run the query on.
Note: running a select query with a batch object will raise an exception'
| def batch(self, batch_obj):
| if self._connection:
raise CQLEngineException('Cannot specify the connection on model in batch mode.')
if ((batch_obj is not None) and (not isinstance(batch_obj, BatchQuery))):
raise CQLEngineException('batch_obj must be a BatchQuery instance or None')
clone = copy.deepcopy(self)
clone._batch = batch_obj
return clone
|
'Returns a queryset matching all rows
.. code-block:: python
for user in User.objects().all():
print(user)'
| def all(self):
| return copy.deepcopy(self)
|
'Sets the consistency level for the operation. See :class:`.ConsistencyLevel`.
.. code-block:: python
for user in User.objects(id=3).consistency(CL.ONE):
print(user)'
| def consistency(self, consistency):
| clone = copy.deepcopy(self)
clone._consistency = consistency
return clone
|
'Parses a filter arg in the format:
<colname>__<op>
:returns: colname, op tuple'
| def _parse_filter_arg(self, arg):
| statement = arg.rsplit('__', 1)
if (len(statement) == 1):
return (arg, None)
elif (len(statement) == 2):
return ((statement[0], statement[1]) if (arg != 'pk__token') else (arg, None))
else:
raise QueryException("Can't parse '{0}'".format(arg))
|
'Adds IF statements to queryset'
| def iff(self, *args, **kwargs):
| if len([x for x in kwargs.values() if (x is None)]):
raise CQLEngineException('None values on iff are not allowed')
clone = copy.deepcopy(self)
for operator in args:
if (not isinstance(operator, ConditionalClause)):
raise QueryException('{0} is not a valid query operator'.format(operator))
clone._conditional.append(operator)
for (arg, val) in kwargs.items():
if isinstance(val, Token):
raise QueryException('Token() values are not valid in conditionals')
(col_name, col_op) = self._parse_filter_arg(arg)
try:
column = self.model._get_column(col_name)
except KeyError:
raise QueryException("Can't resolve column name: '{0}'".format(col_name))
if isinstance(val, BaseQueryFunction):
query_val = val
else:
query_val = column.to_database(val)
operator_class = BaseWhereOperator.get_operator((col_op or 'EQ'))
operator = operator_class()
clone._conditional.append(WhereClause(column.db_field_name, operator, query_val))
return clone
|
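A hedged sketch of iff() driving a lightweight transaction (the `User` model is hypothetical); if the IF condition does not match the current row, cqlengine raises LWTException and the update is not applied:

from cassandra.cqlengine.query import LWTException

try:
    User.objects(id=1).iff(name='alice').update(name='bob')
except LWTException:
    pass  # condition not met; nothing was written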
'Adds WHERE arguments to the queryset, returning a new queryset
See :ref:`retrieving-objects-with-filters`
Returns a QuerySet filtered on the keyword arguments'
| def filter(self, *args, **kwargs):
| if len([x for x in kwargs.values() if (x is None)]):
raise CQLEngineException('None values on filter are not allowed')
clone = copy.deepcopy(self)
for operator in args:
if (not isinstance(operator, WhereClause)):
raise QueryException('{0} is not a valid query operator'.format(operator))
clone._where.append(operator)
for (arg, val) in kwargs.items():
(col_name, col_op) = self._parse_filter_arg(arg)
quote_field = True
if (not isinstance(val, Token)):
try:
column = self.model._get_column(col_name)
except KeyError:
raise QueryException("Can't resolve column name: '{0}'".format(col_name))
else:
if (col_name != 'pk__token'):
raise QueryException("Token() values may only be compared to the 'pk__token' virtual column")
column = columns._PartitionKeysToken(self.model)
quote_field = False
partition_columns = column.partition_columns
if (len(partition_columns) != len(val.value)):
raise QueryException('Token() received {0} arguments but model has {1} partition keys'.format(len(val.value), len(partition_columns)))
val.set_columns(partition_columns)
operator_class = BaseWhereOperator.get_operator((col_op or 'EQ'))
operator = operator_class()
if isinstance(operator, InOperator):
if (not isinstance(val, (list, tuple))):
raise QueryException('IN queries must use a list/tuple value')
query_val = [column.to_database(v) for v in val]
elif isinstance(val, BaseQueryFunction):
query_val = val
elif (isinstance(operator, ContainsOperator) and isinstance(column, (columns.List, columns.Set, columns.Map))):
query_val = val
else:
query_val = column.to_database(val)
if (not col_op):
clone._defer_fields.add(column.db_field_name)
clone._deferred_values[column.db_field_name] = val
clone._where.append(WhereClause(column.db_field_name, operator, query_val, quote_field=quote_field))
return clone
|
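A sketch of the `<colname>__<op>` keyword syntax that filter() accepts, assuming a hypothetical `Event` model with `day` as the partition key and `ts` as a clustering key; `Token` comes from `cassandra.cqlengine.functions`:

from datetime import datetime
from cassandra.cqlengine.functions import Token

start, end = datetime(2024, 1, 1), datetime(2024, 1, 2)
# equality on the partition key, range operators on the clustering key
Event.objects.filter(day='2024-01-01', ts__gte=start, ts__lt=end)
# paging across partitions via the pk__token virtual column
Event.objects.filter(pk__token__gt=Token('2024-01-01'))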
'Returns a single instance matching this query, optionally with additional filter kwargs.
See :ref:`retrieving-objects-with-filters`
Returns a single object matching the QuerySet.
.. code-block:: python
user = User.get(id=1)
If no objects are matched, a :class:`~.DoesNotExist` exception is raised.
If more than one object is found, a :class:`~.MultipleObjectsReturned` exception is raised.'
| def get(self, *args, **kwargs):
| if (args or kwargs):
return self.filter(*args, **kwargs).get()
self._execute_query()
try:
self[1]
raise self.model.MultipleObjectsReturned('Multiple objects found')
except IndexError:
pass
try:
obj = self[0]
except IndexError:
raise self.model.DoesNotExist
return obj
|
'Sets the column(s) to be used for ordering
Default order is ascending; prepend a \'-\' to any column name for descending
*Note: column names must be a clustering key*
.. code-block:: python
from uuid import uuid1,uuid4
class Comment(Model):
photo_id = UUID(primary_key=True)
comment_id = TimeUUID(primary_key=True, default=uuid1) # second primary key component is a clustering key
comment = Text()
sync_table(Comment)
u = uuid4()
for x in range(5):
Comment.create(photo_id=u, comment="test %d" % x)
print("Normal")
for comment in Comment.objects(photo_id=u):
print(comment.comment_id)
print("Reversed")
for comment in Comment.objects(photo_id=u).order_by("-comment_id"):
print(comment.comment_id)'
| def order_by(self, *colnames):
| if (len(colnames) == 0):
clone = copy.deepcopy(self)
clone._order = []
return clone
conditions = []
for colname in colnames:
conditions.append('"{0}" {1}'.format(*self._get_ordering_condition(colname)))
clone = copy.deepcopy(self)
clone._order.extend(conditions)
return clone
|
'Returns the number of rows matched by this query.
*Note: This function executes a SELECT COUNT() and has a performance cost on large datasets*'
| def count(self):
| if self._batch:
raise CQLEngineException('Only inserts, updates, and deletes are available in batch mode')
if (self._count is None):
query = self._select_query()
query.count = True
result = self._execute(query)
count_row = result[0].popitem()
self._count = count_row[1]
return self._count
|
'Returns the DISTINCT rows matched by this query.
distinct_fields default to the partition key fields if not specified.
*Note: distinct_fields must be a partition key or a static column*
.. code-block:: python
class Automobile(Model):
manufacturer = columns.Text(partition_key=True)
year = columns.Integer(primary_key=True)
model = columns.Text(primary_key=True)
price = columns.Decimal()
sync_table(Automobile)
# create rows
Automobile.objects.distinct()
# or
Automobile.objects.distinct([\'manufacturer\'])'
| def distinct(self, distinct_fields=None):
| clone = copy.deepcopy(self)
if distinct_fields:
clone._distinct_fields = distinct_fields
else:
clone._distinct_fields = [x.column_name for x in self.model._partition_keys.values()]
return clone
|
'Limits the number of results returned by Cassandra. Use *0* or *None* to disable.
*Note that CQL\'s default limit is 10,000, so all queries without a limit set explicitly will have an implicit limit of 10,000*
.. code-block:: python
# Fetch 100 users
for user in User.objects().limit(100):
print(user)
# Fetch all users
for user in User.objects().limit(None):
print(user)'
| def limit(self, v):
| if (v is None):
v = 0
if (not isinstance(v, six.integer_types)):
raise TypeError
if (v == self._limit):
return self
if (v < 0):
raise QueryException('Negative limit is not allowed')
clone = copy.deepcopy(self)
clone._limit = v
return clone
|
'Sets the number of rows that are fetched at a time.
*Note that driver\'s default fetch size is 5000.*
.. code-block:: python
for user in User.objects().fetch_size(500):
print(user)'
| def fetch_size(self, v):
| if (not isinstance(v, six.integer_types)):
raise TypeError
if (v == self._fetch_size):
return self
if (v < 1):
raise QueryException('fetch size less than 1 is not allowed')
clone = copy.deepcopy(self)
clone._fetch_size = v
return clone
|
'Enables the (usually) unwise practice of querying on a clustering key without also defining a partition key'
| def allow_filtering(self):
| clone = copy.deepcopy(self)
clone._allow_filtering = True
return clone
|
'Load only these fields for the returned query'
| def only(self, fields):
| return self._only_or_defer('only', fields)
|
'Don\'t load these fields for the returned query'
| def defer(self, fields):
| return self._only_or_defer('defer', fields)
|
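A short sketch of only()/defer() (hypothetical `User` model); both take a list of column names and return a new queryset:

# load just the name column
User.objects.filter(id=1).only(['name'])
# load everything except these columns
User.objects.filter(id=1).defer(['bio', 'avatar'])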
'Deletes the contents of a query'
| def delete(self):
| partition_keys = set((x.db_field_name for x in self.model._partition_keys.values()))
if (partition_keys - set((c.field for c in self._where))):
raise QueryException('The partition key must be defined on delete queries')
dq = DeleteStatement(self.column_family_name, where=self._where, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
self._execute(dq)
|
':param timeout: Timeout for the query (in seconds)
:type timeout: float or None'
| def timeout(self, timeout):
| clone = copy.deepcopy(self)
clone._timeout = timeout
return clone
|
'Change the context on-the-fly of the Model class (keyspace, connection)'
| def using(self, keyspace=None, connection=None):
| if (connection and self._batch):
raise CQLEngineException('Cannot specify a connection on model in batch mode.')
clone = copy.deepcopy(self)
if keyspace:
from cassandra.cqlengine.models import _clone_model_class
clone.model = _clone_model_class(self.model, {'__keyspace__': keyspace})
if connection:
clone._connection = connection
return clone
|
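A one-line sketch of using(), assuming a keyspace named 'analytics' and a registered connection named 'cluster2' both exist:

User.objects.using(keyspace='analytics', connection='cluster2').filter(id=1)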
'Returns a function that will be used to instantiate query results'
| def _get_result_constructor(self):
| return ResultObject
|
'Checks that a filterset will not create an invalid select statement'
| def _validate_select_where(self):
| equal_ops = [self.model._get_column_by_db_name(w.field) for w in self._where if (isinstance(w.operator, EqualsOperator) and (not isinstance(w.value, Token)))]
token_comparison = any([w for w in self._where if isinstance(w.value, Token)])
if ((not any(((w.primary_key or w.index) for w in equal_ops))) and (not token_comparison) and (not self._allow_filtering)):
raise QueryException('Where clauses require either an =, an IN or a CONTAINS (collection) comparison with either a primary key or indexed field')
if (not self._allow_filtering):
if (not any((w.index for w in equal_ops))):
if ((not any([w.partition_key for w in equal_ops])) and (not token_comparison)):
raise QueryException('Filtering on a clustering key without a partition key is not allowed unless allow_filtering() is called on the queryset')
|
'Returns a function that will be used to instantiate query results'
| def _get_result_constructor(self):
| if (not self._values_list):
return self.model._construct_instance
elif self._flat_values_list:
key = self._only_fields[0]
return (lambda row: row[key])
else:
return (lambda row: [row[f] for f in self._only_fields])
|
'Instructs the query set to return tuples, not model instances'
| def values_list(self, *fields, **kwargs):
| flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError(('Unexpected keyword arguments to values_list: %s' % (kwargs.keys(),)))
if (flat and (len(fields) > 1)):
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self.only(fields)
clone._values_list = True
clone._flat_values_list = flat
return clone
|
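A sketch of values_list() (hypothetical `User` model):

# (id, name) tuples instead of model instances
rows = list(User.objects.filter(group=1).values_list('id', 'name'))
# a flat list of a single column's values
ids = list(User.objects.filter(group=1).values_list('id', flat=True))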
'Sets the ttl (in seconds) for modified data.
*Note that running a select query with a ttl value will raise an exception*'
| def ttl(self, ttl):
| clone = copy.deepcopy(self)
clone._ttl = ttl
return clone
|
'Allows for custom timestamps to be saved with the record.'
| def timestamp(self, timestamp):
| clone = copy.deepcopy(self)
clone._timestamp = timestamp
return clone
|
'Check the existence of an object before insertion.
If the insertion isn\'t applied, an LWTException is raised.'
| def if_not_exists(self):
| if self.model._has_counter:
raise IfNotExistsWithCounterColumn('if_not_exists cannot be used with tables containing counter columns')
clone = copy.deepcopy(self)
clone._if_not_exists = True
return clone
|
'Check the existence of an object before an update or delete.
If the update or delete isn\'t applied, an LWTException is raised.'
| def if_exists(self):
| if self.model._has_counter:
raise IfExistsWithCounterColumn('if_exists cannot be used with tables containing counter columns')
clone = copy.deepcopy(self)
clone._if_exists = True
return clone
|
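A sketch of the LWT guards (hypothetical `User` model); a failed if_not_exists() insert raises LWTException, whose `existing` attribute carries the current row state:

from cassandra.cqlengine.query import LWTException

try:
    User.if_not_exists().create(id=1, name='alice')
except LWTException as e:
    conflicting_row = e.existing  # the row that blocked the insert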
'Performs an update on the row selected by the queryset. Include values to update in the
update like so:
.. code-block:: python
Model.objects(key=n).update(value=\'x\')
Passing in updates for columns which are not part of the model will raise a ValidationError.
Per column validation will be performed, but instance level validation will not
(i.e., `Model.validate` is not called). This is sometimes referred to as a blind update.
For example:
.. code-block:: python
class User(Model):
id = Integer(primary_key=True)
name = Text()
setup(["localhost"], "test")
sync_table(User)
u = User.create(id=1, name="jon")
User.objects(id=1).update(name="Steve")
# sets name to null
User.objects(id=1).update(name=None)
Also supported is blindly adding and removing elements from container columns,
without loading a model instance from Cassandra.
Using the syntax `.update(column_name={x, y, z})` will overwrite the contents of the container, like updating a
non-container column. However, adding `__<operation>` to the end of the keyword argument makes the update call add
or remove items from the collection without overwriting the entire column.
Given the model below, here are the operations that can be performed on the different container columns:
.. code-block:: python
class Row(Model):
row_id = columns.Integer(primary_key=True)
set_column = columns.Set(Integer)
list_column = columns.List(Integer)
map_column = columns.Map(Integer, Integer)
:class:`~cqlengine.columns.Set`
- `add`: adds the elements of the given set to the column
- `remove`: removes the elements of the given set from the column
.. code-block:: python
# add elements to a set
Row.objects(row_id=5).update(set_column__add={6})
# remove elements from a set
Row.objects(row_id=5).update(set_column__remove={4})
:class:`~cqlengine.columns.List`
- `append`: appends the elements of the given list to the end of the column
- `prepend`: prepends the elements of the given list to the beginning of the column
.. code-block:: python
# append items to a list
Row.objects(row_id=5).update(list_column__append=[6, 7])
# prepend items to a list
Row.objects(row_id=5).update(list_column__prepend=[1, 2])
:class:`~cqlengine.columns.Map`
- `update`: adds the given keys/values to the column, creating new entries if they didn\'t exist, and overwriting old ones if they did
- `remove`: removes the given keys from the column; the value must be a set of keys
.. code-block:: python
# add items to a map
Row.objects(row_id=5).update(map_column__update={1: 2, 3: 4})
# remove items from a map
Row.objects(row_id=5).update(map_column__remove={1, 2})'
| def update(self, **values):
| if (not values):
return
nulled_columns = set()
updated_columns = set()
us = UpdateStatement(self.column_family_name, where=self._where, ttl=self._ttl, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
for (name, val) in values.items():
(col_name, col_op) = self._parse_filter_arg(name)
col = self.model._columns.get(col_name)
if (col is None):
raise ValidationError('{0}.{1} has no column named: {2}'.format(self.__module__, self.model.__name__, col_name))
if col.is_primary_key:
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(col_name, self.__module__, self.model.__name__))
if ((col_op == 'remove') and isinstance(col, columns.Map)):
if (not isinstance(val, set)):
raise ValidationError("Cannot apply update operation '{0}' on column '{1}' with value '{2}'. A set is required.".format(col_op, col_name, val))
val = {v: None for v in val}
else:
val = col.validate(val)
if (val is None):
nulled_columns.add(col_name)
continue
us.add_update(col, val, operation=col_op)
updated_columns.add(col_name)
if us.assignments:
self._execute(us)
if nulled_columns:
delete_conditional = ([condition for condition in self._conditional if (condition.field not in updated_columns)] if self._conditional else None)
ds = DeleteStatement(self.column_family_name, fields=nulled_columns, where=self._where, conditionals=delete_conditional, if_exists=self._if_exists)
self._execute(ds)
|
'executes a delete query to remove columns that have changed to null'
| def _delete_null_columns(self, conditionals=None):
| ds = DeleteStatement(self.column_family_name, conditionals=conditionals, if_exists=self._if_exists)
deleted_fields = False
static_only = True
for (_, v) in self.instance._values.items():
col = v.column
if v.deleted:
ds.add_field(col.db_field_name)
deleted_fields = True
static_only &= col.static
elif isinstance(col, columns.Map):
uc = MapDeleteClause(col.db_field_name, v.value, v.previous_value)
if (uc.get_context_size() > 0):
ds.add_field(uc)
deleted_fields = True
static_only |= col.static
if deleted_fields:
keys = (self.model._partition_keys if static_only else self.model._primary_keys)
for (name, col) in keys.items():
ds.add_where(col, EqualsOperator(), getattr(self.instance, name))
self._execute(ds)
|
'updates a row.
This is a blind update call.
All validation and cleaning needs to happen
prior to calling this.'
| def update(self):
| if (self.instance is None):
raise CQLEngineException('DML Query instance attribute is None')
assert (type(self.instance) == self.model)
null_clustering_key = (False if (len(self.instance._clustering_keys) == 0) else True)
static_changed_only = True
statement = UpdateStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
for (name, col) in self.instance._clustering_keys.items():
null_clustering_key = (null_clustering_key and col._val_is_null(getattr(self.instance, name, None)))
updated_columns = set()
for (name, col) in self.model._columns.items():
if (null_clustering_key and (not col.static) and (not col.partition_key)):
continue
if (not col.is_primary_key):
val = getattr(self.instance, name, None)
val_mgr = self.instance._values[name]
if (val is None):
continue
if ((not val_mgr.changed) and (not isinstance(col, columns.Counter))):
continue
static_changed_only = (static_changed_only and col.static)
statement.add_update(col, val, previous=val_mgr.previous_value)
updated_columns.add(col.db_field_name)
if statement.assignments:
for (name, col) in self.model._primary_keys.items():
if ((null_clustering_key or static_changed_only) and (not col.partition_key)):
continue
statement.add_where(col, EqualsOperator(), getattr(self.instance, name))
self._execute(statement)
if (not null_clustering_key):
delete_conditionals = ([condition for condition in self._conditional if (condition.field not in updated_columns)] if self._conditional else None)
self._delete_null_columns(delete_conditionals)
|
'Creates / updates a row.
This is a blind insert call.
All validation and cleaning needs to happen
prior to calling this.'
| def save(self):
| if (self.instance is None):
raise CQLEngineException('DML Query instance attribute is None')
assert (type(self.instance) == self.model)
nulled_fields = set()
if (self.instance._has_counter or self.instance._can_update()):
if self.instance._has_counter:
warn("'create' and 'save' actions on Counters are deprecated. A future version will disallow this. Use the 'update' mechanism instead.")
return self.update()
else:
insert = InsertStatement(self.column_family_name, ttl=self._ttl, timestamp=self._timestamp, if_not_exists=self._if_not_exists)
static_save_only = (False if (len(self.instance._clustering_keys) == 0) else True)
for (name, col) in self.instance._clustering_keys.items():
static_save_only = (static_save_only and col._val_is_null(getattr(self.instance, name, None)))
for (name, col) in self.instance._columns.items():
if (static_save_only and (not col.static) and (not col.partition_key)):
continue
val = getattr(self.instance, name, None)
if col._val_is_null(val):
if self.instance._values[name].changed:
nulled_fields.add(col.db_field_name)
continue
if (col.has_default and (not self.instance._values[name].changed)):
self.instance._values[name].explicit = True
insert.add_assignment(col, getattr(self.instance, name, None))
if (not insert.is_empty):
self._execute(insert)
if (not static_save_only):
self._delete_null_columns()
|
'Deletes one instance'
| def delete(self):
| if (self.instance is None):
raise CQLEngineException('DML Query instance attribute is None')
ds = DeleteStatement(self.column_family_name, timestamp=self._timestamp, conditionals=self._conditional, if_exists=self._if_exists)
for (name, col) in self.model._primary_keys.items():
val = getattr(self.instance, name)
if ((val is None) and (not col.partition_key)):
continue
ds.add_where(col, EqualsOperator(), val)
self._execute(ds)
|
'Set up the connection'
| def setup(self):
| global cluster, session
if (('username' in self.cluster_options) or ('password' in self.cluster_options)):
raise CQLEngineException("Username & Password are now handled by using the native driver's auth_provider")
if self.lazy_connect:
return
self.cluster = Cluster(self.hosts, **self.cluster_options)
try:
self.session = self.cluster.connect()
log.debug(format_log_context('connection initialized with internally created session', connection=self.name))
except NoHostAvailable:
if self.retry_connect:
log.warning(format_log_context('connect failed, setting up for re-attempt on first use', connection=self.name))
self.lazy_connect = True
raise
if (self.consistency is not None):
self.session.default_consistency_level = self.consistency
if ((DEFAULT_CONNECTION in _connections) and (_connections[DEFAULT_CONNECTION] == self)):
cluster = _connections[DEFAULT_CONNECTION].cluster
session = _connections[DEFAULT_CONNECTION].session
self.setup_session()
|
':param value: the time to create bounding time uuid from
:type value: datetime'
| def __init__(self, value):
| if (not isinstance(value, datetime)):
raise ValidationError('datetime instance is required')
super(TimeUUIDQueryFunction, self).__init__(value)
|
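This validator backs the MinTimeUUID/MaxTimeUUID query functions in cassandra.cqlengine.functions, which turn datetime bounds into TimeUUID range endpoints. A sketch, assuming a hypothetical `Event` model with a TimeUUID clustering column `created_at`:

from datetime import datetime
from cassandra.cqlengine.functions import MinTimeUUID, MaxTimeUUID

Event.objects.filter(day='2024-01-01',
                     created_at__gt=MinTimeUUID(datetime(2024, 1, 1)),
                     created_at__lt=MaxTimeUUID(datetime(2024, 1, 2)))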
'Returns a list of column IDs.'
| def keys(self):
| return [k for k in self]
|
'Returns list of column values.'
| def values(self):
| return [self[k] for k in self]
|
'Returns a list of column ID/value tuples.'
| def items(self):
| return [(k, self[k]) for k in self]
|
'Returns the type name if it\'s been defined;
otherwise, it creates it from the class name'
| @classmethod
def type_name(cls):
| if cls.__type_name__:
type_name = cls.__type_name__.lower()
else:
camelcase = re.compile('([a-z])([A-Z])')
ccase = (lambda s: camelcase.sub((lambda v: '{0}_{1}'.format(v.group(1), v.group(2))), s))
type_name = ccase(cls.__name__)
type_name = type_name[(-48):]
type_name = type_name.lower()
type_name = re.sub('^_+', '', type_name)
cls.__type_name__ = type_name
return type_name
|
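Tracing type_name() on a hypothetical user-defined type: the regex inserts an underscore at each lower-to-upper case boundary, then the result is lowercased, so the class name alone yields a snake_case UDT name:

from cassandra.cqlengine.usertype import UserType
from cassandra.cqlengine import columns

class StreetAddress(UserType):  # no explicit __type_name__
    street = columns.Text()
    zipcode = columns.Integer()

StreetAddress.type_name()  # -> 'street_address'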
'Cleans and validates the field values'
| def validate(self):
| for (name, field) in self._fields.items():
v = getattr(self, name)
if ((v is None) and (not self._values[name].explicit) and field.has_default):
v = field.get_default()
val = field.validate(v)
setattr(self, name, val)
|
':rtype: ModelQuerySet'
| def __get__(self, obj, model):
| if model.__abstract__:
raise CQLEngineException('cannot execute queries against abstract models')
return SimpleQuerySet(obj)
|
'Just a hint to IDEs that it\'s ok to call this
:rtype: ModelQuerySet'
| def __call__(self, *args, **kwargs):
| raise NotImplementedError
|
':rtype: NamedColumn'
| def _get_column(self):
| return self
|
'Returns the column family name if it\'s been defined;
otherwise, it creates it from the module and class name'
| def column_family_name(self, include_keyspace=True):
| if include_keyspace:
return '{0}.{1}'.format(self.keyspace, self.name)
else:
return self.name
|
'Returns the column matching the given name
:rtype: Column'
| def _get_column(self, name):
| return self.column(name)
|
'returns a table descriptor with the given
name that belongs to this keyspace'
| def table(self, name):
| return NamedTable(self.name, name)
|
'The datacenter the node is in.'
| @property
def datacenter(self):
| return self._datacenter
|
'The rack the node is in.'
| @property
def rack(self):
| return self._rack
|
'Sets the datacenter and rack for this node. Intended for internal
use (by the control connection, which periodically checks the
ring topology) only.'
| def set_location_info(self, datacenter, rack):
| self._datacenter = datacenter
self._rack = rack
|
'Atomically replaces the reconnection handler for this
host. Intended for internal use only.'
| def get_and_set_reconnection_handler(self, new_handler):
| with self.lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
|
'Subclasses must implement this method. It should attempt to
open a new Connection and return it; if a failure occurs, an
Exception should be raised.'
| def try_reconnect(self):
| raise NotImplementedError()
|
'Called when a new Connection is successfully opened. Nothing is
done by default.'
| def on_reconnection(self, connection):
| pass
|
'Called when an Exception is raised when trying to connect.
`exc` is the Exception that was raised and `next_delay` is the
number of seconds (as a float) that the handler will wait before
attempting to connect again.
Subclasses should return :const:`False` if no more attempts to
connect should be made, :const:`True` otherwise. The default
behavior is to always retry unless the error is an
:exc:`.AuthenticationFailed` instance.'
| def on_exception(self, exc, next_delay):
| if isinstance(exc, AuthenticationFailed):
return False
else:
return True
|
'Asynchronously sets the keyspace for all connections. When all
connections have been set, `callback` will be called with two
arguments: this pool, and a list of any errors that occurred.'
| def _set_keyspace_for_all_conns(self, keyspace, callback):
| remaining_callbacks = set(self._connections)
errors = []
if (not remaining_callbacks):
callback(self, errors)
return
def connection_finished_setting_keyspace(conn, error):
self.return_connection(conn)
remaining_callbacks.remove(conn)
if error:
errors.append(error)
if (not remaining_callbacks):
callback(self, errors)
self._keyspace = keyspace
for conn in self._connections:
conn.set_keyspace_async(keyspace, connection_finished_setting_keyspace)
|
'Returns a string that can be executed as a query in order to recreate
the entire schema. The string is formatted to be human readable.'
| def export_schema_as_string(self):
| return '\n\n'.join((ks.export_as_string() for ks in self.keyspaces.values()))
|
'Rebuild our view of the topology from fresh rows from the
system topology tables.
For internal use only.'
| def rebuild_token_map(self, partitioner, token_map):
| self.partitioner = partitioner
if partitioner.endswith('RandomPartitioner'):
token_class = MD5Token
elif partitioner.endswith('Murmur3Partitioner'):
token_class = Murmur3Token
elif partitioner.endswith('ByteOrderedPartitioner'):
token_class = BytesToken
else:
self.token_map = None
return
token_to_host_owner = {}
ring = []
for (host, token_strings) in six.iteritems(token_map):
for token_string in token_strings:
token = token_class.from_string(token_string)
ring.append(token)
token_to_host_owner[token] = host
all_tokens = sorted(ring)
self.token_map = TokenMap(token_class, token_to_host_owner, all_tokens, self)
|
'Returns a list of :class:`.Host` instances that are replicas for a given
partition key.'
| def get_replicas(self, keyspace, key):
| t = self.token_map
if (not t):
return []
try:
return t.get_replicas(keyspace, t.token_class.from_key(key))
except NoMurmur3:
return []
|
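A hedged sketch of reaching this through the public metadata API; the key must be the serialized partition key, which a bound statement computes as its routing_key (the keyspace/table names are placeholders):

from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'])
session = cluster.connect()
bound = session.prepare('SELECT * FROM ks.t WHERE id = ?').bind((42,))
replicas = cluster.metadata.get_replicas('ks', bound.routing_key)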
'Returns a tuple (host, new), where ``host`` is a Host
instance, and ``new`` is a bool indicating whether
the host was newly added.'
| def add_or_return_host(self, host):
| with self._hosts_lock:
try:
return (self._hosts[host.address], False)
except KeyError:
self._hosts[host.address] = host
return (host, True)
|
'Returns a list of all known :class:`.Host` instances in the cluster.'
| def all_hosts(self):
| with self._hosts_lock:
return list(self._hosts.values())
|
'Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.'
| def export_for_schema(self):
| if self.options_map:
return dict(((str(key), str(value)) for (key, value) in self.options_map.items()))
return ("{'class': '%s'}" % (self.name,))
|
'Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.'
| def export_for_schema(self):
| return ("{'class': 'SimpleStrategy', 'replication_factor': '%d'}" % (self.replication_factor,))
|
'Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.'
| def export_for_schema(self):
| ret = "{'class': 'NetworkTopologyStrategy'"
for (dc, repl_factor) in sorted(self.dc_replication_factors.items()):
ret += (", '%s': '%d'" % (dc, repl_factor))
return (ret + '}')
|
'Returns a string version of these replication options which are
suitable for use in a CREATE KEYSPACE statement.'
| def export_for_schema(self):
| return "{'class': 'LocalStrategy'}"
|
'Returns a CQL query string that can be used to recreate the entire keyspace,
including user-defined types and tables.'
| def export_as_string(self):
| cql = '\n\n'.join((((([(self.as_cql_query() + ';')] + self.user_type_strings()) + [f.export_as_string() for f in self.functions.values()]) + [a.export_as_string() for a in self.aggregates.values()]) + [t.export_as_string() for t in self.tables.values()]))
if self._exc_info:
import traceback
ret = ('/*\nWarning: Keyspace %s is incomplete because of an error processing metadata.\n' % self.name)
for line in traceback.format_exception(*self._exc_info):
ret += line
ret += ('\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/' % cql)
return ret
return cql
|
'Returns a CQL query string that can be used to recreate just this keyspace,
not including user-defined types and tables.'
| def as_cql_query(self):
| ret = ('CREATE KEYSPACE %s WITH replication = %s ' % (protect_name(self.name), self.replication_strategy.export_for_schema()))
return (ret + (' AND durable_writes = %s' % ('true' if self.durable_writes else 'false')))
|
'Returns a CQL query that can be used to recreate this type.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.'
| def as_cql_query(self, formatted=False):
| ret = ('CREATE TYPE %s.%s (%s' % (protect_name(self.keyspace), protect_name(self.name), ('\n' if formatted else '')))
if formatted:
field_join = ',\n'
padding = ' '
else:
field_join = ', '
padding = ''
fields = []
for (field_name, field_type) in zip(self.field_names, self.field_types):
fields.append(('%s %s' % (protect_name(field_name), field_type)))
ret += field_join.join((('%s%s' % (padding, field)) for field in fields))
ret += ('\n)' if formatted else ')')
return ret
|
'Returns a CQL query that can be used to recreate this aggregate.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.'
| def as_cql_query(self, formatted=False):
| sep = ('\n ' if formatted else ' ')
keyspace = protect_name(self.keyspace)
name = protect_name(self.name)
type_list = ', '.join(self.argument_types)
state_func = protect_name(self.state_func)
state_type = self.state_type
ret = ('CREATE AGGREGATE %(keyspace)s.%(name)s(%(type_list)s)%(sep)sSFUNC %(state_func)s%(sep)sSTYPE %(state_type)s' % locals())
ret += (''.join((sep, 'FINALFUNC ', protect_name(self.final_func))) if self.final_func else '')
ret += (''.join((sep, 'INITCOND ', self.initial_condition)) if (self.initial_condition is not None) else '')
return ret
|
'Returns a CQL query that can be used to recreate this function.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.'
| def as_cql_query(self, formatted=False):
| sep = ('\n ' if formatted else ' ')
keyspace = protect_name(self.keyspace)
name = protect_name(self.name)
arg_list = ', '.join([('%s %s' % (protect_name(n), t)) for (n, t) in zip(self.argument_names, self.argument_types)])
typ = self.return_type
lang = self.language
body = self.body
on_null = ('CALLED' if self.called_on_null_input else 'RETURNS NULL')
return ('CREATE FUNCTION %(keyspace)s.%(name)s(%(arg_list)s)%(sep)s%(on_null)s ON NULL INPUT%(sep)sRETURNS %(typ)s%(sep)sLANGUAGE %(lang)s%(sep)sAS $$%(body)s$$' % locals())
|
'A list of :class:`.ColumnMetadata` representing the components of
the primary key for this table.'
| @property
def primary_key(self):
| return (self.partition_key + self.clustering_key)
|
'A boolean indicating if this table can be represented as CQL in export'
| @property
def is_cql_compatible(self):
| comparator = getattr(self, 'comparator', None)
if comparator:
incompatible = (self.is_compact_storage and (len(self.columns) > (len(self.primary_key) + 1)) and (len(self.clustering_key) >= 1))
return (not incompatible)
return True
|
'Returns a string of CQL queries that can be used to recreate this table
along with all indexes on it. The returned string is formatted to
be human readable.'
| def export_as_string(self):
| if self._exc_info:
import traceback
ret = ('/*\nWarning: Table %s.%s is incomplete because of an error processing metadata.\n' % (self.keyspace_name, self.name))
for line in traceback.format_exception(*self._exc_info):
ret += line
ret += ('\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/' % self._all_as_cql())
elif (not self.is_cql_compatible):
ret = ('/*\nWarning: Table %s.%s omitted because it has constructs not compatible with CQL (was created via legacy API).\n' % (self.keyspace_name, self.name))
ret += ('\nApproximate structure, for reference:\n(this should not be used to reproduce this schema)\n\n%s\n*/' % self._all_as_cql())
else:
ret = self._all_as_cql()
return ret
|
'Returns a CQL query that can be used to recreate this table (index
creations are not included). If `formatted` is set to :const:`True`,
extra whitespace will be added to make the query human readable.'
| def as_cql_query(self, formatted=False):
| ret = ('CREATE TABLE %s.%s (%s' % (protect_name(self.keyspace_name), protect_name(self.name), ('\n' if formatted else '')))
if formatted:
column_join = ',\n'
padding = ' '
else:
column_join = ', '
padding = ''
columns = []
for col in self.columns.values():
columns.append(('%s %s%s' % (protect_name(col.name), col.cql_type, (' static' if col.is_static else ''))))
if ((len(self.partition_key) == 1) and (not self.clustering_key)):
columns[0] += ' PRIMARY KEY'
ret += column_join.join((('%s%s' % (padding, col)) for col in columns))
if ((len(self.partition_key) > 1) or self.clustering_key):
ret += ('%s%sPRIMARY KEY (' % (column_join, padding))
if (len(self.partition_key) > 1):
ret += ('(%s)' % ', '.join((protect_name(col.name) for col in self.partition_key)))
else:
ret += protect_name(self.partition_key[0].name)
if self.clustering_key:
ret += (', %s' % ', '.join((protect_name(col.name) for col in self.clustering_key)))
ret += ')'
ret += ('%s) WITH ' % ('\n' if formatted else ''))
ret += self._property_string(formatted, self.clustering_key, self.options, self.is_compact_storage)
return ret
|
'Called to produce CQL/DDL to follow the table definition.
Should contain requisite terminating semicolon(s).'
| @classmethod
def after_table_cql(cls, ext_key, ext_blob):
| pass
|
'Returns a CQL query that can be used to recreate this index.'
| def as_cql_query(self):
| options = dict(self.index_options)
index_target = options.pop('target')
if (self.kind != 'CUSTOM'):
return ('CREATE INDEX %s ON %s.%s (%s)' % (protect_name(self.name), protect_name(self.keyspace_name), protect_name(self.table_name), index_target))
else:
class_name = options.pop('class_name')
ret = ("CREATE CUSTOM INDEX %s ON %s.%s (%s) USING '%s'" % (protect_name(self.name), protect_name(self.keyspace_name), protect_name(self.table_name), index_target, class_name))
if options:
ret += (' WITH OPTIONS = %s' % Encoder().cql_encode_all_types(options))
return ret
|
'Returns a CQL query string that can be used to recreate this index.'
| def export_as_string(self):
| return (self.as_cql_query() + ';')
|
'Get a set of :class:`.Host` instances representing all of the
replica nodes for a given :class:`.Token`.'
| def get_replicas(self, keyspace, token):
| tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None)
if (tokens_to_hosts is None):
self.rebuild_keyspace(keyspace, build_if_absent=True)
tokens_to_hosts = self.tokens_to_hosts_by_ks.get(keyspace, None)
if tokens_to_hosts:
point = bisect_right(self.ring, token)
if (point == len(self.ring)):
return tokens_to_hosts[self.ring[0]]
else:
return tokens_to_hosts[self.ring[point]]
return []
|
'`token_string` should be the string representation from the server.'
| @classmethod
def from_string(cls, token_string):
| return cls(int(token_string))
|
'`token` is an int or string representing the token.'
| def __init__(self, token):
| self.value = int(token)
|
'`token_string` should be the string representation from the server.'
| @classmethod
def from_string(cls, token_string):
| if isinstance(token_string, six.text_type):
token_string = token_string.encode('ascii')
return cls(unhexlify(token_string))
|
'Set up the mostly-non-schema table options, like caching settings'
| def _build_table_options(self, row):
| options = dict(((o, row.get(o)) for o in self.recognized_table_options if (o in row)))
if ('local_read_repair_chance' in options):
val = options.pop('local_read_repair_chance')
options['dclocal_read_repair_chance'] = val
return options
|
'Set up the mostly-non-schema table options, like caching settings'
| def _build_table_options(self, row):
| return dict(((o, row.get(o)) for o in self.recognized_table_options if (o in row)))
|
'Returns a CQL query that can be used to recreate this function.
If `formatted` is set to :const:`True`, extra whitespace will
be added to make the query more readable.'
| def as_cql_query(self, formatted=False):
| sep = ('\n ' if formatted else ' ')
keyspace = protect_name(self.keyspace_name)
name = protect_name(self.name)
selected_cols = ('*' if self.include_all_columns else ', '.join((protect_name(col.name) for col in self.columns.values())))
base_table = protect_name(self.base_table_name)
where_clause = self.where_clause
part_key = ', '.join((protect_name(col.name) for col in self.partition_key))
if (len(self.partition_key) > 1):
pk = ('((%s)' % part_key)
else:
pk = ('(%s' % part_key)
if self.clustering_key:
pk += (', %s' % ', '.join((protect_name(col.name) for col in self.clustering_key)))
pk += ')'
properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options)
ret = ('CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)sSELECT %(selected_cols)s%(sep)sFROM %(keyspace)s.%(base_table)s%(sep)sWHERE %(where_clause)s%(sep)sPRIMARY KEY %(pk)s%(sep)sWITH %(properties)s' % locals())
if self.extensions:
registry = _RegisteredExtensionType._extension_registry
for k in (six.viewkeys(registry) & self.extensions):
ext = registry[k]
cql = ext.after_table_cql(self, k, self.extensions[k])
if cql:
ret += ('\n\n%s' % (cql,))
return ret
|
'Returns the timestamp that should be used if ``now`` is the current
time and ``last`` is the last timestamp returned by this object.
Intended for internal and testing use only; to generate timestamps,
call an instantiated ``MonotonicTimestampGenerator`` object.
:param int now: an integer to be used as the current time, typically
representing the current time in seconds since the UNIX epoch
:param int last: an integer representing the last timestamp returned by
this object'
| def _next_timestamp(self, now, last):
| if (now > last):
self.last = now
return now
else:
self._maybe_warn(now=now)
self.last = (last + 1)
return self.last
|
'Makes ``MonotonicTimestampGenerator`` objects callable; defers
internally to _next_timestamp.'
| def __call__(self):
| with self.lock:
return self._next_timestamp(now=int((time.time() * 1000000.0)), last=self.last)
|
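A sketch of the generator in isolation and installed on a Cluster; values are microseconds since the UNIX epoch and strictly increase even when the wall clock stalls or moves backwards:

from cassandra.cluster import Cluster
from cassandra.timestamps import MonotonicTimestampGenerator

ts_gen = MonotonicTimestampGenerator()
t1, t2 = ts_gen(), ts_gen()
assert t2 > t1  # monotonic even if time.time() repeats

# typically installed cluster-wide so every request gets a monotonic timestamp
cluster = Cluster(timestamp_generator=MonotonicTimestampGenerator())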
'Deserialize a bytestring into a value. See the deserialize() method
for more information. This method differs in that if None or the empty
string is passed in, None may be returned.'
| @classmethod
def from_binary(cls, byts, protocol_version):
| if (byts is None):
return None
elif ((len(byts) == 0) and (not cls.empty_binary_ok)):
return (EMPTY if cls.support_empty_values else None)
return cls.deserialize(byts, protocol_version)
|
'Serialize a value into a bytestring. See the serialize() method for
more information. This method differs in that if None is passed in,
the result is the empty string.'
| @classmethod
def to_binary(cls, val, protocol_version):
| return ('' if (val is None) else cls.serialize(val, protocol_version))
|
'Given a bytestring, deserialize into a value according to the protocol
for this type. Note that this does not create a new instance of this
class; it merely gives back a value that would be appropriate to go
inside an instance of this class.'
| @staticmethod
def deserialize(byts, protocol_version):
| return byts
|
'Given a value appropriate for this class, serialize it according to the
protocol for this type and return the corresponding bytestring.'
| @staticmethod
def serialize(val, protocol_version):
| return val
|
'Return the name of this type as it would be expressed by Cassandra,
optionally fully qualified. If subtypes is not None, it is expected
to be a list of other CassandraType subclasses, and the output
string includes the Cassandra names for those subclasses as well,
as parameters to this one.
Example:
>>> LongType.cass_parameterized_type_with(())
\'LongType\'
>>> LongType.cass_parameterized_type_with((), full=True)
\'org.apache.cassandra.db.marshal.LongType\'
>>> SetType.cass_parameterized_type_with([DecimalType], full=True)
\'org.apache.cassandra.db.marshal.SetType(org.apache.cassandra.db.marshal.DecimalType)\''
| @classmethod
def cass_parameterized_type_with(cls, subtypes, full=False):
| cname = cls.cassname
if (full and ('.' not in cname)):
cname = (apache_cassandra_type_prefix + cname)
if (not subtypes):
return cname
sublist = ', '.join((styp.cass_parameterized_type(full=full) for styp in subtypes))
return ('%s(%s)' % (cname, sublist))
|
'Given a set of other CassandraTypes, create a new subtype of this type
using them as parameters. This is how composite types are constructed.
>>> MapType.apply_parameters([DateType, BooleanType])
<class \'cassandra.cqltypes.MapType(DateType, BooleanType)\'>
`subtypes` will be a sequence of CassandraTypes. If provided, `names`
will be an equally long sequence of column names or Nones.'
| @classmethod
def apply_parameters(cls, subtypes, names=None):
| if ((cls.num_subtypes != 'UNKNOWN') and (len(subtypes) != cls.num_subtypes)):
raise ValueError(('%s types require %d subtypes (%d given)' % (cls.typename, cls.num_subtypes, len(subtypes))))
newname = cls.cass_parameterized_type_with(subtypes)
if (six.PY2 and isinstance(newname, unicode)):
newname = newname.encode('utf-8')
return type(newname, (cls,), {'subtypes': subtypes, 'cassname': cls.cassname, 'fieldnames': names})
|
'Return a CQL type specifier for this type. If this type has parameters,
they are included in standard CQL <> notation.'
| @classmethod
def cql_parameterized_type(cls):
| if (not cls.subtypes):
return cls.typename
return ('%s<%s>' % (cls.typename, ', '.join((styp.cql_parameterized_type() for styp in cls.subtypes))))
|
'Return a Cassandra type specifier for this type. If this type has
parameters, they are included in the standard () notation.'
| @classmethod
def cass_parameterized_type(cls, full=False):
| return cls.cass_parameterized_type_with(cls.subtypes, full=full)
|
'There is no CQL notation for Composites, so we override this.'
| @classmethod
def cql_parameterized_type(cls):
| typestring = cls.cass_parameterized_type(full=True)
return ("'%s'" % (typestring,))
|