'Encodes a message using the specified frame parameters, and compressor
:param msg: the message, typically of cassandra.protocol._MessageType, generated by the driver
:param stream_id: protocol stream id for the frame header
:param protocol_version: version for the frame header, also used when encoding contents
:param compressor: optional compression function to be used on the body'
| @classmethod
def encode_message(cls, msg, stream_id, protocol_version, compressor, allow_beta_protocol_version):
| flags = 0
body = io.BytesIO()
if msg.custom_payload:
if (protocol_version < 4):
raise UnsupportedOperation('Custom key/value payloads can only be used with protocol version 4 or higher')
flags |= CUSTOM_PAYLOAD_FLAG
write_bytesmap(body, msg.custom_payload)
msg.send_body(body, protocol_version)
body = body.getvalue()
if (compressor and (len(body) > 0)):
body = compressor(body)
flags |= COMPRESSED_FLAG
if msg.tracing:
flags |= TRACING_FLAG
if allow_beta_protocol_version:
flags |= USE_BETA_FLAG
buff = io.BytesIO()
cls._write_header(buff, protocol_version, flags, stream_id, msg.opcode, len(body))
buff.write(body)
return buff.getvalue()
|
'Write a CQL protocol frame header.'
| @staticmethod
def _write_header(f, version, flags, stream_id, opcode, length):
| pack = (v3_header_pack if (version >= 3) else header_pack)
f.write(pack(version, flags, stream_id, opcode))
write_int(f, length)
|
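The `header_pack` and `v3_header_pack` helpers used above are defined elsewhere in the module. A minimal sketch of how they could be built with `struct`, assuming the native protocol header layout (one byte each for version, flags, and opcode, with a one-byte stream id before v3 and a two-byte stream id from v3 on):

import struct

# Assumed header layouts, per the CQL native protocol spec:
#   v1/v2: version (B), flags (B), stream id (b, one signed byte), opcode (B)
#   v3+:   version (B), flags (B), stream id (h, two signed bytes), opcode (B)
header_pack = struct.Struct('>BBbB').pack
v3_header_pack = struct.Struct('>BBhB').pack

def write_int(f, i):
    # the four-byte big-endian [int] carrying the body length
    f.write(struct.pack('>i', i))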
'Decodes a native protocol message body
:param protocol_version: version to use when decoding contents
:param user_type_map: map[keyspace name] = map[type name] = custom type to instantiate when deserializing this type
:param stream_id: native protocol stream id from the frame header
:param flags: native protocol flags bitmap from the header
:param opcode: native protocol opcode from the header
:param body: frame body
:param decompressor: optional decompression function to inflate the body
:return: a message decoded from the body and frame attributes'
| @classmethod
def decode_message(cls, protocol_version, user_type_map, stream_id, flags, opcode, body, decompressor, result_metadata):
| if (flags & COMPRESSED_FLAG):
if (decompressor is None):
raise RuntimeError('No de-compressor available for compressed frame!')
body = decompressor(body)
flags ^= COMPRESSED_FLAG
body = io.BytesIO(body)
if (flags & TRACING_FLAG):
trace_id = UUID(bytes=body.read(16))
flags ^= TRACING_FLAG
else:
trace_id = None
if (flags & WARNING_FLAG):
warnings = read_stringlist(body)
flags ^= WARNING_FLAG
else:
warnings = None
if (flags & CUSTOM_PAYLOAD_FLAG):
custom_payload = read_bytesmap(body)
flags ^= CUSTOM_PAYLOAD_FLAG
else:
custom_payload = None
flags &= USE_BETA_MASK
if flags:
log.warning('Unknown protocol flags set: %02x. May cause problems.', flags)
msg_class = cls.message_types_by_opcode[opcode]
msg = msg_class.recv_body(body, protocol_version, user_type_map, result_metadata)
msg.stream_id = stream_id
msg.trace_id = trace_id
msg.custom_payload = custom_payload
msg.warnings = warnings
if msg.warnings:
for w in msg.warnings:
log.warning('Server warning: %s', w)
return msg
|
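`decode_message` clears each flag bit it consumes (the `flags ^= ...` XORs) so that any leftover bits can be reported as unknown. The flag constants themselves are defined elsewhere; the values below follow the CQL native protocol spec and are shown as an assumed reference, not a quotation of the driver source:

# Frame flag bits per the native protocol spec (assumed values)
COMPRESSED_FLAG = 0x01
TRACING_FLAG = 0x02
CUSTOM_PAYLOAD_FLAG = 0x04
WARNING_FLAG = 0x08
USE_BETA_FLAG = 0x10
USE_BETA_MASK = ~USE_BETA_FLAG  # masks the beta bit out before the unknown-flag warning

flags = COMPRESSED_FLAG | TRACING_FLAG
flags ^= COMPRESSED_FLAG  # XOR clears a bit that is known to be set
assert flags == TRACING_FLAG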
'Called when a node is marked up.'
| def on_up(self, host):
| raise NotImplementedError()
|
'Called when a node is marked down.'
| def on_down(self, host):
| raise NotImplementedError()
|
'Called when a node is added to the cluster. The newly added node
should be considered up.'
| def on_add(self, host):
| raise NotImplementedError()
|
'Called when a node is removed from the cluster.'
| def on_remove(self, host):
| raise NotImplementedError()
|
'Returns a measure of how remote a :class:`~.pool.Host` is in
terms of the :class:`.HostDistance` enums.'
| def distance(self, host):
| raise NotImplementedError()
|
'This method is called to initialize the load balancing
policy with a set of :class:`.Host` instances before its
first use. The `cluster` parameter is an instance of
:class:`.Cluster`.'
| def populate(self, cluster, hosts):
| raise NotImplementedError()
|
'Given a :class:`~.query.Statement` instance, return an iterable
of :class:`.Host` instances which should be queried in that
order. A generator may work well for custom implementations
of this method.
Note that the `query` argument may be :const:`None` when preparing
statements.
`working_keyspace` should be the string name of the current keyspace,
as set through :meth:`.Session.set_keyspace()` or with a ``USE``
statement.'
| def make_query_plan(self, working_keyspace=None, query=None):
| raise NotImplementedError()
|
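The abstract methods above make up the whole load balancing interface: host state callbacks, a distance measure, and query plan generation. A minimal illustrative subclass, assuming the public `LoadBalancingPolicy` and `HostDistance` names from `cassandra.policies` (locking details are omitted for brevity):

from cassandra.policies import LoadBalancingPolicy, HostDistance

class AllLocalPolicy(LoadBalancingPolicy):
    """Illustrative only: treats every host as LOCAL and yields them all."""

    def populate(self, cluster, hosts):
        self._hosts = set(hosts)

    def distance(self, host):
        return HostDistance.LOCAL

    def make_query_plan(self, working_keyspace=None, query=None):
        for host in list(self._hosts):  # copy, since the set can change concurrently
            yield host

    def on_up(self, host):
        self._hosts.add(host)

    on_add = on_up

    def on_down(self, host):
        self._hosts.discard(host)

    on_remove = on_down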
'This will be called after the cluster Metadata has been initialized.
If the load balancing policy implementation cannot be supported for
some reason (such as a missing C extension), this is the point at
which it should raise an exception.'
| def check_supported(self):
| pass
|
'The `local_dc` parameter should be the name of the datacenter
(such as is reported by ``nodetool ring``) that should
be considered local. If not specified, the driver will choose
a local_dc based on the first host among :attr:`.Cluster.contact_points`
having a valid DC. If relying on this mechanism, all specified
contact points should be nodes in a single, local DC.
`used_hosts_per_remote_dc` controls how many nodes in
each remote datacenter will have connections opened
against them. In other words, `used_hosts_per_remote_dc` hosts
will be considered :attr:`~.HostDistance.REMOTE` and the
rest will be considered :attr:`~.HostDistance.IGNORED`.
By default, all remote hosts are ignored.'
| def __init__(self, local_dc='', used_hosts_per_remote_dc=0):
| self.local_dc = local_dc
self.used_hosts_per_remote_dc = used_hosts_per_remote_dc
self._dc_live_hosts = {}
self._position = 0
self._contact_points = []
LoadBalancingPolicy.__init__(self)
|
'The `hosts` parameter should be a sequence of hosts to permit
connections to.'
| def __init__(self, hosts):
| msg = 'WhiteListRoundRobinPolicy is deprecated. It will be removed in 4.0. It can effectively be reimplemented using HostFilterPolicy.'
warn(msg, DeprecationWarning)
log.warning(msg)
self._allowed_hosts = hosts
self._allowed_hosts_resolved = [endpoint[4][0] for a in self._allowed_hosts for endpoint in socket.getaddrinfo(a, None, socket.AF_UNSPEC, socket.SOCK_STREAM)]
RoundRobinPolicy.__init__(self)
|
':param child_policy: an instantiated :class:`.LoadBalancingPolicy`
that this one will defer to.
:param predicate: a one-parameter function that takes a :class:`.Host`.
If it returns a falsey value, the :class:`.Host` will
be :attr:`.IGNORED` and not returned in query plans.'
| def __init__(self, child_policy, predicate):
| super(HostFilterPolicy, self).__init__()
self._child_policy = child_policy
self._predicate = predicate
|
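A hypothetical use of `HostFilterPolicy`, wrapping a round-robin child policy with an allow-list predicate (the addresses are made up):

from cassandra.cluster import Cluster
from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

allowed = {'10.0.0.1', '10.0.0.2'}  # hypothetical node addresses
policy = HostFilterPolicy(
    child_policy=RoundRobinPolicy(),
    predicate=lambda host: host.address in allowed,
)
cluster = Cluster(contact_points=list(allowed), load_balancing_policy=policy)

Hosts failing the predicate are reported as IGNORED by `distance` and dropped from every query plan, as the property and methods in the following rows show.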
'A predicate, set on object initialization, that takes a :class:`.Host`
and returns a value. If the value is falsy, the :class:`.Host` is
:class:`~HostDistance.IGNORED`. If the value is truthy,
:class:`.HostFilterPolicy` defers to the child policy to determine the
host\'s distance.
This is a read-only value set in ``__init__``, implemented as a
``property``.'
| @property
def predicate(self):
| return self._predicate
|
'Checks if ``predicate(host)``, then returns
:attr:`~HostDistance.IGNORED` if falsey, and defers to the child policy
otherwise.'
| def distance(self, host):
| if self.predicate(host):
return self._child_policy.distance(host)
else:
return HostDistance.IGNORED
|
'Defers to the child policy\'s
:meth:`.LoadBalancingPolicy.make_query_plan` and filters the results.
Note that this filtering may break desirable properties of the wrapped
policy in some cases. For instance, imagine if you configure this
policy to filter out ``host2``, and to wrap a round-robin policy that
rotates through three hosts in the order ``host1, host2, host3``,
``host2, host3, host1``, ``host3, host1, host2``, repeating. This
policy will yield ``host1, host3``, ``host3, host1``, ``host3, host1``,
disproportionately favoring ``host3``.'
| def make_query_plan(self, working_keyspace=None, query=None):
| child_qp = self._child_policy.make_query_plan(working_keyspace=working_keyspace, query=query)
for host in child_qp:
if self.predicate(host):
(yield host)
|
'`host` is an instance of :class:`.Host`.'
| def __init__(self, host):
| self.host = host
|
'Implementations should return :const:`True` if the host should be
convicted, :const:`False` otherwise.'
| def add_failure(self, connection_exc):
| raise NotImplementedError()
|
'Implementations should clear out any convictions or state regarding
the host.'
| def reset(self):
| raise NotImplementedError()
|
'This should return a finite or infinite iterable of delays (each as a
floating point number of seconds) between each failed reconnection
attempt. Note that if the iterable is finite, reconnection attempts
will cease once the iterable is exhausted.'
| def new_schedule(self):
| raise NotImplementedError()
|
'`delay` should be a floating point number of seconds to wait between
each attempt.
`max_attempts` should be a total number of attempts to be made before
giving up, or :const:`None` to continue reconnection attempts forever.
The default is 64.'
| def __init__(self, delay, max_attempts=64):
| if (delay < 0):
raise ValueError('delay must not be negative')
if ((max_attempts is not None) and (max_attempts < 0)):
raise ValueError('max_attempts must not be negative')
self.delay = delay
self.max_attempts = max_attempts
|
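Given that constructor, `new_schedule` needs little more than `itertools.repeat`: a finite schedule when `max_attempts` is set, an infinite one otherwise. A sketch consistent with the documented behavior:

from itertools import repeat

def new_schedule(self):
    # One constant delay per reconnection attempt.
    if self.max_attempts is None:
        return repeat(self.delay)
    return repeat(self.delay, self.max_attempts)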
'`base_delay` and `max_delay` should be in floating point units of
seconds.
`max_attempts` should be a total number of attempts to be made before
giving up, or :const:`None` to continue reconnection attempts forever.
The default is 64.'
| def __init__(self, base_delay, max_delay, max_attempts=64):
| if ((base_delay < 0) or (max_delay < 0)):
raise ValueError('Delays may not be negative')
if (max_delay < base_delay):
raise ValueError('Max delay must be greater than or equal to base delay')
if ((max_attempts is not None) and (max_attempts < 0)):
raise ValueError('max_attempts must not be negative')
self.base_delay = base_delay
self.max_delay = max_delay
self.max_attempts = max_attempts
|
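A matching sketch of `new_schedule` for the exponential policy, doubling the delay on each attempt and capping it at `max_delay` (newer driver releases also add random jitter, which this sketch omits):

def new_schedule(self):
    attempt = 0
    while self.max_attempts is None or attempt < self.max_attempts:
        # 2 ** attempt grows without bound, so cap the result at max_delay
        yield min(self.base_delay * (2 ** attempt), self.max_delay)
        attempt += 1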
'This is called when a read operation times out from the coordinator\'s
perspective (i.e. a replica did not respond to the coordinator in time).
It should return a tuple with two items: one of the class enums (such
as :attr:`.RETRY`) and a :class:`.ConsistencyLevel` to retry the
operation at or :const:`None` to keep the same consistency level.
`query` is the :class:`.Statement` that timed out.
`consistency` is the :class:`.ConsistencyLevel` that the operation was
attempted at.
The `required_responses` and `received_responses` parameters describe
how many replicas needed to respond to meet the requested consistency
level and how many actually did respond before the coordinator timed
out the request. `data_retrieved` is a boolean indicating whether
any of those responses contained data (as opposed to just a digest).
`retry_num` counts how many times the operation has been retried, so
the first time this method is called, `retry_num` will be 0.
By default, operations will be retried at most once, and only if
a sufficient number of replicas responded (with data digests).'
| def on_read_timeout(self, query, consistency, required_responses, received_responses, data_retrieved, retry_num):
| if (retry_num != 0):
return (self.RETHROW, None)
elif ((received_responses >= required_responses) and (not data_retrieved)):
return (self.RETRY, consistency)
else:
return (self.RETHROW, None)
|
'This is called when a write operation times out from the coordinator\'s
perspective (i.e. a replica did not respond to the coordinator in time).
`query` is the :class:`.Statement` that timed out.
`consistency` is the :class:`.ConsistencyLevel` that the operation was
attempted at.
`write_type` is one of the :class:`.WriteType` enums describing the
type of write operation.
The `required_responses` and `received_responses` parameters describe
how many replicas needed to acknowledge the write to meet the requested
consistency level and how many replicas actually did acknowledge the
write before the coordinator timed out the request.
`retry_num` counts how many times the operation has been retried, so
the first time this method is called, `retry_num` will be 0.
By default, failed write operations will be retried at most once, and
they will only be retried if the `write_type` was
:attr:`~.WriteType.BATCH_LOG`.'
| def on_write_timeout(self, query, consistency, write_type, required_responses, received_responses, retry_num):
| if (retry_num != 0):
return (self.RETHROW, None)
elif (write_type == WriteType.BATCH_LOG):
return (self.RETRY, consistency)
else:
return (self.RETHROW, None)
|
'This is called when the coordinator node determines that a read or
write operation cannot succeed because the number of live
replicas is too low to meet the requested :class:`.ConsistencyLevel`.
This means that the read or write operation was never forwarded to
any replicas.
`query` is the :class:`.Statement` that failed.
`consistency` is the :class:`.ConsistencyLevel` that the operation was
attempted at.
`required_replicas` is the number of replicas that would have needed to
acknowledge the operation to meet the requested consistency level.
`alive_replicas` is the number of replicas that the coordinator
considered alive at the time of the request.
`retry_num` counts how many times the operation has been retried, so
the first time this method is called, `retry_num` will be 0.
By default, no retries will be attempted and the error will be re-raised.'
| def on_unavailable(self, query, consistency, required_replicas, alive_replicas, retry_num):
| return ((self.RETRY_NEXT_HOST, consistency) if (retry_num == 0) else (self.RETHROW, None))
|
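These three callbacks form the whole retry policy interface. A hedged sketch of a more aggressive custom policy that retries every failure class exactly once; note that blindly retrying writes is only safe for idempotent statements:

from cassandra.policies import RetryPolicy

class AlwaysRetryOncePolicy(RetryPolicy):
    """Illustrative only: retry any timeout or unavailable error exactly once."""

    def on_read_timeout(self, query, consistency, required_responses,
                        received_responses, data_retrieved, retry_num):
        return (self.RETRY, consistency) if retry_num == 0 else (self.RETHROW, None)

    def on_write_timeout(self, query, consistency, write_type,
                         required_responses, received_responses, retry_num):
        return (self.RETRY, consistency) if retry_num == 0 else (self.RETHROW, None)

    def on_unavailable(self, query, consistency, required_replicas,
                       alive_replicas, retry_num):
        return (self.RETRY_NEXT_HOST, consistency) if retry_num == 0 else (self.RETHROW, None)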
'Accepts the node IP address, and returns a translated address to be used when connecting to this node.'
| def translate(self, addr):
| raise NotImplementedError()
|
'Reverse-DNS the public broadcast_address, then look up that hostname to get the AWS-resolved IP, which
will point to the private IP address within the same datacenter.'
| def translate(self, addr):
| family = socket.getaddrinfo(addr, 0, socket.AF_UNSPEC, socket.SOCK_STREAM)[0][0]
host = socket.getfqdn(addr)
for a in socket.getaddrinfo(host, 0, family, socket.SOCK_STREAM):
try:
return a[4][0]
except Exception:
pass
return addr
|
'Returns a new plan for the given keyspace and statement.
:param keyspace: the keyspace being queried
:param statement: the statement being executed
:return: the new plan'
| def new_plan(self, keyspace, statement):
| raise NotImplementedError()
|
'Called once by Cluster.connect(). This should be used by implementations
to set up any resources that will be shared across connections.'
| @classmethod
def initialize_reactor(cls):
| pass
|
'Called after forking. This should clean up any remaining reactor state
from the parent process.'
| @classmethod
def handle_fork(cls):
| pass
|
'A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).'
| @classmethod
def factory(cls, host, timeout, *args, **kwargs):
| start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(host, *args, **kwargs)
elapsed = (time.time() - start)
conn.connected_event.wait((timeout - elapsed))
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(host, conn.protocol_version)
raise conn.last_error
elif (not conn.connected_event.is_set()):
conn.close()
raise OperationTimedOut(('Timed out creating connection (%s seconds)' % timeout))
else:
return conn
|
'This must be called while self.lock is held.'
| def get_request_id(self):
| try:
return self.request_ids.popleft()
except IndexError:
new_request_id = (self.highest_request_id + 1)
assert (new_request_id <= self.max_request_id)
self.highest_request_id = new_request_id
return self.highest_request_id
|
'Returns a list of (success, response) tuples. If success
is False, response will be an Exception. Otherwise, response
will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised.'
| def wait_for_responses(self, *msgs, **kwargs):
| if (self.is_closed or self.is_defunct):
raise ConnectionShutdown(('Connection %s is already closed' % (self,)))
timeout = kwargs.get('timeout')
fail_on_error = kwargs.get('fail_on_error', True)
waiter = ResponseWaiter(self, len(msgs), fail_on_error)
messages_sent = 0
while True:
needed = (len(msgs) - messages_sent)
with self.lock:
available = min(needed, ((self.max_request_id - self.in_flight) + 1))
request_ids = [self.get_request_id() for _ in range(available)]
self.in_flight += available
for (i, request_id) in enumerate(request_ids):
self.send_msg(msgs[(messages_sent + i)], request_id, partial(waiter.got_response, index=(messages_sent + i)))
messages_sent += available
if (messages_sent == len(msgs)):
break
else:
if (timeout is not None):
timeout -= 0.01
if (timeout <= 0.0):
raise OperationTimedOut()
time.sleep(0.01)
try:
return waiter.deliver(timeout)
except OperationTimedOut:
raise
except Exception as exc:
self.defunct(exc)
raise
|
'Register a callback for a given event type.'
| def register_watcher(self, event_type, callback, register_timeout=None):
| self._push_watchers[event_type].add(callback)
self.wait_for_response(RegisterMessage(event_list=[event_type]), timeout=register_timeout)
|
'Register multiple callback/event type pairs, expressed as a dict.'
| def register_watchers(self, type_callback_dict, register_timeout=None):
| for (event_type, callback) in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout)
|
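A hypothetical registration for server push events; the event-type strings follow the native protocol's REGISTER message ('STATUS_CHANGE', 'TOPOLOGY_CHANGE', 'SCHEMA_CHANGE'), and `conn` is assumed to be an established connection:

def on_status_change(event):
    # event is the decoded push payload; its shape depends on the event type
    print('status change:', event)

def on_topology_change(event):
    print('topology change:', event)

conn.register_watchers({
    'STATUS_CHANGE': on_status_change,
    'TOPOLOGY_CHANGE': on_topology_change,
}, register_timeout=10)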
'Use this in order to avoid deadlocking the event loop thread.
When the operation completes, `callback` will be called with
two arguments: this connection and an Exception if an error
occurred, otherwise :const:`None`.
This method will always increment :attr:`.in_flight` attribute, even if
it doesn\'t need to make a request, just to maintain an
":attr:`.in_flight` is incremented" invariant.'
| def set_keyspace_async(self, keyspace, callback):
| while True:
with self.lock:
if (self.in_flight < self.max_request_id):
self.in_flight += 1
break
time.sleep(0.001)
if ((not keyspace) or (keyspace == self.keyspace)):
callback(self, None)
return
query = QueryMessage(query=('USE "%s"' % (keyspace,)), consistency_level=ConsistencyLevel.ONE)
def process_result(result):
if isinstance(result, ResultMessage):
self.keyspace = keyspace
callback(self, None)
elif isinstance(result, InvalidRequestException):
callback(self, result.to_exception())
else:
callback(self, self.defunct(ConnectionException(('Problem while setting keyspace: %r' % (result,)), self.host)))
request_id = self.get_request_id()
self.send_msg(query, request_id, process_result)
|
'If fail_on_error was set to False, a list of (success, response)
tuples will be returned. If success is False, response will be
an Exception. Otherwise, response will be the normal query response.
If fail_on_error was left as True and one of the requests
failed, the corresponding Exception will be raised. Otherwise,
the normal response will be returned.'
| def deliver(self, timeout=None):
| self.event.wait(timeout)
if self.error:
raise self.error
elif (not self.event.is_set()):
raise OperationTimedOut()
else:
return self.responses
|
'Called from the client thread with a Timer object'
| def add_timer(self, timer):
| self._new_timers.append((timer.end, timer))
|
'Run callbacks on all expired timers.
Called from the event thread
:return: next end time, or None'
| def service_timeouts(self):
| queue = self._queue
if self._new_timers:
new_timers = self._new_timers
while new_timers:
heappush(queue, new_timers.pop())
if queue:
now = time.time()
while queue:
try:
timer = queue[0][1]
if timer.finish(now):
heappop(queue)
else:
return timer.end
except Exception:
log.exception('Exception while servicing timeout callback: ')
|
'returns the number of entries this clause will add to the query context'
| def get_context_size(self):
| return 1
|
'sets the value placeholder that will be used in the query'
| def set_context_id(self, i):
| self.context_id = i
|
'updates the query context with this clause\'s values'
| def update_context(self, ctx):
| assert isinstance(ctx, dict)
ctx[str(self.context_id)] = self.value
|
':param field:
:param operator:
:param value:
:param quote_field: hack to get the token function rendering properly
:return:'
| def __init__(self, field, operator, value, quote_field=True):
| if (not isinstance(operator, BaseWhereOperator)):
raise StatementException('operator must be of type {0}, got {1}'.format(BaseWhereOperator, type(operator)))
super(WhereClause, self).__init__(field, value)
self.operator = operator
self.query_value = (self.value if isinstance(self.value, QueryValue) else QueryValue(self.value))
self.quote_field = quote_field
|
'works out the updates to be performed'
| def _analyze(self):
| if ((self.value is None) or (self.value == self.previous)):
pass
elif (self._operation == 'add'):
self._additions = self.value
elif (self._operation == 'remove'):
self._removals = self.value
elif (self.previous is None):
self._assignments = self.value
else:
self._additions = ((self.value - self.previous) or None)
self._removals = ((self.previous - self.value) or None)
self._analyzed = True
|
'works out the updates to be performed'
| def _analyze(self):
| if ((self.value is None) or (self.value == self.previous)):
pass
elif (self._operation == 'append'):
self._append = self.value
elif (self._operation == 'prepend'):
self._prepend = self.value
elif (self.previous is None):
self._assignments = self.value
elif (len(self.value) < len(self.previous)):
self._assignments = self.value
elif (len(self.previous) == 0):
self._assignments = self.value
else:
search_space = (len(self.value) - max(0, (len(self.previous) - 1)))
search_size = len(self.previous)
for i in range(search_space):
j = (i + search_size)
sub = self.value[i:j]
idx_cmp = (lambda idx: (self.previous[idx] == sub[idx]))
if (idx_cmp(0) and idx_cmp((-1)) and (self.previous == sub)):
self._prepend = (self.value[:i] or None)
self._append = (self.value[j:] or None)
break
if (self._prepend is self._append is None):
self._assignments = self.value
self._analyzed = True
|
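The search loop above scans `value` for `previous` as one contiguous run; everything before the run becomes a prepend and everything after becomes an append, so only the deltas are sent. A hypothetical walkthrough:

previous = [2, 3]
value = [1, 2, 3, 4]
# search_space = 4 - (2 - 1) = 3, search_size = 2
# i = 0: value[0:2] == [1, 2] != previous
# i = 1: value[1:3] == [2, 3] == previous, so:
#   _prepend = value[:1]  -> [1]
#   _append  = value[3:]  -> [4]
# if no window matches, the whole list is reassigned instead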
'returns the context dict for this statement
:rtype: dict'
| def get_context(self):
| ctx = {}
for clause in (self.where_clauses or []):
clause.update_context(ctx)
return ctx
|
'Adds an iff clause to this statement
:param clause: The clause that will be added to the iff statement
:type clause: ConditionalClause'
| def add_conditional_clause(self, clause):
| clause.set_context_id(self.context_counter)
self.context_counter += clause.get_context_size()
self.conditionals.append(clause)
|
'we\'re expecting self.timestamp to be either an int (or long), a datetime, or a timedelta
:return:'
| @property
def timestamp_normalized(self):
| if (not self.timestamp):
return None
if isinstance(self.timestamp, six.integer_types):
return self.timestamp
if isinstance(self.timestamp, timedelta):
tmp = (datetime.now() + self.timestamp)
else:
tmp = self.timestamp
return int(((time.mktime(tmp.timetuple()) * 1000000.0) + tmp.microsecond))
|
':param where: a list of where clauses
:type where: list of cqlengine.statements.WhereClause'
| def __init__(self, table, fields=None, count=False, where=None, order_by=None, limit=None, allow_filtering=False, distinct_fields=None, fetch_size=None):
| super(SelectStatement, self).__init__(table, where=where, fetch_size=fetch_size)
self.fields = ([fields] if isinstance(fields, six.string_types) else (fields or []))
self.distinct_fields = distinct_fields
self.count = count
self.order_by = ([order_by] if isinstance(order_by, six.string_types) else order_by)
self.limit = limit
self.allow_filtering = allow_filtering
|
'Indicates whether or not this value has changed.
:rtype: boolean'
| @property
def changed(self):
| if self.explicit:
return (self.value != self.previous_value)
if isinstance(self.column, BaseContainerColumn):
default_value = self.column.get_default()
if self.column._val_is_null(default_value):
return ((not self.column._val_is_null(self.value)) and (self.value != self.previous_value))
elif (self.previous_value is None):
return (self.value != default_value)
return (self.value != self.previous_value)
return False
|
'Returns a cleaned and validated value. Raises a ValidationError
if there\'s a problem'
| def validate(self, value):
| if (value is None):
if self.required:
raise ValidationError('{0} - None values are not allowed'.format((self.column_name or self.db_field)))
return value
|
'Converts data from the database into python values.
Raises a ValidationError if the value can\'t be converted'
| def to_python(self, value):
| return value
|
'Converts python value into database value'
| def to_database(self, value):
| return value
|
'Returns a column definition for CQL table definition'
| def get_column_def(self):
| static = ('static' if self.static else '')
return '{0} {1} {2}'.format(self.cql, self.db_type, static)
|
'Sets the column name during model class construction.
This value will be ignored if db_field is set in __init__'
| def set_column_name(self, name):
| self.column_name = name
|
'Returns the CQL name of this column'
| @property
def db_field_name(self):
| return (self.db_field or self.column_name)
|
'Returns the name of the cql index'
| @property
def db_index_name(self):
| return 'index_{0}'.format(self.db_field_name)
|
'determines if the given value equates to a null value for the given column type'
| def _val_is_null(self, val):
| return (val is None)
|
':param int min_length: Sets the minimum length of this string, for validation purposes.
Defaults to 1 if this is a ``required`` column. Otherwise, None.
:param int max_length: Sets the maximum length of this string, for validation purposes.'
| def __init__(self, min_length=None, max_length=None, **kwargs):
| self.min_length = (1 if ((not min_length) and kwargs.get('required', False)) else min_length)
self.max_length = max_length
if (self.min_length is not None):
if (self.min_length < 0):
raise ValueError('Minimum length is not allowed to be negative.')
if (self.max_length is not None):
if (self.max_length < 0):
raise ValueError('Maximum length is not allowed to be negative.')
if ((self.min_length is not None) and (self.max_length is not None)):
if (self.max_length < self.min_length):
raise ValueError('Maximum length must be greater than or equal to minimum length.')
super(Text, self).__init__(**kwargs)
|
'Only allow ASCII and None values.
Check against US-ASCII, a.k.a. 7-bit ASCII, a.k.a. ISO646-US, a.k.a.
the Basic Latin block of the Unicode character set.
Source: https://github.com/apache/cassandra/blob
/3dcbe90e02440e6ee534f643c7603d50ca08482b/src/java/org/apache/cassandra
/serializers/AsciiSerializer.java#L29'
| def validate(self, value):
| value = super(Ascii, self).validate(value)
if value:
charset = (value if isinstance(value, (bytearray,)) else map(ord, value))
if (not set(range(128)).issuperset(charset)):
raise ValidationError('{!r} is not an ASCII string.'.format(value))
return value
|
'Always returns a Python boolean.'
| def validate(self, value):
| value = super(Boolean, self).validate(value)
if (value is not None):
value = bool(value)
return value
|
':param types: a sequence of sub types in this collection'
| def __init__(self, types, **kwargs):
| instances = []
for t in types:
inheritance_comparator = (issubclass if isinstance(t, type) else isinstance)
if (not inheritance_comparator(t, Column)):
raise ValidationError(('%s is not a column class' % (t,)))
if (t.db_type is None):
raise ValidationError(('%s is an abstract type' % (t,)))
inst = (t() if isinstance(t, type) else t)
if isinstance(t, BaseCollectionColumn):
inst._freeze_db_type()
instances.append(inst)
self.types = instances
super(BaseCollectionColumn, self).__init__(**kwargs)
|
':param args: column types representing tuple composition'
| def __init__(self, *args, **kwargs):
| if (not args):
raise ValueError('Tuple must specify at least one inner type')
super(Tuple, self).__init__(args, **kwargs)
self.db_type = 'tuple<{0}>'.format(', '.join((typ.db_type for typ in self.types)))
|
':param value_type: a column class indicating the types of the value
:param strict: sets whether non-set values will be coerced to a set
on validation or will raise a validation error; defaults to True'
| def __init__(self, value_type, strict=True, default=set, **kwargs):
| self.strict = strict
super(Set, self).__init__((value_type,), default=default, **kwargs)
self.value_col = self.types[0]
if (not self.value_col._python_type_hashable):
raise ValidationError('Cannot create a Set with unhashable value type (see PYTHON-494)')
self.db_type = 'set<{0}>'.format(self.value_col.db_type)
|
':param value_type: a column class indicating the types of the value'
| def __init__(self, value_type, default=list, **kwargs):
| super(List, self).__init__((value_type,), default=default, **kwargs)
self.value_col = self.types[0]
self.db_type = 'list<{0}>'.format(self.value_col.db_type)
|
':param key_type: a column class indicating the types of the key
:param value_type: a column class indicating the types of the value'
| def __init__(self, key_type, value_type, default=dict, **kwargs):
| super(Map, self).__init__((key_type, value_type), default=default, **kwargs)
self.key_col = self.types[0]
self.value_col = self.types[1]
if (not self.key_col._python_type_hashable):
raise ValidationError('Cannot create a Map with unhashable key type (see PYTHON-494)')
self.db_type = 'map<{0}, {1}>'.format(self.key_col.db_type, self.value_col.db_type)
|
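The collection columns above compose their CQL types from their inner column types. A hypothetical cqlengine model using them (table and field names are made up):

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Inventory(Model):  # hypothetical model
    owner = columns.Text(primary_key=True)
    tags = columns.Set(columns.Text)                    # set<text>
    history = columns.List(columns.Integer)             # list<int>
    prices = columns.Map(columns.Text, columns.Double)  # map<text, double>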
':param type user_type: specifies the :class:`~.cqlengine.usertype.UserType` model of the column'
| def __init__(self, user_type, **kwargs):
| self.user_type = user_type
self.db_type = ('frozen<%s>' % user_type.type_name())
super(UserDefinedType, self).__init__(**kwargs)
|
'Just a hint to IDEs that it\'s ok to call this'
| def __call__(self, *args, **kwargs):
| raise NotImplementedError
|
':rtype: ModelQuerySet'
| def __get__(self, obj, model):
| if model.__abstract__:
raise CQLEngineException('cannot execute queries against abstract models')
queryset = model.__queryset__(model)
if (model._is_polymorphic and (not model._is_polymorphic_base)):
(name, column) = (model._discriminator_column_name, model._discriminator_column)
if (column.partition_key or column.index):
return queryset.filter(**{name: model.__discriminator_value__})
return queryset
|
'Just a hint to IDEs that it\'s ok to call this
:rtype: ModelQuerySet'
| def __call__(self, *args, **kwargs):
| raise NotImplementedError
|
':param column:
:type column: columns.Column
:return:'
| def __init__(self, column):
| self.column = column
self.query_evaluator = ColumnQueryEvaluator(self.column)
|
'Returns either the value or the column, depending
on whether an instance is provided
:param instance: the model instance
:type instance: Model'
| def __get__(self, instance, owner):
| try:
return instance._values[self.column.column_name].getval()
except AttributeError:
return self.query_evaluator
|
'Sets the value on an instance; raises an exception when called on a class
TODO: use None instance to create update statements'
| def __set__(self, instance, value):
| if instance:
return instance._values[self.column.column_name].setval(value)
else:
raise AttributeError('cannot reassign column values')
|
'Sets the column value to None, if possible'
| def __delete__(self, instance):
| if instance:
if self.column.can_delete:
instance._values[self.column.column_name].delval()
else:
raise AttributeError('cannot delete {0} columns'.format(self.column.column_name))
|
'Pretty printing of models by their primary key'
| def __str__(self):
| return '{0} <{1}>'.format(self.__class__.__name__, ', '.join(('{0}={1}'.format(k, getattr(self, k)) for k in self._primary_keys.keys())))
|
'Method used to construct instances from query results;
this is where polymorphic deserialization occurs.'
| @classmethod
def _construct_instance(cls, values):
| if cls._db_map:
values = dict(((cls._db_map.get(k, k), v) for (k, v) in values.items()))
if cls._is_polymorphic:
disc_key = values.get(cls._discriminator_column_name)
if (disc_key is None):
raise PolymorphicModelException('discriminator value was not found in values')
poly_base = (cls if cls._is_polymorphic_base else cls._polymorphic_base)
klass = poly_base._get_model_by_discriminator_value(disc_key)
if (klass is None):
poly_base._discover_polymorphic_submodels()
klass = poly_base._get_model_by_discriminator_value(disc_key)
if (klass is None):
raise PolymorphicModelException('unrecognized discriminator column {0} for class {1}'.format(disc_key, poly_base.__name__))
if (not issubclass(klass, cls)):
raise PolymorphicModelException('{0} is not a subclass of {1}'.format(klass.__name__, cls.__name__))
values = dict(((k, v) for (k, v) in values.items() if (k in klass._columns.keys())))
else:
klass = cls
instance = klass(**values)
instance._set_persisted(force=True)
return instance
|
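`_construct_instance` resolves the concrete class through the discriminator column. A hypothetical hierarchy that exercises this path (the `discriminator_column` kwarg and `__discriminator_value__` attribute are the assumed cqlengine spelling):

from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Pet(Model):  # hypothetical polymorphic base
    id = columns.UUID(primary_key=True)
    pet_type = columns.Text(discriminator_column=True)

class Dog(Pet):
    __discriminator_value__ = 'dog'

class Cat(Pet):
    __discriminator_value__ = 'cat'

# a row whose pet_type column holds 'dog' deserializes as a Dog instance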
'Called by the save function to check if this should be
persisted with update or insert
:return:'
| def _can_update(self):
| if (not self._is_persisted):
return False
return all(((not self._values[k].changed) for k in self._primary_keys))
|
'Returns the manual keyspace, if set, otherwise the default keyspace'
| @classmethod
def _get_keyspace(cls):
| return (cls.__keyspace__ or DEFAULT_KEYSPACE)
|
'Returns the column matching the given name, raising a key error if
it doesn\'t exist
:param name: the name of the column to return
:rtype: Column'
| @classmethod
def _get_column(cls, name):
| return cls._columns[name]
|
'Returns the column, mapped by db_field name'
| @classmethod
def _get_column_by_db_name(cls, name):
| return cls._columns.get(cls._db_map.get(name, name))
|
'Returns the column family name if it\'s been defined;
otherwise, it creates it from the module and class name'
| @classmethod
def column_family_name(cls, include_keyspace=True):
| cf_name = protect_name(cls._raw_column_family_name())
if include_keyspace:
keyspace = cls._get_keyspace()
if (not keyspace):
raise CQLEngineException('Model keyspace is not set and no default is available. Set model keyspace or setup connection before attempting to generate a query.')
return '{0}.{1}'.format(protect_name(keyspace), cf_name)
return cf_name
|
'Function to change a column value without changing the value manager states'
| def _set_column_value(self, name, value):
| self._values[name].value = value
|
'Cleans and validates the field values'
| def validate(self):
| for (name, col) in self._columns.items():
v = getattr(self, name)
if ((v is None) and (not self._values[name].explicit) and col.has_default):
v = col.get_default()
val = col.validate(v)
self._set_column_value(name, val)
|
'Iterate over column ids.'
| def __iter__(self):
| for column_id in self._columns.keys():
(yield column_id)
|
'Returns column\'s value.'
| def __getitem__(self, key):
| if (not isinstance(key, six.string_types)):
raise TypeError
if (key not in self._columns.keys()):
raise KeyError
return getattr(self, key)
|
'Sets a column\'s value.'
| def __setitem__(self, key, val):
| if (not isinstance(key, six.string_types)):
raise TypeError
if (key not in self._columns.keys()):
raise KeyError
return setattr(self, key, val)
|
'Returns the number of columns defined on that model.'
| def __len__(self):
| try:
return self._len
except AttributeError:
self._len = len(self._columns.keys())
return self._len
|
'Returns a list of column IDs.'
| def keys(self):
| return [k for k in self]
|
'Returns a list of column values.'
| def values(self):
| return [self[k] for k in self]
|
'Returns a list of column ID/value tuples.'
| def items(self):
| return [(k, self[k]) for k in self]
|
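Together with `__iter__` and `__getitem__`, the `keys`/`values`/`items` trio gives model instances a read-only mapping interface, so a hypothetical instance converts straight to a dict:

person = Person(first_name='Kimberly', last_name='Eggleston')  # hypothetical model instance
person['first_name']  # 'Kimberly'
dict(person)          # {'first_name': 'Kimberly', 'last_name': 'Eggleston', ...}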
'Returns a map of column names to cleaned values'
| def _as_dict(self):
| values = (self._dynamic_columns or {})
for (name, col) in self._columns.items():
values[name] = col.to_database(getattr(self, name, None))
return values
|
'Create an instance of this model in the database.
Takes the model column values as keyword arguments. Setting a value to
`None` is equivalent to running a CQL `DELETE` on that column.
Returns the instance.'
| @classmethod
def create(cls, **kwargs):
| extra_columns = (set(kwargs.keys()) - set(cls._columns.keys()))
if extra_columns:
raise ValidationError('Incorrect columns passed: {0}'.format(extra_columns))
return cls.objects.create(**kwargs)
|
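A hypothetical end-to-end use of `create`, assuming cqlengine's connection has been set up and the schema synced beforehand:

import uuid
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class Person(Model):  # hypothetical model
    id = columns.UUID(primary_key=True, default=uuid.uuid4)
    first_name = columns.Text()
    last_name = columns.Text()

person = Person.create(first_name='Kimberly', last_name='Eggleston')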
'Returns a queryset representing all stored objects.
This is a pass-through to the model objects().all()'
| @classmethod
def all(cls):
| return cls.objects.all()
|
'Returns a queryset based on filter parameters.
This is a pass-through to :meth:`~cqlengine.queries.filter` on the model objects().'
| @classmethod
def filter(cls, *args, **kwargs):
| return cls.objects.filter(*args, **kwargs)
|
'Returns a single object based on the passed filter constraints.
This is a pass-through to :meth:`~cqlengine.queries.get` on the model objects().'
| @classmethod
def get(cls, *args, **kwargs):
| return cls.objects.get(*args, **kwargs)
|
'Sets a timeout for use in :meth:`~.save`, :meth:`~.update`, and :meth:`~.delete`
operations'
| def timeout(self, timeout):
| assert (self._batch is None), 'Setting both timeout and batch is not supported'
self._timeout = timeout
return self
|
'Saves an object to the database.
.. code-block:: python
#create a person instance
person = Person(first_name=\'Kimberly\', last_name=\'Eggleston\')
#saves it to Cassandra
person.save()'
| def save(self):
| if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot save polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, if_not_exists=self._if_not_exists, conditional=self._conditional, timeout=self._timeout, if_exists=self._if_exists).save()
self._set_persisted()
self._timestamp = None
return self
|
'Performs an update on the model instance. You can pass in values to set on the model
for updating, or you can call without values to execute an update against any modified
fields. If no fields on the model have been modified since loading, no query will be
performed. Model validation is performed normally. Setting a value to `None` is
equivalent to running a CQL `DELETE` on that column.
It is possible to do a blind update, that is, to update a field without having first selected the object out of the database.
See :ref:`Blind Updates <blind_updates>`'
| def update(self, **values):
| for (column_id, v) in values.items():
col = self._columns.get(column_id)
if (col is None):
raise ValidationError('{0}.{1} has no column named: {2}'.format(self.__module__, self.__class__.__name__, column_id))
if col.is_primary_key:
current_value = getattr(self, column_id)
if (v != current_value):
raise ValidationError("Cannot apply update to primary key '{0}' for {1}.{2}".format(column_id, self.__module__, self.__class__.__name__))
setattr(self, column_id, v)
if self._is_polymorphic:
if self._is_polymorphic_base:
raise PolymorphicModelException('cannot update polymorphic base model')
else:
setattr(self, self._discriminator_column_name, self.__discriminator_value__)
self.validate()
self.__dmlquery__(self.__class__, self, batch=self._batch, ttl=self._ttl, timestamp=self._timestamp, consistency=self.__consistency__, conditional=self._conditional, timeout=self._timeout, if_exists=self._if_exists).update()
self._set_persisted()
self._timestamp = None
return self
|
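One hedged spelling of the blind update mentioned above: populate only the primary key on a fresh instance and call `update`, so no prior SELECT is needed (`some_id` is a hypothetical known key):

Person(id=some_id).update(last_name='Smith')  # writes only last_name for that key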
'Deletes the object from the database'
| def delete(self):
| self.__dmlquery__(self.__class__, self, batch=self._batch, timestamp=self._timestamp, consistency=self.__consistency__, timeout=self._timeout, conditional=self._conditional, if_exists=self._if_exists).delete()
|