desc | decl | bodies
---|---|---|
'Return a built-in datetime.time (nanosecond precision truncated to micros).'
| def time(self):
| return datetime.time(hour=self.hour, minute=self.minute, second=self.second, microsecond=(self.nanosecond // Time.MICRO))
|
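A minimal usage sketch, assuming this method belongs to `cassandra.util.Time`, whose initializer accepts an integer nanosecond count since midnight, a `datetime.time`, or an `'HH:MM:SS[.mmmuuunnn]'` string:

```python
import datetime
from cassandra.util import Time  # assumed home of this class

t = Time(45296789000001)  # 12:34:56 plus 789000001 ns past midnight
# nanosecond precision is truncated to microseconds by time()
assert t.time() == datetime.time(12, 34, 56, 789000)
```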
'Initializer value can be:
- integer_type: absolute days from epoch (1970, 1, 1). Can be negative.
- datetime.date: built-in date
- string_type: a string time of the form "yyyy-mm-dd"'
| def __init__(self, value):
| if isinstance(value, six.integer_types):
self.days_from_epoch = value
elif isinstance(value, (datetime.date, datetime.datetime)):
self._from_timetuple(value.timetuple())
elif isinstance(value, six.string_types):
self._from_datestring(value)
else:
raise TypeError('Date arguments must be a whole number, datetime.date, or string')
|
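A quick sketch of the three accepted value types, assuming this is `cassandra.util.Date`:

```python
import datetime
from cassandra.util import Date  # assumed home of this class

Date(0)                          # absolute days from epoch: 1970-01-01
Date(-365)                       # negative days are allowed: 1969-01-01
Date(datetime.date(2017, 1, 1))  # from a built-in date
Date('2017-01-01')               # from a 'yyyy-mm-dd' string
```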
'Absolute seconds from epoch (can be negative)'
| @property
def seconds(self):
| return (self.days_from_epoch * Date.DAY)
|
'Return a built-in datetime.date for Dates falling in the years [datetime.MINYEAR, datetime.MAXYEAR]
ValueError is raised for Dates outside this range.'
| def date(self):
| try:
dt = datetime_from_timestamp(self.seconds)
return datetime.date(dt.year, dt.month, dt.day)
except Exception:
raise ValueError(('%r exceeds ranges for built-in datetime.date' % self))
|
'internal-only; no checks are done because this entry is populated on cluster init'
| @property
def default(self):
| return self.profiles[EXEC_PROFILE_DEFAULT]
|
'When :attr:`~.Cluster.protocol_version` is 2 or higher, this should
be an instance of a subclass of :class:`~cassandra.auth.AuthProvider`,
such as :class:`~.PlainTextAuthProvider`.
When :attr:`~.Cluster.protocol_version` is 1, this should be
a function that accepts one argument, the IP address of a node,
and returns a dict of credentials for that node.
When not using authentication, this should be left as :const:`None`.'
| @property
def auth_provider(self):
| return self._auth_provider
|
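For example, with protocol version 2 or higher (a sketch using the bundled plain-text provider; address and credentials are illustrative):

```python
from cassandra.auth import PlainTextAuthProvider
from cassandra.cluster import Cluster

auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')
cluster = Cluster(['10.0.0.1'], auth_provider=auth_provider)
```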
'An instance of :class:`.policies.LoadBalancingPolicy` or
one of its subclasses.
.. versionchanged:: 2.6.0
Defaults to :class:`~.TokenAwarePolicy` (:class:`~.DCAwareRoundRobinPolicy`)
when using CPython (where the murmur3 extension is available), and to :class:`~.DCAwareRoundRobinPolicy`
otherwise. The default local DC will be chosen from contact points.
**Please see** :class:`~.DCAwareRoundRobinPolicy` **for a discussion on default behavior with respect to
DC locality and remote nodes.**'
| @property
def load_balancing_policy(self):
| return self._load_balancing_policy
|
'A default :class:`.policies.RetryPolicy` instance to use for all
:class:`.Statement` objects which do not have a :attr:`~.Statement.retry_policy`
explicitly set.'
| @property
def default_retry_policy(self):
| return self._default_retry_policy
|
'Flag indicating whether internal schema metadata is updated.
When disabled, the driver does not populate Cluster.metadata.keyspaces on connect, or on schema change events. This
can be used to speed initial connection, and reduce load on client and server during operation. Turning this off
forgoes token-aware request routing and programmatic inspection of the metadata model.'
| @property
def schema_metadata_enabled(self):
| return self.control_connection._schema_meta_enabled
|
'Flag indicating whether internal token metadata is updated.
When disabled, the driver does not query node token information on connect, or on topology change events. This
can be used to speed initial connection, and reduce load on client and server during operation. It is most useful
in large clusters using vnodes, where the token map can be expensive to compute. Turning this off
forgoes token-aware request routing and programmatic inspection of the token ring.'
| @property
def token_metadata_enabled(self):
| return self.control_connection._token_meta_enabled
|
'``executor_threads`` defines the number of threads in a pool for handling asynchronous tasks such as
establishing connection pools or refreshing metadata.
Any of the mutable Cluster attributes may be set as keyword arguments to the constructor.'
| def __init__(self, contact_points=['127.0.0.1'], port=9042, compression=True, auth_provider=None, load_balancing_policy=None, reconnection_policy=None, default_retry_policy=None, conviction_policy_factory=None, metrics_enabled=False, connection_class=None, ssl_options=None, sockopts=None, cql_version=None, protocol_version=_NOT_SET, executor_threads=2, max_schema_agreement_wait=10, control_connection_timeout=2.0, idle_heartbeat_interval=30, schema_event_refresh_window=2, topology_event_refresh_window=10, connect_timeout=5, schema_metadata_enabled=True, token_metadata_enabled=True, address_translator=None, status_event_refresh_window=2, prepare_on_all_hosts=True, reprepare_on_up=True, execution_profiles=None, allow_beta_protocol_version=False, timestamp_generator=None, idle_heartbeat_timeout=30):
| if (contact_points is not None):
if isinstance(contact_points, six.string_types):
raise TypeError('contact_points should not be a string, it should be a sequence (e.g. list) of strings')
if (None in contact_points):
raise ValueError('contact_points should not contain None (it can resolve to localhost)')
self.contact_points = contact_points
self.port = port
self.contact_points_resolved = [endpoint[4][0] for a in self.contact_points for endpoint in socket.getaddrinfo(a, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)]
self.compression = compression
if (protocol_version is not _NOT_SET):
self.protocol_version = protocol_version
self._protocol_version_explicit = True
self.allow_beta_protocol_version = allow_beta_protocol_version
self.auth_provider = auth_provider
if (load_balancing_policy is not None):
if isinstance(load_balancing_policy, type):
raise TypeError('load_balancing_policy should not be a class, it should be an instance of that class')
self.load_balancing_policy = load_balancing_policy
else:
self._load_balancing_policy = default_lbp_factory()
if (reconnection_policy is not None):
if isinstance(reconnection_policy, type):
raise TypeError('reconnection_policy should not be a class, it should be an instance of that class')
self.reconnection_policy = reconnection_policy
if (default_retry_policy is not None):
if isinstance(default_retry_policy, type):
raise TypeError('default_retry_policy should not be a class, it should be an instance of that class')
self.default_retry_policy = default_retry_policy
if (conviction_policy_factory is not None):
if (not callable(conviction_policy_factory)):
raise ValueError('conviction_policy_factory must be callable')
self.conviction_policy_factory = conviction_policy_factory
if (address_translator is not None):
if isinstance(address_translator, type):
raise TypeError('address_translator should not be a class, it should be an instance of that class')
self.address_translator = address_translator
if (connection_class is not None):
self.connection_class = connection_class
if (timestamp_generator is not None):
if (not callable(timestamp_generator)):
raise ValueError('timestamp_generator must be callable')
self.timestamp_generator = timestamp_generator
else:
self.timestamp_generator = MonotonicTimestampGenerator()
self.profile_manager = ProfileManager()
self.profile_manager.profiles[EXEC_PROFILE_DEFAULT] = ExecutionProfile(self.load_balancing_policy, self.default_retry_policy, Session._default_consistency_level, Session._default_serial_consistency_level, Session._default_timeout, Session._row_factory)
if (load_balancing_policy or default_retry_policy):
if execution_profiles:
raise ValueError('Clusters constructed with execution_profiles should not specify legacy parameters load_balancing_policy or default_retry_policy. Configure this in a profile instead.')
self._config_mode = _ConfigMode.LEGACY
elif execution_profiles:
self.profile_manager.profiles.update(execution_profiles)
self._config_mode = _ConfigMode.PROFILES
self.metrics_enabled = metrics_enabled
self.ssl_options = ssl_options
self.sockopts = sockopts
self.cql_version = cql_version
self.max_schema_agreement_wait = max_schema_agreement_wait
self.control_connection_timeout = control_connection_timeout
self.idle_heartbeat_interval = idle_heartbeat_interval
self.idle_heartbeat_timeout = idle_heartbeat_timeout
self.schema_event_refresh_window = schema_event_refresh_window
self.topology_event_refresh_window = topology_event_refresh_window
self.status_event_refresh_window = status_event_refresh_window
self.connect_timeout = connect_timeout
self.prepare_on_all_hosts = prepare_on_all_hosts
self.reprepare_on_up = reprepare_on_up
self._listeners = set()
self._listener_lock = Lock()
self.sessions = WeakSet()
self.metadata = Metadata()
self.control_connection = None
self._prepared_statements = WeakValueDictionary()
self._prepared_statement_lock = Lock()
self._user_types = defaultdict(dict)
self._min_requests_per_connection = {HostDistance.LOCAL: DEFAULT_MIN_REQUESTS, HostDistance.REMOTE: DEFAULT_MIN_REQUESTS}
self._max_requests_per_connection = {HostDistance.LOCAL: DEFAULT_MAX_REQUESTS, HostDistance.REMOTE: DEFAULT_MAX_REQUESTS}
self._core_connections_per_host = {HostDistance.LOCAL: DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, HostDistance.REMOTE: DEFAULT_MIN_CONNECTIONS_PER_REMOTE_HOST}
self._max_connections_per_host = {HostDistance.LOCAL: DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, HostDistance.REMOTE: DEFAULT_MAX_CONNECTIONS_PER_REMOTE_HOST}
self.executor = ThreadPoolExecutor(max_workers=executor_threads)
self.scheduler = _Scheduler(self.executor)
self._lock = RLock()
if self.metrics_enabled:
from cassandra.metrics import Metrics
self.metrics = Metrics(weakref.proxy(self))
self.control_connection = ControlConnection(self, self.control_connection_timeout, self.schema_event_refresh_window, self.topology_event_refresh_window, self.status_event_refresh_window, schema_metadata_enabled, token_metadata_enabled)
|
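A minimal construction sketch using execution profiles rather than the legacy policy parameters (contact points and DC name are illustrative):

```python
from cassandra.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy

profile = ExecutionProfile(
    load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='dc1')),
    request_timeout=15.0)
cluster = Cluster(contact_points=['10.0.0.1', '10.0.0.2'], executor_threads=4,
                  execution_profiles={EXEC_PROFILE_DEFAULT: profile})
session = cluster.connect()
```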
'Registers a class to use to represent a particular user-defined type.
Query parameters for this user-defined type will be assumed to be
instances of `klass`. Result sets for this user-defined type will
be instances of `klass`. If no class is registered for a user-defined
type, a namedtuple will be used for result sets, and non-prepared
statements may not encode parameters for this type correctly.
`keyspace` is the name of the keyspace that the UDT is defined in.
`user_type` is the string name of the UDT to register the mapping
for.
`klass` should be a class with attributes whose names match the
fields of the user-defined type. The constructor must accept kwargs
for each of the fields in the UDT.
This method should only be called after the type has been created
within Cassandra.
Example::
cluster = Cluster(protocol_version=3)
session = cluster.connect()
session.set_keyspace(\'mykeyspace\')
session.execute("CREATE TYPE address (street text, zipcode int)")
session.execute("CREATE TABLE users (id int PRIMARY KEY, location address)")
# create a class to map to the "address" UDT
class Address(object):
def __init__(self, street, zipcode):
self.street = street
self.zipcode = zipcode
cluster.register_user_type(\'mykeyspace\', \'address\', Address)
# insert a row using an instance of Address
session.execute("INSERT INTO users (id, location) VALUES (%s, %s)",
(0, Address("123 Main St.", 78723)))
# results will include Address instances
results = session.execute("SELECT * FROM users")
row = results[0]
print(row.id, row.location.street, row.location.zipcode)'
| def register_user_type(self, keyspace, user_type, klass):
| if (self.protocol_version < 3):
log.warning('User Type serialization is only supported in native protocol version 3+ (%d in use). CQL encoding for simple statements will still work, but named tuples will be returned when reading type %s.%s.', self.protocol_version, keyspace, user_type)
self._user_types[keyspace][user_type] = klass
for session in self.sessions:
session.user_type_registered(keyspace, user_type, klass)
UserType.evict_udt_class(keyspace, user_type)
|
'Adds an :class:`.ExecutionProfile` to the cluster. This makes it available for use by ``name`` in :meth:`.Session.execute`
and :meth:`.Session.execute_async`. This method will raise if the profile already exists.
Normally profiles will be injected at cluster initialization via ``Cluster(execution_profiles)``. This method
provides a way of adding them dynamically.
Adding a new profile updates the connection pools according to the specified ``load_balancing_policy``. By default,
this method will wait up to five seconds for the pool creation to complete, so the profile can be used immediately
upon return. This behavior can be controlled using ``pool_wait_timeout`` (see
`concurrent.futures.wait <https://docs.python.org/3/library/concurrent.futures.html#concurrent.futures.wait>`_
for timeout semantics).'
| def add_execution_profile(self, name, profile, pool_wait_timeout=5):
| if (not isinstance(profile, ExecutionProfile)):
raise TypeError('profile must be an instance of ExecutionProfile')
if (self._config_mode == _ConfigMode.LEGACY):
raise ValueError('Cannot add execution profiles when legacy parameters are set explicitly.')
if (name in self.profile_manager.profiles):
raise ValueError('Profile %s already exists' % (name,))
self.profile_manager.profiles[name] = profile
profile.load_balancing_policy.populate(self, self.metadata.all_hosts())
for host in filter((lambda h: h.is_up), self.metadata.all_hosts()):
profile.load_balancing_policy.on_up(host)
futures = set()
for session in self.sessions:
futures.update(session.update_created_pools())
(_, not_done) = wait_futures(futures, pool_wait_timeout)
if not_done:
raise OperationTimedOut('Failed to create all new connection pools in the %ss timeout.' % (pool_wait_timeout,))
|
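Usage sketch (the profile name and query are illustrative):

```python
from cassandra.cluster import ExecutionProfile

long_profile = ExecutionProfile(request_timeout=60.0)
cluster.add_execution_profile('long_running', long_profile)  # blocks up to pool_wait_timeout
session.execute("SELECT * FROM big_table", execution_profile='long_running')
```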
'Sets a threshold for concurrent requests per connection, below which
connections will be considered for disposal (down to core connections;
see :meth:`~Cluster.set_core_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.'
| def set_min_requests_per_connection(self, host_distance, min_requests):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_min_requests_per_connection() only has an effect when using protocol_version 1 or 2.')
if ((min_requests < 0) or (min_requests > 126) or (min_requests >= self._max_requests_per_connection[host_distance])):
raise ValueError(('min_requests must be 0-126 and less than the max_requests for this host_distance (%d)' % (self._max_requests_per_connection[host_distance],)))
self._min_requests_per_connection[host_distance] = min_requests
|
'Sets a threshold for concurrent requests per connection, above which new
connections will be created to a host (up to max connections;
see :meth:`~Cluster.set_max_connections_per_host`).
Pertains to connection pool management in protocol versions {1,2}.'
| def set_max_requests_per_connection(self, host_distance, max_requests):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_max_requests_per_connection() only has an effect when using protocol_version 1 or 2.')
if ((max_requests < 1) or (max_requests > 127) or (max_requests <= self._min_requests_per_connection[host_distance])):
raise ValueError(('max_requests must be 1-127 and greater than the min_requests for this host_distance (%d)' % (self._min_requests_per_connection[host_distance],)))
self._max_requests_per_connection[host_distance] = max_requests
|
'Gets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.'
| def get_core_connections_per_host(self, host_distance):
| return self._core_connections_per_host[host_distance]
|
'Sets the minimum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 2 for :attr:`~HostDistance.LOCAL` and 1 for
:attr:`~HostDistance.REMOTE`.
Protocol version 1 and 2 are limited in the number of concurrent
requests they can send per connection. The driver implements connection
pooling to support higher levels of concurrency.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.'
| def set_core_connections_per_host(self, host_distance, core_connections):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_core_connections_per_host() only has an effect when using protocol_version 1 or 2.')
old = self._core_connections_per_host[host_distance]
self._core_connections_per_host[host_distance] = core_connections
if (old < core_connections):
self._ensure_core_connections()
|
'Gets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
This property is ignored if :attr:`~.Cluster.protocol_version` is
3 or higher.'
| def get_max_connections_per_host(self, host_distance):
| return self._max_connections_per_host[host_distance]
|
'Sets the maximum number of connections per Session that will be opened
for each host with :class:`~.HostDistance` equal to `host_distance`.
The default is 8 for :attr:`~HostDistance.LOCAL` and 2 for
:attr:`~HostDistance.REMOTE`.
If :attr:`~.Cluster.protocol_version` is set to 3 or higher, this
is not supported (there is always one connection per host, unless
the host is remote and :attr:`connect_to_remote_hosts` is :const:`False`)
and using this will result in an :exc:`~.UnsupportedOperation`.'
| def set_max_connections_per_host(self, host_distance, max_connections):
| if (self.protocol_version >= 3):
raise UnsupportedOperation('Cluster.set_max_connections_per_host() only has an effect when using protocol_version 1 or 2.')
self._max_connections_per_host[host_distance] = max_connections
|
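Putting the four knobs together, a pool-tuning sketch for the legacy protocols (values are illustrative; each call raises :exc:`UnsupportedOperation` under protocol v3+):

```python
from cassandra.cluster import Cluster
from cassandra.pool import HostDistance

cluster = Cluster(protocol_version=2)
cluster.set_core_connections_per_host(HostDistance.LOCAL, 4)      # open at least 4
cluster.set_max_connections_per_host(HostDistance.LOCAL, 16)      # grow to at most 16
cluster.set_max_requests_per_connection(HostDistance.LOCAL, 100)  # grow pool above this
cluster.set_min_requests_per_connection(HostDistance.LOCAL, 20)   # shrink pool below this
```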
'Called to create a new connection with proper configuration.
Intended for internal use only.'
| def connection_factory(self, address, *args, **kwargs):
| kwargs = self._make_connection_kwargs(address, kwargs)
return self.connection_class.factory(address, self.connect_timeout, *args, **kwargs)
|
'Creates and returns a new :class:`~.Session` object. If `keyspace`
is specified, that keyspace will be the default keyspace for
operations on the ``Session``.'
| def connect(self, keyspace=None, wait_for_all_pools=False):
| with self._lock:
if self.is_shutdown:
raise DriverException('Cluster is already shut down')
if (not self._is_setup):
log.debug('Connecting to cluster, contact points: %s; protocol version: %s', self.contact_points, self.protocol_version)
self.connection_class.initialize_reactor()
_register_cluster_shutdown(self)
for address in self.contact_points_resolved:
(host, new) = self.add_host(address, signal=False)
if new:
host.set_up()
for listener in self.listeners:
listener.on_add(host)
self.profile_manager.populate(weakref.proxy(self), self.metadata.all_hosts())
try:
self.control_connection.connect()
for address in self.contact_points_resolved:
h = self.metadata.get_host(address)
if (h and (self.profile_manager.distance(h) == HostDistance.IGNORED)):
h.is_up = None
log.debug('Control connection created')
except Exception:
log.exception('Control connection failed to connect, shutting down Cluster:')
self.shutdown()
raise
self.profile_manager.check_supported()
if self.idle_heartbeat_interval:
self._idle_heartbeat = ConnectionHeartbeat(self.idle_heartbeat_interval, self.get_connection_holders, timeout=self.idle_heartbeat_timeout)
self._is_setup = True
session = self._new_session(keyspace)
if wait_for_all_pools:
wait_futures(session._initial_connect_futures)
return session
|
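For example (keyspace and query names are illustrative):

```python
from cassandra.cluster import Cluster

cluster = Cluster(['10.0.0.1'])
session = cluster.connect('mykeyspace')        # default keyspace for this session
rows = session.execute('SELECT * FROM users')
cluster.shutdown()
```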
'Closes all sessions and connection associated with this Cluster.
To ensure all connections are properly closed, **you should always
call shutdown() on a Cluster instance when you are done with it**.
Once shutdown, a Cluster should not be used for any purpose.'
| def shutdown(self):
| with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
if self._idle_heartbeat:
self._idle_heartbeat.stop()
self.scheduler.shutdown()
self.control_connection.shutdown()
for session in self.sessions:
session.shutdown()
self.executor.shutdown()
_discard_cluster_shutdown(self)
|
'Intended for internal use only.'
| def on_up(self, host):
| if self.is_shutdown:
return
log.debug('Waiting to acquire lock for handling up status of node %s', host)
with host.lock:
if host._currently_handling_node_up:
log.debug('Another thread is already handling up status of node %s', host)
return
if host.is_up:
log.debug('Host %s was already marked up', host)
return
host._currently_handling_node_up = True
log.debug('Starting to handle up status of node %s', host)
have_future = False
futures = set()
try:
log.info('Host %s may be up; will prepare queries and open connection pool', host)
reconnector = host.get_and_set_reconnection_handler(None)
if reconnector:
log.debug('Now that host %s is up, cancelling the reconnection handler', host)
reconnector.cancel()
if (self.profile_manager.distance(host) != HostDistance.IGNORED):
self._prepare_all_queries(host)
log.debug('Done preparing all queries for host %s', host)
for session in self.sessions:
session.remove_pool(host)
log.debug('Signalling to load balancing policies that host %s is up', host)
self.profile_manager.on_up(host)
log.debug('Signalling to control connection that host %s is up', host)
self.control_connection.on_up(host)
log.debug('Attempting to open new connection pools for host %s', host)
futures_lock = Lock()
futures_results = []
callback = partial(self._on_up_future_completed, host, futures, futures_results, futures_lock)
for session in self.sessions:
future = session.add_or_renew_pool(host, is_host_addition=False)
if (future is not None):
have_future = True
future.add_done_callback(callback)
futures.add(future)
except Exception:
log.exception('Unexpected failure handling node %s being marked up:', host)
for future in futures:
future.cancel()
self._cleanup_failed_on_up_handling(host)
with host.lock:
host._currently_handling_node_up = False
raise
else:
if (not have_future):
with host.lock:
host.set_up()
host._currently_handling_node_up = False
return futures
|
'Intended for internal use only.'
| @run_in_executor
def on_down(self, host, is_host_addition, expect_host_to_be_down=False):
| if self.is_shutdown:
return
with host.lock:
was_up = host.is_up
if (self._discount_down_events and (self.profile_manager.distance(host) != HostDistance.IGNORED)):
connected = False
for session in self.sessions:
pool_states = session.get_pool_state()
pool_state = pool_states.get(host)
if pool_state:
connected |= (pool_state['open_count'] > 0)
if connected:
return
host.set_down()
if (((not was_up) and (not expect_host_to_be_down)) or host.is_currently_reconnecting()):
return
log.warning('Host %s has been marked down', host)
self.profile_manager.on_down(host)
self.control_connection.on_down(host)
for session in self.sessions:
session.on_down(host)
for listener in self.listeners:
listener.on_down(host)
self._start_reconnector(host, is_host_addition)
|
'Called when adding initial contact points and when the control
connection subsequently discovers a new node.
Returns a Host instance, and a flag indicating whether it was new in
the metadata.
Intended for internal use only.'
| def add_host(self, address, datacenter=None, rack=None, signal=True, refresh_nodes=True):
| (host, new) = self.metadata.add_or_return_host(Host(address, self.conviction_policy_factory, datacenter, rack))
if (new and signal):
log.info('New Cassandra host %r discovered', host)
self.on_add(host, refresh_nodes)
return (host, new)
|
'Called when the control connection observes that a node has left the
ring. Intended for internal use only.'
| def remove_host(self, host):
| if (host and self.metadata.remove_host(host)):
log.info('Cassandra host %s removed', host)
self.on_remove(host)
|
'Adds a :class:`cassandra.policies.HostStateListener` subclass instance to
the list of listeners to be notified when a host is added, removed,
marked up, or marked down.'
| def register_listener(self, listener):
| with self._listener_lock:
self._listeners.add(listener)
|
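A registration sketch, assuming the four notification methods of :class:`cassandra.policies.HostStateListener` and a module-level `log`:

```python
from cassandra.policies import HostStateListener

class LoggingListener(HostStateListener):
    def on_up(self, host):
        log.info('host up: %s', host)
    def on_down(self, host):
        log.info('host down: %s', host)
    def on_add(self, host):
        log.info('host added: %s', host)
    def on_remove(self, host):
        log.info('host removed: %s', host)

cluster.register_listener(LoggingListener())
```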
'Removes a registered listener.'
| def unregister_listener(self, listener):
| with self._listener_lock:
self._listeners.remove(listener)
|
'If any host has fewer than the configured number of core connections
open, attempt to open connections until that number is met.'
| def _ensure_core_connections(self):
| for session in self.sessions:
for pool in tuple(session._pools.values()):
pool.ensure_core_connections()
|
'Returns the control connection host metadata.'
| def get_control_connection_host(self):
| connection = self.control_connection._connection
host = (connection.host if connection else None)
return (self.metadata.get_host(host) if host else None)
|
'Synchronously refresh all schema metadata.
By default, the timeout for this operation is governed by :attr:`~.Cluster.max_schema_agreement_wait`
and :attr:`~.Cluster.control_connection_timeout`.
Passing max_schema_agreement_wait here overrides :attr:`~.Cluster.max_schema_agreement_wait`.
Setting max_schema_agreement_wait <= 0 will bypass schema agreement and refresh schema immediately.
An Exception is raised if schema refresh fails for any reason.'
| def refresh_schema_metadata(self, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Schema metadata was not refreshed. See log for details.')
|
'Synchronously refresh keyspace metadata. This applies to keyspace-level information such as replication
and durability settings. It does not refresh tables, types, etc. contained in the keyspace.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_keyspace_metadata(self, keyspace, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.KEYSPACE, keyspace=keyspace, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Keyspace metadata was not refreshed. See log for details.')
|
'Synchronously refresh table metadata. This applies to a table, and any triggers or indexes attached
to the table.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_table_metadata(self, keyspace, table, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=table, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('Table metadata was not refreshed. See log for details.')
|
'Synchronously refresh materialized view metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_materialized_view_metadata(self, keyspace, view, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TABLE, keyspace=keyspace, table=view, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('View metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined type metadata.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_type_metadata(self, keyspace, user_type, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.TYPE, keyspace=keyspace, type=user_type, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Type metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined function metadata.
``function`` is a :class:`cassandra.UserFunctionDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_function_metadata(self, keyspace, function, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.FUNCTION, keyspace=keyspace, function=function, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Function metadata was not refreshed. See log for details.')
|
'Synchronously refresh user defined aggregate metadata.
``aggregate`` is a :class:`cassandra.UserAggregateDescriptor`.
See :meth:`~.Cluster.refresh_schema_metadata` for description of ``max_schema_agreement_wait`` behavior'
| def refresh_user_aggregate_metadata(self, keyspace, aggregate, max_schema_agreement_wait=None):
| if (not self.control_connection.refresh_schema(target_type=SchemaTargetType.AGGREGATE, keyspace=keyspace, aggregate=aggregate, schema_agreement_wait=max_schema_agreement_wait, force=True)):
raise DriverException('User Aggregate metadata was not refreshed. See log for details.')
|
'Synchronously refresh the node list and token metadata
`force_token_rebuild` can be used to rebuild the token map metadata, even if no new nodes are discovered.
An Exception is raised if node refresh fails for any reason.'
| def refresh_nodes(self, force_token_rebuild=False):
| if (not self.control_connection.refresh_node_list_and_token_map(force_token_rebuild)):
raise DriverException('Node list was not refreshed. See log for details.')
|
'*Deprecated:* set :attr:`~.Cluster.schema_metadata_enabled` and :attr:`~.Cluster.token_metadata_enabled` instead
Sets a flag to enable (True) or disable (False) all metadata refresh queries.
This applies to both schema and node topology.
Disabling this is useful to minimize refreshes during multiple changes.
Meta refresh must be enabled for the driver to become aware of any cluster
topology changes or schema updates.'
| def set_meta_refresh_enabled(self, enabled):
| self.schema_metadata_enabled = enabled
self.token_metadata_enabled = enabled
|
'The format to return row results in. By default, each
returned row will be a named tuple. You can alternatively
use any of the following:
- :func:`cassandra.query.tuple_factory` - return a result row as a tuple
- :func:`cassandra.query.named_tuple_factory` - return a result row as a named tuple
- :func:`cassandra.query.dict_factory` - return a result row as a dict
- :func:`cassandra.query.ordered_dict_factory` - return a result row as an OrderedDict'
| @property
def row_factory(self):
| return self._row_factory
|
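For example, to receive dict rows under legacy configuration (with execution profiles, set ``row_factory`` on the profile instead); table and column names are illustrative:

```python
from cassandra.query import dict_factory

session.row_factory = dict_factory
row = session.execute("SELECT id, name FROM users")[0]
# row is now e.g. {'id': 0, 'name': 'alice'} rather than a named tuple
```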
'A default timeout, measured in seconds, for queries executed through
:meth:`.execute()` or :meth:`.execute_async()`. This default may be
overridden with the `timeout` parameter for either of those methods.
Setting this to :const:`None` will cause no timeouts to be set by default.
Please see :meth:`.ResponseFuture.result` for details on the scope and
effect of this timeout.
.. versionadded:: 2.0.0'
| @property
def default_timeout(self):
| return self._default_timeout
|
'The default :class:`~ConsistencyLevel` for operations executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.consistency_level` on individual statements.
.. versionadded:: 1.2.0
.. versionchanged:: 3.0.0
default changed from ONE to LOCAL_ONE'
| @property
def default_consistency_level(self):
| return self._default_consistency_level
|
'The default :class:`~ConsistencyLevel` for serial phase of conditional updates executed through
this session. This default may be overridden by setting the
:attr:`~.Statement.serial_consistency_level` on individual statements.
Only valid for ``protocol_version >= 2``.'
| @property
def default_serial_consistency_level(self):
| return self._default_serial_consistency_level
|
'Execute the given query and synchronously wait for the response.
If an error is encountered while executing the query, an Exception
will be raised.
`query` may be a query string or an instance of :class:`cassandra.query.Statement`.
`parameters` may be a sequence or dict of parameters to bind. If a
sequence is used, ``%s`` should be used as the placeholder for each
argument. If a dict is used, ``%(name)s`` style placeholders must
be used.
`timeout` should specify a floating-point timeout (in seconds) after
which an :exc:`.OperationTimedOut` exception will be raised if the query
has not completed. If not set, the timeout defaults to
:attr:`~.Session.default_timeout`. If set to :const:`None`, there is
no timeout. Please see :meth:`.ResponseFuture.result` for details on
the scope and effect of this timeout.
If `trace` is set to :const:`True`, the query will be sent with tracing enabled.
The trace details can be obtained using the returned :class:`.ResultSet` object.
`custom_payload` is a :ref:`custom_payload` dict to be passed to the server.
If `query` is a Statement with its own custom_payload, the message payload
will be a union of the two, with the values specified here taking precedence.
`execution_profile` is the execution profile to use for this request. It can be a key to a profile configured
via :meth:`Cluster.add_execution_profile` or an instance (from :meth:`Session.execution_profile_clone_update`,
for example).
`paging_state` is an optional paging state, reused from a previous :class:`ResultSet`.'
| def execute(self, query, parameters=None, timeout=_NOT_SET, trace=False, custom_payload=None, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| return self.execute_async(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state).result()
|
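A parameter-binding sketch showing both placeholder styles (table and column names are illustrative):

```python
# sequence parameters use %s placeholders
session.execute("INSERT INTO users (id, name) VALUES (%s, %s)", (0, 'alice'))

# dict parameters use %(name)s placeholders
session.execute("INSERT INTO users (id, name) VALUES (%(id)s, %(name)s)",
                {'id': 1, 'name': 'bob'})
```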
'Execute the given query and return a :class:`~.ResponseFuture` object
which callbacks may be attached to for asynchronous response
delivery. You may also call :meth:`~.ResponseFuture.result()`
on the :class:`.ResponseFuture` to synchronously block for results at
any time.
See :meth:`Session.execute` for parameter definitions.
Example usage::
>>> session = cluster.connect()
>>> future = session.execute_async("SELECT * FROM mycf")
>>> def log_results(results):
... for row in results:
... log.info("Results: %s", row)
>>> def log_error(exc):
... log.error("Operation failed: %s", exc)
>>> future.add_callbacks(log_results, log_error)
Async execution with blocking wait for results::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... results = future.result()
... except Exception:
... log.exception("Operation failed:")'
| def execute_async(self, query, parameters=None, trace=False, custom_payload=None, timeout=_NOT_SET, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| future = self._create_response_future(query, parameters, trace, custom_payload, timeout, execution_profile, paging_state)
future._protocol_handler = self.client_protocol_handler
self._on_request(future)
future.send_request()
return future
|
'Returns the ResponseFuture before calling send_request() on it'
| def _create_response_future(self, query, parameters, trace, custom_payload, timeout, execution_profile=EXEC_PROFILE_DEFAULT, paging_state=None):
| prepared_statement = None
if isinstance(query, six.string_types):
query = SimpleStatement(query)
elif isinstance(query, PreparedStatement):
query = query.bind(parameters)
if (self.cluster._config_mode == _ConfigMode.LEGACY):
if (execution_profile is not EXEC_PROFILE_DEFAULT):
raise ValueError('Cannot specify execution_profile while using legacy parameters.')
if (timeout is _NOT_SET):
timeout = self.default_timeout
cl = (query.consistency_level if (query.consistency_level is not None) else self.default_consistency_level)
serial_cl = (query.serial_consistency_level if (query.serial_consistency_level is not None) else self.default_serial_consistency_level)
retry_policy = (query.retry_policy or self.cluster.default_retry_policy)
row_factory = self.row_factory
load_balancing_policy = self.cluster.load_balancing_policy
spec_exec_policy = None
else:
execution_profile = self._get_execution_profile(execution_profile)
if (timeout is _NOT_SET):
timeout = execution_profile.request_timeout
cl = (query.consistency_level if (query.consistency_level is not None) else execution_profile.consistency_level)
serial_cl = (query.serial_consistency_level if (query.serial_consistency_level is not None) else execution_profile.serial_consistency_level)
retry_policy = (query.retry_policy or execution_profile.retry_policy)
row_factory = execution_profile.row_factory
load_balancing_policy = execution_profile.load_balancing_policy
spec_exec_policy = execution_profile.speculative_execution_policy
fetch_size = query.fetch_size
if ((fetch_size is FETCH_SIZE_UNSET) and (self._protocol_version >= 2)):
fetch_size = self.default_fetch_size
elif (self._protocol_version == 1):
fetch_size = None
start_time = time.time()
if ((self._protocol_version >= 3) and self.use_client_timestamp):
timestamp = self.cluster.timestamp_generator()
else:
timestamp = None
if isinstance(query, SimpleStatement):
query_string = query.query_string
if parameters:
query_string = bind_params(query_string, parameters, self.encoder)
message = QueryMessage(query_string, cl, serial_cl, fetch_size, timestamp=timestamp)
elif isinstance(query, BoundStatement):
prepared_statement = query.prepared_statement
message = ExecuteMessage(prepared_statement.query_id, query.values, cl, serial_cl, fetch_size, timestamp=timestamp, skip_meta=bool(prepared_statement.result_metadata))
elif isinstance(query, BatchStatement):
if (self._protocol_version < 2):
raise UnsupportedOperation('BatchStatement execution is only supported with protocol version 2 or higher (supported in Cassandra 2.0 and higher). Consider setting Cluster.protocol_version to 2 to support this operation.')
message = BatchMessage(query.batch_type, query._statements_and_parameters, cl, serial_cl, timestamp)
message.tracing = trace
message.update_custom_payload(query.custom_payload)
message.update_custom_payload(custom_payload)
message.allow_beta_protocol_version = self.cluster.allow_beta_protocol_version
message.paging_state = paging_state
spec_exec_plan = (spec_exec_policy.new_plan((query.keyspace or self.keyspace), query) if (query.is_idempotent and spec_exec_policy) else None)
return ResponseFuture(self, message, query, timeout, metrics=self._metrics, prepared_statement=prepared_statement, retry_policy=retry_policy, row_factory=row_factory, load_balancer=load_balancing_policy, start_time=start_time, speculative_execution_plan=spec_exec_plan)
|
'Returns a clone of the ``ep`` profile. ``kwargs`` can be specified to update attributes
of the returned profile.
This is a shallow clone, so any objects referenced by the profile are shared. This means Load Balancing Policy
is maintained by inclusion in the active profiles. It also means updating any other rich objects will be seen
by the active profile. In cases where this is not desirable, be sure to replace the instance instead of manipulating
the shared object.'
| def execution_profile_clone_update(self, ep, **kwargs):
| clone = copy(self._get_execution_profile(ep))
for (attr, value) in kwargs.items():
setattr(clone, attr, value)
return clone
|
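Usage sketch: derive a long-timeout variant of the default profile and pass the clone directly to ``execute`` (the query is illustrative):

```python
from cassandra.cluster import EXEC_PROFILE_DEFAULT

slow = session.execution_profile_clone_update(EXEC_PROFILE_DEFAULT,
                                              request_timeout=120.0)
session.execute("SELECT * FROM big_table", execution_profile=slow)
```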
'Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_future` is the :class:`.ResponseFuture` for the request.
Note that the init callback is done on the client thread creating the request, so you may need to consider
synchronization if you have multiple threads. Any callbacks added to the response future will be executed
on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in
:meth:`.ResponseFuture.add_callbacks`).
See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_ in the
source tree for an example.'
| def add_request_init_listener(self, fn, *args, **kwargs):
| self._request_init_callbacks.append((fn, args, kwargs))
|
'Removes a callback and arguments from the list.
See :meth:`.Session.add_request_init_listener`.'
| def remove_request_init_listener(self, fn, *args, **kwargs):
| self._request_init_callbacks.remove((fn, args, kwargs))
|
'Prepares a query string, returning a :class:`~cassandra.query.PreparedStatement`
instance which can be used as follows::
>>> session = cluster.connect("mykeyspace")
>>> query = "INSERT INTO users (id, name, age) VALUES (?, ?, ?)"
>>> prepared = session.prepare(query)
>>> session.execute(prepared, (user.id, user.name, user.age))
Or you may bind values to the prepared statement ahead of time::
>>> prepared = session.prepare(query)
>>> bound_stmt = prepared.bind((user.id, user.name, user.age))
>>> session.execute(bound_stmt)
Of course, prepared statements may (and should) be reused::
>>> prepared = session.prepare(query)
>>> for user in users:
... bound = prepared.bind((user.id, user.name, user.age))
... session.execute(bound)
**Important**: PreparedStatements should be prepared only once.
Preparing the same query more than once will likely affect performance.
`custom_payload` is a key value map to be passed along with the prepare
message. See :ref:`custom_payload`.'
| def prepare(self, query, custom_payload=None):
| message = PrepareMessage(query=query)
future = ResponseFuture(self, message, query=None, timeout=self.default_timeout)
try:
future.send_request()
(query_id, bind_metadata, pk_indexes, result_metadata) = future.result()
except Exception:
log.exception('Error preparing query:')
raise
prepared_statement = PreparedStatement.from_message(query_id, bind_metadata, pk_indexes, self.cluster.metadata, query, self.keyspace, self._protocol_version, result_metadata)
prepared_statement.custom_payload = future.custom_payload
self.cluster.add_prepared(query_id, prepared_statement)
if self.cluster.prepare_on_all_hosts:
host = future._current_host
try:
self.prepare_on_all_hosts(prepared_statement.query_string, host)
except Exception:
log.exception('Error preparing query on all hosts:')
return prepared_statement
|
'Prepare the given query on all hosts, excluding ``excluded_host``.
Intended for internal use only.'
| def prepare_on_all_hosts(self, query, excluded_host):
| futures = []
for host in tuple(self._pools.keys()):
if ((host != excluded_host) and host.is_up):
future = ResponseFuture(self, PrepareMessage(query=query), None, self.default_timeout)
try:
request_id = future._query(host)
except Exception:
log.exception('Error preparing query for host %s:', host)
continue
if (request_id is None):
log.debug('Failed to prepare query for host %s: %r', host, future._errors.get(host))
continue
futures.append((host, future))
for (host, future) in futures:
try:
future.result()
except Exception:
log.exception('Error preparing query for host %s:', host)
|
'Close all connections. ``Session`` instances should not be used
for any purpose after being shutdown.'
| def shutdown(self):
| with self._lock:
if self.is_shutdown:
return
else:
self.is_shutdown = True
for future in self._initial_connect_futures:
future.cancel()
wait_futures(self._initial_connect_futures)
for pool in tuple(self._pools.values()):
pool.shutdown()
|
'For internal use only.'
| def add_or_renew_pool(self, host, is_host_addition):
| distance = self._profile_manager.distance(host)
if (distance == HostDistance.IGNORED):
return None
def run_add_or_renew_pool():
try:
if (self._protocol_version >= 3):
new_pool = HostConnection(host, distance, self)
else:
new_pool = HostConnectionPool(host, distance, self)
except AuthenticationFailed as auth_exc:
conn_exc = ConnectionException(str(auth_exc), host=host)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition)
return False
except Exception as conn_exc:
log.warning('Failed to create connection pool for new host %s:', host, exc_info=conn_exc)
self.cluster.signal_connection_failure(host, conn_exc, is_host_addition, expect_host_to_be_down=True)
return False
previous = self._pools.get(host)
with self._lock:
while (new_pool._keyspace != self.keyspace):
self._lock.release()
set_keyspace_event = Event()
errors_returned = []
def callback(pool, errors):
errors_returned.extend(errors)
set_keyspace_event.set()
new_pool._set_keyspace_for_all_conns(self.keyspace, callback)
set_keyspace_event.wait(self.cluster.connect_timeout)
if ((not set_keyspace_event.is_set()) or errors_returned):
log.warning('Failed setting keyspace for pool after keyspace changed during connect: %s', errors_returned)
self.cluster.on_down(host, is_host_addition)
new_pool.shutdown()
self._lock.acquire()
return False
self._lock.acquire()
self._pools[host] = new_pool
log.debug('Added pool for host %s to session', host)
if previous:
previous.shutdown()
return True
return self.submit(run_add_or_renew_pool)
|
'When the set of live nodes change, the loadbalancer will change its
mind on host distances. It might change it on the node that came/left
but also on other nodes (for instance, if a node dies, another
previously ignored node may be now considered).
This method ensures that all hosts for which a pool should exist
have one, and hosts that shouldn\'t don\'t.
For internal use only.'
| def update_created_pools(self):
| futures = set()
for host in self.cluster.metadata.all_hosts():
distance = self._profile_manager.distance(host)
pool = self._pools.get(host)
future = None
if ((not pool) or pool.is_shutdown):
if ((distance != HostDistance.IGNORED) and (host.is_up in (True, None))):
future = self.add_or_renew_pool(host, False)
elif (distance != pool.host_distance):
if (distance == HostDistance.IGNORED):
future = self.remove_pool(host)
else:
pool.host_distance = distance
if future:
futures.add(future)
return futures
|
'Called by the parent Cluster instance when a node is marked down.
Only intended for internal use.'
| def on_down(self, host):
| future = self.remove_pool(host)
if future:
future.add_done_callback((lambda f: self.update_created_pools()))
|
'Internal'
| def on_remove(self, host):
| self.on_down(host)
|
'Set the default keyspace for all queries made through this Session.
This operation blocks until complete.'
| def set_keyspace(self, keyspace):
| self.execute(('USE %s' % (protect_name(keyspace),)))
|
'Asynchronously sets the keyspace on all pools. When all
pools have set all of their connections, `callback` will be
called with a dictionary of all errors that occurred, keyed
by the `Host` that they occurred against.'
| def _set_keyspace_for_all_pools(self, keyspace, callback):
| with self._lock:
self.keyspace = keyspace
remaining_callbacks = set(self._pools.values())
errors = {}
if (not remaining_callbacks):
callback(errors)
return
def pool_finished_setting_keyspace(pool, host_errors):
remaining_callbacks.remove(pool)
if host_errors:
errors[pool.host] = host_errors
if (not remaining_callbacks):
callback(errors)
for pool in tuple(self._pools.values()):
pool._set_keyspace_for_all_conns(keyspace, pool_finished_setting_keyspace)
|
'Called by the parent Cluster instance when the user registers a new
mapping from a user-defined type to a class. Intended for internal
use only.'
| def user_type_registered(self, keyspace, user_type, klass):
| try:
ks_meta = self.cluster.metadata.keyspaces[keyspace]
except KeyError:
raise UserTypeDoesNotExist(('Keyspace %s does not exist or has not been discovered by the driver' % (keyspace,)))
try:
type_meta = ks_meta.user_types[user_type]
except KeyError:
raise UserTypeDoesNotExist(('User type %s does not exist in keyspace %s' % (user_type, keyspace)))
field_names = type_meta.field_names
if six.PY2:
field_names = [fn.encode('utf-8') for fn in field_names]
def encode(val):
return ('{ %s }' % ' , '.join((('%s : %s' % (field_name, self.encoder.cql_encode_all_types(getattr(val, field_name, None)))) for field_name in field_names)))
self.encoder.mapping[klass] = encode
|
'Internal'
| def submit(self, fn, *args, **kwargs):
| if (not self.is_shutdown):
return self.cluster.executor.submit(fn, *args, **kwargs)
|
'Replace existing connection (if there is one) and close it.'
| def _set_new_connection(self, conn):
| with self._lock:
old = self._connection
self._connection = conn
if old:
log.debug('[control connection] Closing old connection %r, replacing with %r', old, conn)
old.close()
|
'Tries to connect to each host in the query plan until one succeeds
or every attempt fails. If successful, a new Connection will be
returned. Otherwise, :exc:`NoHostAvailable` will be raised
with an "errors" arg that is a dict mapping host addresses
to the exception that was raised when an attempt was made to open
a connection to that host.'
| def _reconnect_internal(self):
| errors = {}
for host in self._cluster._default_load_balancing_policy.make_query_plan():
try:
return self._try_connect(host)
except ConnectionException as exc:
errors[host.address] = exc
log.warning('[control connection] Error connecting to %s:', host, exc_info=True)
self._cluster.signal_connection_failure(host, exc, is_host_addition=False)
except Exception as exc:
errors[host.address] = exc
log.warning('[control connection] Error connecting to %s:', host, exc_info=True)
if self._is_shutdown:
raise DriverException('[control connection] Reconnection in progress during shutdown')
raise NoHostAvailable('Unable to connect to any servers', errors)
|
'Creates a new Connection, registers for pushed events, and refreshes
node/token and schema metadata.'
| def _try_connect(self, host):
| log.debug('[control connection] Opening new connection to %s', host)
while True:
try:
connection = self._cluster.connection_factory(host.address, is_control_connection=True)
if self._is_shutdown:
connection.close()
raise DriverException('Reconnecting during shutdown')
break
except ProtocolVersionUnsupported as e:
self._cluster.protocol_downgrade(host.address, e.startup_version)
log.debug('[control connection] Established new connection %r, registering watchers and refreshing schema and topology', connection)
self_weakref = weakref.ref(self, partial(_clear_watcher, weakref.proxy(connection)))
try:
connection.register_watchers({'TOPOLOGY_CHANGE': partial(_watch_callback, self_weakref, '_handle_topology_change'), 'STATUS_CHANGE': partial(_watch_callback, self_weakref, '_handle_status_change'), 'SCHEMA_CHANGE': partial(_watch_callback, self_weakref, '_handle_schema_change')}, register_timeout=self._timeout)
sel_peers = (self._SELECT_PEERS if self._token_meta_enabled else self._SELECT_PEERS_NO_TOKENS)
sel_local = (self._SELECT_LOCAL if self._token_meta_enabled else self._SELECT_LOCAL_NO_TOKENS)
peers_query = QueryMessage(query=sel_peers, consistency_level=ConsistencyLevel.ONE)
local_query = QueryMessage(query=sel_local, consistency_level=ConsistencyLevel.ONE)
shared_results = connection.wait_for_responses(peers_query, local_query, timeout=self._timeout)
self._refresh_node_list_and_token_map(connection, preloaded_results=shared_results)
self._refresh_schema(connection, preloaded_results=shared_results, schema_agreement_wait=(-1))
except Exception:
connection.close()
raise
return connection
|
'Called by the _ControlReconnectionHandler when a new connection
is successfully created. Clears out the _reconnection_handler on
this ControlConnection.'
| def _get_and_set_reconnection_handler(self, new_handler):
| with self._reconnection_lock:
old = self._reconnection_handler
self._reconnection_handler = new_handler
return old
|
'Used to mitigate refreshes for nodes that are already known.
Some versions of the server send superfluous NEW_NODE messages in addition to UP events.'
| def _refresh_nodes_if_not_up(self, addr):
| host = self._cluster.metadata.get_host(addr)
if ((not host) or (not host.is_up)):
self.refresh_node_list_and_token_map()
|
'Internal'
| def send_request(self, error_no_hosts=True):
| for host in self.query_plan:
req_id = self._query(host)
if (req_id is not None):
self._req_id = req_id
return True
if ((self.timeout is not None) and ((time.time() - self._start_time) > self.timeout)):
self._on_timeout()
return True
if error_no_hosts:
self._set_final_exception(NoHostAvailable('Unable to complete the operation against any hosts', self._errors))
return False
|
'Returns :const:`True` if there are more pages left in the
query results, :const:`False` otherwise. This should only
be checked after the first page has been returned.
.. versionadded:: 2.0.0'
| @property
def has_more_pages(self):
| return (self._paging_state is not None)
|
'Warnings returned from the server, if any. This will only be
set for protocol_version 4+.
Warnings may be returned for such things as oversized batches,
or too many tombstones in slice queries.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may throw if the response has not been received.'
| @property
def warnings(self):
| if (not self._event.is_set()):
raise DriverException('warnings cannot be retrieved before ResponseFuture is finalized')
return self._warnings
|
'The custom payload returned from the server, if any. This will only be
set by Cassandra servers implementing a custom QueryHandler, and only
for protocol_version 4+.
Ensure the future is complete before trying to access this property
(call :meth:`.result()`, or after callback is invoked).
Otherwise it may throw if the response has not been received.
:return: :ref:`custom_payload`.'
| @property
def custom_payload(self):
| if (not self._event.is_set()):
raise DriverException('custom_payload cannot be retrieved before ResponseFuture is finalized')
return self._custom_payload
|
'If there are more pages left in the query result, this asynchronously
starts fetching the next page. If there are no pages left, :exc:`.QueryExhausted`
is raised. Also see :attr:`.has_more_pages`.
This should only be called after the first page has been returned.
.. versionadded:: 2.0.0'
| def start_fetching_next_page(self):
| if (not self._paging_state):
raise QueryExhausted()
self._make_query_plan()
self.message.paging_state = self._paging_state
self._event.clear()
self._final_result = _NOT_SET
self._final_exception = None
self._start_timer()
self.send_request()
|
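A manual-paging sketch built on this method and :attr:`.has_more_pages` (``process`` is an illustrative placeholder, and `log` is assumed to be a module-level logger):

```python
from threading import Event

finished = Event()

def handle_page(rows):
    for row in rows:
        process(row)                       # illustrative placeholder
    if future.has_more_pages:
        future.start_fetching_next_page()  # handle_page fires again for the next page
    else:
        finished.set()

def handle_error(exc):
    log.error('query failed: %s', exc)
    finished.set()

future = session.execute_async('SELECT * FROM big_table', timeout=None)
future.add_callbacks(handle_page, handle_error)
finished.wait()
```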
'Handle the response to our attempt to prepare a statement.
If it succeeded, run the original query again against the same host.'
| def _execute_after_prepare(self, host, connection, pool, response):
| if pool:
pool.return_connection(connection)
if self._final_exception:
return
if isinstance(response, ResultMessage):
if (response.kind == RESULT_KIND_PREPARED):
if self.prepared_statement:
(_, _, _, result_metadata) = response.results
self.prepared_statement.result_metadata = result_metadata
request_id = self._query(host)
if (request_id is None):
self.send_request()
else:
self._set_final_exception(ConnectionException(('Got unexpected response when preparing statement on host %s: %s' % (host, response))))
elif isinstance(response, ErrorMessage):
if hasattr(response, 'to_exception'):
self._set_final_exception(response.to_exception())
else:
self._set_final_exception(response)
elif isinstance(response, ConnectionException):
log.debug('Connection error when preparing statement on host %s: %s', host, response)
self._errors[host] = response
self.send_request()
else:
self._set_final_exception(ConnectionException(('Got unexpected response type when preparing statement on host %s: %s' % (host, response))))
|
'Return the final result or raise an Exception if errors were
encountered. If the final result or error has not been set
yet, this method will block until it is set, or the timeout
set for the request expires.
Timeout is specified in the Session request execution functions.
If the timeout is exceeded, an :exc:`cassandra.OperationTimedOut` will be raised.
This is a client-side timeout. For more information
about server-side coordinator timeouts, see :class:`.policies.RetryPolicy`.
Example usage::
>>> future = session.execute_async("SELECT * FROM mycf")
>>> # do other stuff...
>>> try:
... rows = future.result()
... for row in rows:
... ... # process results
... except Exception:
... log.exception("Operation failed:")'
| def result(self):
| self._event.wait()
if (self._final_result is not _NOT_SET):
return ResultSet(self, self._final_result)
else:
raise self._final_exception
|
'Returns the trace session ids for this future, if tracing was enabled (does not fetch trace data).'
| def get_query_trace_ids(self):
| return [trace.trace_id for trace in self._query_traces]
|
'Fetches and returns the query trace of the last response, or `None` if tracing was
not enabled.
Note that this may raise an exception if there are problems retrieving the trace
details from Cassandra. If the trace is not available after `max_wait`,
:exc:`cassandra.query.TraceUnavailable` will be raised.
If the ResponseFuture is not done (async execution) and you try to retrieve the trace,
:exc:`cassandra.query.TraceUnavailable` will be raised.
`query_cl` is the consistency level used to poll the trace tables.'
| def get_query_trace(self, max_wait=None, query_cl=ConsistencyLevel.LOCAL_ONE):
| if ((self._final_result is _NOT_SET) and (self._final_exception is None)):
raise TraceUnavailable('Trace information was not available. The ResponseFuture is not done.')
if self._query_traces:
return self._get_query_trace((len(self._query_traces) - 1), max_wait, query_cl)
|
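A sketch of retrieving a trace, assuming tracing was requested on the original call:
.. code-block:: python

    future = session.execute_async("SELECT * FROM users", trace=True)
    rows = future.result()                    # ensure the future is done
    trace = future.get_query_trace(max_wait=5.0)
    if trace:
        print(trace.coordinator, trace.duration)
        for event in trace.events:
            print(event)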
'Fetches and returns the query traces for all query pages, if tracing was enabled.
See note in :meth:`~.get_query_trace` regarding possible exceptions.'
| def get_all_query_traces(self, max_wait_per=None, query_cl=ConsistencyLevel.LOCAL_ONE):
| if self._query_traces:
return [self._get_query_trace(i, max_wait_per, query_cl) for i in range(len(self._query_traces))]
return []
|
'Attaches a callback function to be called when the final results arrive.
By default, `fn` will be called with the results as the first and only
argument. If `*args` or `**kwargs` are supplied, they will be passed
through as additional positional or keyword arguments to `fn`.
If an error is hit while executing the operation, a callback attached
here will not be called. Use :meth:`.add_errback()` or :meth:`add_callbacks()`
if you wish to handle that case.
If the final result has already been seen when this method is called,
the callback will be called immediately (before this method returns).
Note: in the case that the result is not available when the callback is added,
the callback is executed by the IO event thread. This means that the callback
should not block or attempt further synchronous requests, because no further
IO will be processed until the callback returns.
**Important**: if the callback you attach results in an exception being
raised, **the exception will be ignored**, so please ensure your
callback handles all error cases that you care about.
Usage example::
>>> session = cluster.connect("mykeyspace")
>>> def handle_results(rows, start_time, should_log=False):
... if should_log:
... log.info("Total time: %f", time.time() - start_time)
>>> future = session.execute_async("SELECT * FROM users")
>>> future.add_callback(handle_results, time.time(), should_log=True)'
| def add_callback(self, fn, *args, **kwargs):
| run_now = False
with self._callback_lock:
self._callbacks.append((fn, args, kwargs))
if (self._final_result is not _NOT_SET):
run_now = True
if run_now:
fn(self._final_result, *args, **kwargs)
return self
|
'Like :meth:`.add_callback()`, but handles error cases.
An Exception instance will be passed as the first positional argument
to `fn`.'
| def add_errback(self, fn, *args, **kwargs):
| run_now = False
with self._callback_lock:
self._errbacks.append((fn, args, kwargs))
if self._final_exception:
run_now = True
if run_now:
fn(self._final_exception, *args, **kwargs)
return self
|
'A convenient combination of :meth:`.add_callback()` and
:meth:`.add_errback()`.
Example usage::
>>> session = cluster.connect()
>>> query = "SELECT * FROM mycf"
>>> future = session.execute_async(query)
>>> def log_results(results, level=\'debug\'):
... for row in results:
... log.log(level, "Result: %s", row)
>>> def log_error(exc, query):
... log.error("Query \'%s\' failed: %s", query, exc)
>>> future.add_callbacks(
... callback=log_results, callback_kwargs={\'level\': \'info\'},
... errback=log_error, errback_args=(query,))'
| def add_callbacks(self, callback, errback, callback_args=(), callback_kwargs=None, errback_args=(), errback_kwargs=None):
| self.add_callback(callback, *callback_args, **(callback_kwargs or {}))
self.add_errback(errback, *errback_args, **(errback_kwargs or {}))
|
'True if the last response indicated more pages; False otherwise'
| @property
def has_more_pages(self):
| return self.response_future.has_more_pages
|
'The list of current page rows. May be empty if the result was empty,
or this is the last page.'
| @property
def current_rows(self):
| return (self._current_rows or [])
|
'Manually, synchronously fetch the next page. Supplied for manually retrieving pages
and inspecting :attr:`~.current_rows`. It is not necessary to call this when iterating
through results; paging happens implicitly in iteration.'
| def fetch_next_page(self):
| if self.response_future.has_more_pages:
self.response_future.start_fetching_next_page()
result = self.response_future.result()
self._current_rows = result._current_rows
else:
self._current_rows = []
|
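A sketch of manual page-by-page consumption, assuming an existing `session`; `process_row` is a
hypothetical handler:
.. code-block:: python

    from cassandra.query import SimpleStatement

    statement = SimpleStatement("SELECT * FROM users", fetch_size=100)
    result = session.execute(statement)
    while True:
        for row in result.current_rows:       # rows of the current page only
            process_row(row)
        if result.has_more_pages:
            result.fetch_next_page()
        else:
            break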
'Gets the last query trace from the associated future.
See :meth:`.ResponseFuture.get_query_trace` for details.'
| def get_query_trace(self, max_wait_sec=None):
| return self.response_future.get_query_trace(max_wait_sec)
|
'Gets all query traces from the associated future.
See :meth:`.ResponseFuture.get_all_query_traces` for details.'
| def get_all_query_traces(self, max_wait_sec_per=None):
| return self.response_future.get_all_query_traces(max_wait_sec_per)
|
'For LWT results, returns whether the transaction was applied.
Result is indeterminate if called on a result that was not an LWT request.
Only valid when one of the internal row factories is in use.'
| @property
def was_applied(self):
| if (self.response_future.row_factory not in (named_tuple_factory, dict_factory, tuple_factory)):
raise RuntimeError(('Cannot determine LWT result with row factory %s' % (self.response_future.row_factory,)))
if (len(self.current_rows) != 1):
raise RuntimeError(('LWT result should have exactly one row. This has %d.' % len(self.current_rows)))
row = self.current_rows[0]
if isinstance(row, tuple):
return row[0]
else:
return row['[applied]']
|
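A sketch of checking an LWT outcome; the `users` table and values are illustrative:
.. code-block:: python

    result = session.execute(
        "INSERT INTO users (name, age) VALUES (%s, %s) IF NOT EXISTS",
        ("alice", 30))
    if not result.was_applied:
        print("user already exists; insert was not applied")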
'Server paging state of the query. Can be `None` if the query was not paged.
The driver treats paging state as opaque, but it may contain primary key data, so applications may want to
avoid sending this to untrusted parties.'
| @property
def paging_state(self):
| return self.response_future._paging_state
|
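A sketch of resuming a paged query from a saved state, assuming a driver version whose
`Session.execute` accepts a `paging_state` argument:
.. code-block:: python

    statement = SimpleStatement("SELECT * FROM users", fetch_size=100)
    result = session.execute(statement)
    saved = result.paging_state     # opaque token; may embed primary key data

    # Later, possibly in another process, resume where the first page ended:
    result = session.execute(statement, paging_state=saved)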
'Returns the metrics for the registered cluster instance.'
| def get_stats(self):
| return scales.getStats()[self.stats_name]
|
'Set the metrics stats name.
The stats_name is a string used to access the metrics through scales: scales.getStats()[<stats_name>]
Default is \'cassandra-<num>\'.'
| def set_stats_name(self, stats_name):
| if (self.stats_name == stats_name):
return
if (stats_name in scales._Stats.stats):
raise ValueError('"{0}" already exists in stats.'.format(stats_name))
stats = scales._Stats.stats[self.stats_name]
del scales._Stats.stats[self.stats_name]
self.stats_name = stats_name
scales._Stats.stats[self.stats_name] = stats
|
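A sketch of reading driver metrics, assuming the optional `scales` dependency is installed and
metrics were enabled on the cluster:
.. code-block:: python

    from cassandra.cluster import Cluster

    cluster = Cluster(metrics_enabled=True)
    session = cluster.connect()
    cluster.metrics.set_stats_name('analytics-cluster')
    print(cluster.metrics.get_stats())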
'`query_string` should be a literal CQL statement with the exception
of parameter placeholders that will be filled through the
`parameters` argument of :meth:`.Session.execute()`.
See :class:`Statement` attributes for a description of the other parameters.'
| def __init__(self, query_string, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None, is_idempotent=False):
| Statement.__init__(self, retry_policy, consistency_level, routing_key, serial_consistency_level, fetch_size, keyspace, custom_payload, is_idempotent)
self._query_string = query_string
|
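A sketch of a typical SimpleStatement, assuming an existing `session` and an illustrative
`users` table:
.. code-block:: python

    from cassandra import ConsistencyLevel
    from cassandra.query import SimpleStatement

    statement = SimpleStatement(
        "INSERT INTO users (name, age) VALUES (%s, %s)",
        consistency_level=ConsistencyLevel.QUORUM,
        is_idempotent=True)
    session.execute(statement, ("alice", 30))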
'Creates and returns a :class:`BoundStatement` instance using `values`.
See :meth:`BoundStatement.bind` for rules on input ``values``.'
| def bind(self, values):
| return BoundStatement(self).bind(values)
|
'`prepared_statement` should be an instance of :class:`PreparedStatement`.
See :class:`Statement` attributes for a description of the other parameters.'
| def __init__(self, prepared_statement, retry_policy=None, consistency_level=None, routing_key=None, serial_consistency_level=None, fetch_size=FETCH_SIZE_UNSET, keyspace=None, custom_payload=None):
| self.prepared_statement = prepared_statement
self.consistency_level = prepared_statement.consistency_level
self.serial_consistency_level = prepared_statement.serial_consistency_level
self.fetch_size = prepared_statement.fetch_size
self.custom_payload = prepared_statement.custom_payload
self.is_idempotent = prepared_statement.is_idempotent
self.values = []
meta = prepared_statement.column_metadata
if meta:
self.keyspace = meta[0].keyspace_name
Statement.__init__(self, retry_policy, consistency_level, routing_key, serial_consistency_level, fetch_size, keyspace, custom_payload, prepared_statement.is_idempotent)
|
'Binds a sequence of values for the prepared statement parameters
and returns this instance. Note that `values` *must* be:
* a sequence, even if you are only binding one value, or
* a dict that relates 1-to-1 between dict keys and columns
.. versionchanged:: 2.6.0
:data:`~.UNSET_VALUE` was introduced. These can be bound as positional parameters
in a sequence, or by name in a dict. Additionally, when using protocol v4+:
* short sequences will be extended to match bind parameters with UNSET_VALUE
* names may be omitted from a dict with UNSET_VALUE implied.
.. versionchanged:: 3.0.0
method will not throw if extra keys are present in bound dict (PYTHON-178)'
| def bind(self, values):
| if (values is None):
values = ()
proto_version = self.prepared_statement.protocol_version
col_meta = self.prepared_statement.column_metadata
if isinstance(values, dict):
values_dict = values
values = []
for col in col_meta:
try:
values.append(values_dict[col.name])
except KeyError:
if (proto_version >= 4):
values.append(UNSET_VALUE)
else:
raise KeyError(('Column name `%s` not found in bound dict.' % col.name))
value_len = len(values)
col_meta_len = len(col_meta)
if (value_len > col_meta_len):
raise ValueError(('Too many arguments provided to bind() (got %d, expected %d)' % (value_len, col_meta_len)))
if ((proto_version < 4) and self.prepared_statement.routing_key_indexes and (value_len < len(self.prepared_statement.routing_key_indexes))):
raise ValueError(('Too few arguments provided to bind() (got %d, required %d for routing key)' % (value_len, len(self.prepared_statement.routing_key_indexes))))
self.raw_values = values
self.values = []
for (value, col_spec) in zip(values, col_meta):
if (value is None):
self.values.append(None)
elif (value is UNSET_VALUE):
if (proto_version >= 4):
self._append_unset_value()
else:
raise ValueError(('Attempt to bind UNSET_VALUE while using unsuitable protocol version (%d < 4)' % proto_version))
else:
try:
self.values.append(col_spec.type.serialize(value, proto_version))
except (TypeError, struct.error) as exc:
actual_type = type(value)
message = ('Received an argument of invalid type for column "%s". Expected: %s, Got: %s; (%s)' % (col_spec.name, col_spec.type, actual_type, exc))
raise TypeError(message)
if (proto_version >= 4):
diff = (col_meta_len - len(self.values))
if diff:
for _ in range(diff):
self._append_unset_value()
return self
|
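A sketch of both binding styles, assuming an existing `session` and an illustrative `users` table:
.. code-block:: python

    prepared = session.prepare("INSERT INTO users (name, age) VALUES (?, ?)")

    session.execute(prepared.bind(("alice", 30)))   # positional sequence

    # By name; with protocol v4+ the omitted 'age' is implicitly UNSET:
    session.execute(prepared.bind({'name': 'bob'}))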
'`batch_type` specifies the :class:`.BatchType` for the batch operation.
Defaults to :attr:`.BatchType.LOGGED`.
`retry_policy` should be a :class:`~.RetryPolicy` instance for
controlling retries on the operation.
`consistency_level` should be a :class:`~.ConsistencyLevel` value
to be used for all operations in the batch.
`custom_payload` is a :ref:`custom_payload` passed to the server.
Note: as Statement objects are added to the batch, this map is
updated with any values found in their custom payloads. These are
only allowed when using protocol version 4 or higher.
Example usage:
.. code-block:: python
insert_user = session.prepare("INSERT INTO users (name, age) VALUES (?, ?)")
batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
for (name, age) in users_to_insert:
batch.add(insert_user, (name, age))
session.execute(batch)
You can also mix different types of operations within a batch:
.. code-block:: python
batch = BatchStatement()
batch.add(SimpleStatement("INSERT INTO users (name, age) VALUES (%s, %s)"), (name, age))
batch.add(SimpleStatement("DELETE FROM pending_users WHERE name=%s"), (name,))
session.execute(batch)
.. versionadded:: 2.0.0
.. versionchanged:: 2.1.0
Added `serial_consistency_level` as a parameter
.. versionchanged:: 2.6.0
Added `custom_payload` as a parameter'
| def __init__(self, batch_type=BatchType.LOGGED, retry_policy=None, consistency_level=None, serial_consistency_level=None, session=None, custom_payload=None):
| self.batch_type = batch_type
self._statements_and_parameters = []
self._session = session
Statement.__init__(self, retry_policy=retry_policy, consistency_level=consistency_level, serial_consistency_level=serial_consistency_level, custom_payload=custom_payload)
|
'This is a convenience method to clear a batch statement for reuse.
*Note:* it should not be called concurrently with incomplete execution futures
that are executing the same ``BatchStatement``.'
| def clear(self):
| del self._statements_and_parameters[:]
self.keyspace = None
self.routing_key = None
if self.custom_payload:
self.custom_payload.clear()
|
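A sketch of reusing one batch across synchronous executions; `insert_user` and `user_chunks`
are hypothetical:
.. code-block:: python

    batch = BatchStatement()
    for chunk in user_chunks:                 # hypothetical iterable of row chunks
        for (name, age) in chunk:
            batch.add(insert_user, (name, age))
        session.execute(batch)                # synchronous: completed before reuse
        batch.clear()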
'Adds a :class:`.Statement` and optional sequence of parameters
to be used with the statement to the batch.
Like with other statements, parameters must be a sequence, even
if there is only one item.'
| def add(self, statement, parameters=None):
| if isinstance(statement, six.string_types):
if parameters:
encoder = (Encoder() if (self._session is None) else self._session.encoder)
statement = bind_params(statement, parameters, encoder)
self._add_statement_and_params(False, statement, ())
elif isinstance(statement, PreparedStatement):
query_id = statement.query_id
bound_statement = statement.bind((() if (parameters is None) else parameters))
self._update_state(bound_statement)
self._add_statement_and_params(True, query_id, bound_statement.values)
elif isinstance(statement, BoundStatement):
if parameters:
raise ValueError('Parameters cannot be passed with a BoundStatement to BatchStatement.add()')
self._update_state(statement)
self._add_statement_and_params(True, statement.prepared_statement.query_id, statement.values)
else:
query_string = statement.query_string
if parameters:
encoder = (Encoder() if (self._session is None) else self._session.encoder)
query_string = bind_params(query_string, parameters, encoder)
self._update_state(statement)
self._add_statement_and_params(False, query_string, ())
return self
|
'Adds a sequence of :class:`.Statement` objects and a matching sequence
of parameters to the batch. Statement and parameter sequences must be of equal length or
one will be truncated. :const:`None` can be used in the parameters position where no parameters are needed.'
| def add_all(self, statements, parameters):
| for (statement, value) in zip(statements, parameters):
self.add(statement, value)
|
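A sketch, reusing the hypothetical `insert_user` prepared statement from above; `None` marks
the statement that takes no parameters:
.. code-block:: python

    batch = BatchStatement()
    statements = [insert_user,
                  insert_user,
                  "DELETE FROM pending_users WHERE name='alice'"]
    parameters = [("alice", 30), ("bob", 25), None]
    batch.add_all(statements, parameters)
    session.execute(batch)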
'Retrieves the actual tracing details from Cassandra and populates the
attributes of this instance. Because tracing details are stored
asynchronously by Cassandra, this may need to retry the session
detail fetch. If the trace is still not available after `max_wait`
seconds, :exc:`.TraceUnavailable` will be raised; if `max_wait` is
:const:`None`, this will retry forever.
`wait_for_complete=False` bypasses the wait for duration to be populated.
This can be used to query events from partial sessions.
`query_cl` specifies a consistency level to use for polling the trace tables,
if it should be different than the session default.'
| def populate(self, max_wait=2.0, wait_for_complete=True, query_cl=None):
| attempt = 0
start = time.time()
while True:
time_spent = (time.time() - start)
if ((max_wait is not None) and (time_spent >= max_wait)):
raise TraceUnavailable(('Trace information was not available within %f seconds. Consider raising Session.max_trace_wait.' % (max_wait,)))
log.debug('Attempting to fetch trace info for trace ID: %s', self.trace_id)
session_results = self._execute(SimpleStatement(self._SELECT_SESSIONS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)
is_complete = (session_results and (session_results[0].duration is not None) and (session_results[0].started_at is not None))
if ((not session_results) or (wait_for_complete and (not is_complete))):
time.sleep((self._BASE_RETRY_SLEEP * (2 ** attempt)))
attempt += 1
continue
if is_complete:
log.debug('Fetched trace info for trace ID: %s', self.trace_id)
else:
log.debug('Fetching partial trace info for trace ID: %s', self.trace_id)
session_row = session_results[0]
self.request_type = session_row.request
self.duration = (timedelta(microseconds=session_row.duration) if is_complete else None)
self.started_at = session_row.started_at
self.coordinator = session_row.coordinator
self.parameters = session_row.parameters
self.client = getattr(session_row, 'client', None)
log.debug('Attempting to fetch trace events for trace ID: %s', self.trace_id)
time_spent = (time.time() - start)
event_results = self._execute(SimpleStatement(self._SELECT_EVENTS_FORMAT, consistency_level=query_cl), (self.trace_id,), time_spent, max_wait)
log.debug('Fetched trace events for trace ID: %s', self.trace_id)
self.events = tuple((TraceEvent(r.activity, r.event_id, r.source, r.source_elapsed, r.thread) for r in event_results))
break
|
'Implementations of this class should return a new instance
of :class:`~.Authenticator` or one of its subclasses.'
| def new_authenticator(self, host):
| raise NotImplementedError()
|
'Returns a message to send to the server to initiate the SASL handshake.
:const:`None` may be returned to send an empty message.'
| def initial_response(self):
| return None
|
'Called when the server sends a challenge message. Generally, this method
should return :const:`None` when authentication is complete from a
client perspective. Otherwise, a string should be returned.'
| def evaluate_challenge(self, challenge):
| raise NotImplementedError()
|
'Called when the server indicates that authentication was successful.
Depending on the authentication mechanism, `token` may be :const:`None`
or a string.'
| def on_authentication_success(self, token):
| pass
|
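A minimal sketch of a custom provider/authenticator pair for a hypothetical token-based SASL
mechanism; the class names and wire format are illustrative:
.. code-block:: python

    from cassandra.auth import AuthProvider, Authenticator

    class TokenAuthProvider(AuthProvider):
        def __init__(self, token):
            self.token = token

        def new_authenticator(self, host):
            # One Authenticator instance per connection/host.
            return TokenAuthenticator(self.token)

    class TokenAuthenticator(Authenticator):
        def __init__(self, token):
            self.token = token

        def initial_response(self):
            return self.token.encode('utf-8')

        def evaluate_challenge(self, challenge):
            return None                 # single-round mechanism assumed

It would be wired in as ``Cluster(auth_provider=TokenAuthProvider('secret'))``.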