desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def test_down_event_with_active_connection(self):
    """Test to ensure that on down calls to clusters with connections still active don't result in
    a host being marked down. The second part of the test kills the connection then invokes
    on_down, and ensures the state changes for host's metadata.

    @since 3.7
    @jira_ticket PYTHON-498
    @expected_result host should never be toggled down while a connection is active.

    @test_category connection
    """
    with Cluster(protocol_version=PROTOCOL_VERSION) as cluster:
        session = cluster.connect(wait_for_all_pools=True)
        random_host = cluster.metadata.all_hosts()[0]
        # on_down with an active pool must NOT mark the host down
        cluster.on_down(random_host, False)
        for _ in range(10):
            new_host = cluster.metadata.all_hosts()[0]
            self.assertTrue(new_host.is_up, 'Host was not up on iteration {0}'.format(_))
            time.sleep(0.01)

        # kill the pool, then on_down should eventually mark the host down
        pool = session._pools.get(random_host)
        pool.shutdown()
        cluster.on_down(random_host, False)
        was_marked_down = False
        for _ in range(20):
            new_host = cluster.metadata.all_hosts()[0]
            if not new_host.is_up:
                was_marked_down = True
                break
            time.sleep(0.01)
        self.assertTrue(was_marked_down)
def test_duplicate(self):
    """Test duplicate RPC addresses.

    Modifies the system.peers table to make hosts have the same rpc address. Ensures such
    hosts are filtered out and a message is logged

    @since 3.4
    @jira_ticket PYTHON-366
    @expected_result only one hosts' metadata will be populated

    @test_category metadata
    """
    mock_handler = MockLoggingHandler()
    logger = logging.getLogger(cassandra.cluster.__name__)
    logger.addHandler(mock_handler)
    test_cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                           load_balancing_policy=self.load_balancing_policy)
    try:
        test_cluster.connect()
        warnings = mock_handler.messages.get('warning')
        self.assertEqual(len(warnings), 1)
        self.assertTrue('multiple' in warnings[0])
    finally:
        # Fix: detach the handler and release cluster resources even when an
        # assertion above fails; previously a failure leaked both.
        logger.removeHandler(mock_handler)
        test_cluster.shutdown()
@protocolv5
def test_invalid_protocol_version_beta_option(self):
    """Test cluster connection with protocol v5 and beta flag not set

    @since 3.7.0
    @jira_ticket PYTHON-614
    @expected_result client shouldn't connect with V5 and no beta flag set

    @test_category connection
    """
    cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED,
                      allow_beta_protocol_version=False)
    try:
        with self.assertRaises(NoHostAvailable):
            cluster.connect()
    except Exception as e:
        # Fix: Exception.message does not exist on Python 3; str(e) is
        # portable across Python 2 and 3.
        self.fail('Unexpected error encountered {0}'.format(str(e)))
    finally:
        # Fix: the cluster was never shut down, leaking control-connection
        # threads into subsequent tests.
        cluster.shutdown()
@protocolv5
def test_valid_protocol_version_beta_options_connect(self):
    """Test cluster connection with protocol version 5 and beta flag set

    @since 3.7.0
    @jira_ticket PYTHON-614
    @expected_result client should connect with protocol v5 and beta flag set.

    @test_category connection
    """
    cluster = Cluster(protocol_version=cassandra.ProtocolVersion.MAX_SUPPORTED,
                      allow_beta_protocol_version=True)
    session = cluster.connect()
    # negotiated version must remain the requested beta version
    self.assertEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED)
    # a trivial query proves the connection is usable
    self.assertTrue(session.execute('select release_version from system.local')[0])
    cluster.shutdown()
def test_can_insert_blob_type_as_string(self):
    """Tests that byte strings in Python maps to blob type in Cassandra"""
    s = self.session
    s.execute('CREATE TABLE blobstring (a ascii PRIMARY KEY, b blob)')
    params = ['key1', 'blobbyblob']
    query = 'INSERT INTO blobstring (a, b) VALUES (%s, %s)'

    # Python 2 with CQL >= 3.1 rejects plain string constants for blob columns;
    # the exact error text differs before/after Cassandra 2.1.
    if six.PY2 and self.cql_version >= (3, 1, 0):
        if self.cass_version >= (2, 1, 0):
            msg = '.*Invalid STRING constant \\(.*?\\) for "b" of type blob.*'
        else:
            msg = '.*Invalid STRING constant \\(.*?\\) for b of type blob.*'
        self.assertRaisesRegexp(InvalidRequest, msg, s.execute, query, params)
        return

    # Python 2 (older CQL) needs a hex-encoded value; Python 3 takes it as-is.
    if six.PY2:
        cass_params = [params[0], params[1].encode('hex')]
        s.execute(query, cass_params)
    else:
        s.execute(query, params)

    results = s.execute('SELECT * FROM blobstring')[0]
    for expected, actual in zip(params, results):
        self.assertEqual(expected, actual)
def test_can_insert_blob_type_as_bytearray(self):
    """Tests that blob type in Cassandra maps to bytearray in Python"""
    s = self.session
    s.execute('CREATE TABLE blobbytes (a ascii PRIMARY KEY, b blob)')
    # Fix: bytearray('blob1') raises TypeError on Python 3 (string argument
    # without an encoding); a bytes literal is equivalent on Python 2 and
    # valid on Python 3.
    params = ['key1', bytearray(b'blob1')]
    s.execute('INSERT INTO blobbytes (a, b) VALUES (%s, %s)', params)
    results = s.execute('SELECT * FROM blobbytes')[0]
    for expected, actual in zip(params, results):
        self.assertEqual(expected, actual)
@unittest.skipIf(not hasattr(cassandra, 'deserializers'),
                 'Cython required for to test DesBytesTypeArray deserializer')
def test_des_bytes_type_array(self):
    """Simple test to ensure the DesBytesTypeByteArray deserializer functionally works

    @since 3.1
    @jira_ticket PYTHON-503
    @expected_result byte array should be deserialized appropriately.

    @test_category queries:custom_payload
    """
    original = None
    try:
        # swap in the bytearray deserializer, restore it afterwards
        original = cassandra.deserializers.DesBytesType
        cassandra.deserializers.DesBytesType = cassandra.deserializers.DesBytesTypeByteArray
        s = self.session
        s.execute('CREATE TABLE blobbytes2 (a ascii PRIMARY KEY, b blob)')
        # Fix: bytearray('blob1') raises TypeError on Python 3; bytearray(b'...')
        # is equivalent on Python 2 and valid on Python 3.
        params = ['key1', bytearray(b'blob1')]
        s.execute('INSERT INTO blobbytes2 (a, b) VALUES (%s, %s)', params)
        results = s.execute('SELECT * FROM blobbytes2')[0]
        for expected, actual in zip(params, results):
            self.assertEqual(expected, actual)
    finally:
        if original is not None:
            cassandra.deserializers.DesBytesType = original
def test_can_insert_primitive_datatypes(self):
    """Test insertion of all datatype primitives"""
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)

    # build a table with one column per primitive type, named a, b, c, ...
    alpha_type_list = ['zz int PRIMARY KEY']
    col_names = ['zz']
    start_index = ord('a')
    for i, datatype in enumerate(PRIMITIVE_DATATYPES):
        alpha_type_list.append('{0} {1}'.format(chr(start_index + i), datatype))
        col_names.append(chr(start_index + i))
    s.execute('CREATE TABLE alltypes ({0})'.format(', '.join(alpha_type_list)))

    # one sample value per type, keyed by zz=0
    params = [0]
    for datatype in PRIMITIVE_DATATYPES:
        params.append(get_sample(datatype))

    # simple statement insert, then verify
    columns_string = ', '.join(col_names)
    placeholders = ', '.join(['%s'] * len(col_names))
    s.execute('INSERT INTO alltypes ({0}) VALUES ({1})'.format(columns_string, placeholders), params)
    results = s.execute('SELECT {0} FROM alltypes WHERE zz=0'.format(columns_string))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # prepared statement insert, then verify
    placeholders = ','.join(['?'] * len(col_names))
    insert = s.prepare('INSERT INTO alltypes ({0}) VALUES ({1})'.format(columns_string, placeholders))
    s.execute(insert.bind(params))
    results = s.execute('SELECT {0} FROM alltypes WHERE zz=0'.format(columns_string))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # verify via a prepared select
    select = s.prepare('SELECT {0} FROM alltypes WHERE zz=?'.format(columns_string))
    results = s.execute(select.bind([0]))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # verify with a dict row factory as well
    s.row_factory = ordered_dict_factory
    select = s.prepare('SELECT * FROM alltypes')
    results = s.execute(select)[0]
    for expected, actual in zip(params, results.values()):
        self.assertEqual(actual, expected)

    c.shutdown()
def test_can_insert_collection_datatypes(self):
    """Test insertion of all collection types"""
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    # required to encode tuples with simple statements
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple

    # build a column per (collection type, primitive key type) combination
    alpha_type_list = ['zz int PRIMARY KEY']
    col_names = ['zz']
    start_index = ord('a')
    for i, collection_type in enumerate(COLLECTION_TYPES):
        for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS):
            if collection_type == 'map':
                type_string = '{0}_{1} {2}<{3}, {3}>'.format(chr(start_index + i), chr(start_index + j),
                                                             collection_type, datatype)
            elif collection_type == 'tuple':
                type_string = '{0}_{1} frozen<{2}<{3}>>'.format(chr(start_index + i), chr(start_index + j),
                                                                collection_type, datatype)
            else:
                type_string = '{0}_{1} {2}<{3}>'.format(chr(start_index + i), chr(start_index + j),
                                                        collection_type, datatype)
            alpha_type_list.append(type_string)
            col_names.append('{0}_{1}'.format(chr(start_index + i), chr(start_index + j)))
    s.execute('CREATE TABLE allcoltypes ({0})'.format(', '.join(alpha_type_list)))
    columns_string = ', '.join(col_names)

    # simple statement insert, then verify
    params = [0]
    for collection_type in COLLECTION_TYPES:
        for datatype in PRIMITIVE_DATATYPES_KEYS:
            params.append(get_collection_sample(collection_type, datatype))
    placeholders = ', '.join(['%s'] * len(col_names))
    s.execute('INSERT INTO allcoltypes ({0}) VALUES ({1})'.format(columns_string, placeholders), params)
    results = s.execute('SELECT {0} FROM allcoltypes WHERE zz=0'.format(columns_string))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # prepared statement insert with fresh samples, then verify
    params = [0]
    for collection_type in COLLECTION_TYPES:
        for datatype in PRIMITIVE_DATATYPES_KEYS:
            params.append(get_collection_sample(collection_type, datatype))
    placeholders = ','.join(['?'] * len(col_names))
    insert = s.prepare('INSERT INTO allcoltypes ({0}) VALUES ({1})'.format(columns_string, placeholders))
    s.execute(insert.bind(params))
    results = s.execute('SELECT {0} FROM allcoltypes WHERE zz=0'.format(columns_string))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # verify via a prepared select
    select = s.prepare('SELECT {0} FROM allcoltypes WHERE zz=?'.format(columns_string))
    results = s.execute(select.bind([0]))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # verify with a dict row factory as well
    s.row_factory = ordered_dict_factory
    select = s.prepare('SELECT * FROM allcoltypes')
    results = s.execute(select)[0]
    for expected, actual in zip(params, results.values()):
        self.assertEqual(actual, expected)

    c.shutdown()
def test_can_insert_empty_strings_and_nulls(self):
    """Test insertion of empty strings and null values"""
    s = self.session

    # partition the primitive types into string-like and non-string-like columns
    alpha_type_list = ['zz int PRIMARY KEY']
    col_names = []
    string_types = set(('ascii', 'text', 'varchar'))
    # Fix: the original used set('') — iterating the empty string, which is
    # just an empty set spelled misleadingly; use an explicit empty set.
    string_columns = set()
    # blob, date, inet, time, timestamp reject the empty string server-side differently
    non_string_types = PRIMITIVE_DATATYPES - string_types - set(('blob', 'date', 'inet', 'time', 'timestamp'))
    non_string_columns = set()
    start_index = ord('a')
    for i, datatype in enumerate(PRIMITIVE_DATATYPES):
        col_name = chr(start_index + i)
        alpha_type_list.append('{0} {1}'.format(col_name, datatype))
        col_names.append(col_name)
        if datatype in non_string_types:
            non_string_columns.add(col_name)
        if datatype in string_types:
            string_columns.add(col_name)
    execute_until_pass(s, 'CREATE TABLE all_empty ({0})'.format(', '.join(alpha_type_list)))

    # inserting only the key yields NULL for every other column
    columns_string = ','.join(col_names)
    s.execute('INSERT INTO all_empty (zz) VALUES (2)')
    results = s.execute('SELECT {0} FROM all_empty WHERE zz=2'.format(columns_string))[0]
    self.assertTrue(all(x is None for x in results))
    select = s.prepare('SELECT {0} FROM all_empty WHERE zz=?'.format(columns_string))
    results = s.execute(select.bind([2]))[0]
    self.assertTrue(all(x is None for x in results))

    # string-like columns accept the empty string
    expected_values = dict((col, '') for col in string_columns)
    columns_string = ','.join(string_columns)
    placeholders = ','.join(['%s'] * len(string_columns))
    s.execute('INSERT INTO all_empty (zz, {0}) VALUES (3, {1})'.format(columns_string, placeholders),
              expected_values.values())
    results = s.execute('SELECT {0} FROM all_empty WHERE zz=3'.format(columns_string))[0]
    for expected, actual in zip(expected_values.values(), results):
        self.assertEqual(actual, expected)
    results = s.execute(s.prepare('SELECT {0} FROM all_empty WHERE zz=?'.format(columns_string)), [3])[0]
    for expected, actual in zip(expected_values.values(), results):
        self.assertEqual(actual, expected)

    # non-string columns must reject the empty string
    for col in non_string_columns:
        query = 'INSERT INTO all_empty (zz, {0}) VALUES (4, %s)'.format(col)
        with self.assertRaises(InvalidRequest):
            s.execute(query, [''])
        # Fix: this literal was garbled across two physical lines in the
        # source; reassembled onto one line.
        insert = s.prepare('INSERT INTO all_empty (zz, {0}) VALUES (4, ?)'.format(col))
        with self.assertRaises(TypeError):
            s.execute(insert, [''])

    # inserting None overwrites real values with NULL (simple statement)
    params = []
    for datatype in PRIMITIVE_DATATYPES:
        params.append(get_sample(datatype))
    columns_string = ','.join(col_names)
    placeholders = ','.join(['%s'] * len(col_names))
    simple_insert = 'INSERT INTO all_empty (zz, {0}) VALUES (5, {1})'.format(columns_string, placeholders)
    s.execute(simple_insert, params)
    null_values = [None] * len(col_names)
    s.execute(simple_insert, null_values)
    query = 'SELECT {0} FROM all_empty WHERE zz=5'.format(columns_string)
    results = s.execute(query)[0]
    for col in results:
        self.assertEqual(None, col)
    select = s.prepare('SELECT {0} FROM all_empty WHERE zz=?'.format(columns_string))
    results = s.execute(select.bind([5]))[0]
    for col in results:
        self.assertEqual(None, col)

    # and again via a prepared statement
    s.execute(simple_insert, params)
    placeholders = ','.join(['?'] * len(col_names))
    insert = s.prepare('INSERT INTO all_empty (zz, {0}) VALUES (5, {1})'.format(columns_string, placeholders))
    s.execute(insert, null_values)
    results = s.execute(query)[0]
    for col in results:
        self.assertEqual(None, col)
    results = s.execute(select.bind([5]))[0]
    for col in results:
        self.assertEqual(None, col)
def test_can_insert_empty_values_for_int32(self):
    """Ensure Int32Type supports empty values"""
    s = self.session
    execute_until_pass(s, 'CREATE TABLE empty_values (a text PRIMARY KEY, b int)')
    execute_until_pass(s, "INSERT INTO empty_values (a, b) VALUES ('a', blobAsInt(0x))")
    # Fix: remember the pre-test flag and restore it, instead of
    # unconditionally resetting to False in the finally clause.
    original = Int32Type.support_empty_values
    try:
        Int32Type.support_empty_values = True
        results = execute_until_pass(s, "SELECT b FROM empty_values WHERE a='a'")[0]
        self.assertIs(EMPTY, results.b)
    finally:
        Int32Type.support_empty_values = original
def test_timezone_aware_datetimes_are_timestamps(self):
    """Ensure timezone-aware datetimes are converted to timestamps correctly"""
    try:
        import pytz
    except ImportError as exc:
        raise unittest.SkipTest('pytz is not available: %r' % (exc,))

    dt = datetime(1997, 8, 29, 11, 14)
    eastern_tz = pytz.timezone('US/Eastern')
    # Fix: pytz's localize() RETURNS a new aware datetime; the original
    # discarded the result, so a naive datetime was inserted and the test
    # never actually exercised timezone-aware conversion.
    dt = eastern_tz.localize(dt)

    s = self.session
    s.execute('CREATE TABLE tz_aware (a ascii PRIMARY KEY, b timestamp)')

    # simple statement round-trip
    s.execute("INSERT INTO tz_aware (a, b) VALUES ('key1', %s)", [dt])
    result = s.execute("SELECT b FROM tz_aware WHERE a='key1'")[0].b
    self.assertEqual(dt.utctimetuple(), result.utctimetuple())

    # prepared statement round-trip
    insert = s.prepare("INSERT INTO tz_aware (a, b) VALUES ('key2', ?)")
    s.execute(insert.bind([dt]))
    result = s.execute("SELECT b FROM tz_aware WHERE a='key2'")[0].b
    self.assertEqual(dt.utctimetuple(), result.utctimetuple())
def test_can_insert_tuples(self):
    """Basic test of tuple functionality"""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    # required to encode tuples with simple statements
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple
    s.execute('CREATE TABLE tuple_type (a int PRIMARY KEY, b frozen<tuple<ascii, int, boolean>>)')

    # fully specified tuple round-trips unchanged
    complete = ('foo', 123, True)
    s.execute('INSERT INTO tuple_type (a, b) VALUES (0, %s)', parameters=(complete,))
    result = s.execute('SELECT b FROM tuple_type WHERE a=0')[0]
    self.assertEqual(complete, result.b)

    # partially specified tuples read back padded with None
    partial = ('bar', 456)
    partial_result = partial + (None,)
    s.execute('INSERT INTO tuple_type (a, b) VALUES (1, %s)', parameters=(partial,))
    result = s.execute('SELECT b FROM tuple_type WHERE a=1')[0]
    self.assertEqual(partial_result, result.b)

    subpartial = ('zoo',)
    subpartial_result = subpartial + (None, None)
    s.execute('INSERT INTO tuple_type (a, b) VALUES (2, %s)', parameters=(subpartial,))
    result = s.execute('SELECT b FROM tuple_type WHERE a=2')[0]
    self.assertEqual(subpartial_result, result.b)

    # same behavior via prepared statements
    prepared = s.prepare('INSERT INTO tuple_type (a, b) VALUES (?, ?)')
    s.execute(prepared, parameters=(3, complete))
    s.execute(prepared, parameters=(4, partial))
    s.execute(prepared, parameters=(5, subpartial))

    # oversized tuples are rejected client-side
    self.assertRaises(ValueError, s.execute, prepared, parameters=(0, (1, 2, 3, 4, 5, 6)))

    prepared = s.prepare('SELECT b FROM tuple_type WHERE a=?')
    self.assertEqual(complete, s.execute(prepared, (3,))[0].b)
    self.assertEqual(partial_result, s.execute(prepared, (4,))[0].b)
    self.assertEqual(subpartial_result, s.execute(prepared, (5,))[0].b)

    c.shutdown()
def test_can_insert_tuples_with_varying_lengths(self):
    """Test tuple types of lengths of 1, 2, 3, and 384 to ensure edge cases work as expected."""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    s.row_factory = dict_factory
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple

    # one column per tuple length under test
    lengths = (1, 2, 3, 384)
    value_schema = []
    for i in lengths:
        value_schema += [' v_%s frozen<tuple<%s>>' % (i, ', '.join(['int'] * i))]
    s.execute('CREATE TABLE tuple_lengths (k int PRIMARY KEY, %s)' % (', '.join(value_schema),))

    for i in lengths:
        # a tuple one element too long must be rejected
        created_tuple = tuple(range(0, i + 1))
        self.assertRaises(InvalidRequest, s.execute,
                          'INSERT INTO tuple_lengths (k, v_%s) VALUES (0, %s)', (i, created_tuple))
        # an exactly-sized tuple round-trips
        created_tuple = tuple(range(0, i))
        s.execute('INSERT INTO tuple_lengths (k, v_%s) VALUES (0, %s)', (i, created_tuple))
        result = s.execute('SELECT v_%s FROM tuple_lengths WHERE k=0', (i,))[0]
        self.assertEqual(tuple(created_tuple), result['v_%s' % i])

    c.shutdown()
def test_can_insert_tuples_all_primitive_datatypes(self):
    """Ensure tuple subtypes are appropriately handled."""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple
    s.execute('CREATE TABLE tuple_primitive (k int PRIMARY KEY, v frozen<tuple<%s>>)'
              % ','.join(PRIMITIVE_DATATYPES))

    # grow the tuple one sample at a time; unset trailing fields read back as None
    values = []
    type_count = len(PRIMITIVE_DATATYPES)
    for i, data_type in enumerate(PRIMITIVE_DATATYPES):
        values.append(get_sample(data_type))
        expected = tuple(values + [None] * (type_count - len(values)))
        s.execute('INSERT INTO tuple_primitive (k, v) VALUES (%s, %s)', (i, tuple(values)))
        result = s.execute('SELECT v FROM tuple_primitive WHERE k=%s', (i,))[0]
        self.assertEqual(result.v, expected)

    c.shutdown()
def test_can_insert_tuples_all_collection_datatypes(self):
    """Ensure tuple subtypes are appropriately handled for maps, sets, and lists."""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    s.row_factory = dict_factory
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple

    values = []
    # tuple<list<...>> columns
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        values.append('v_{0} frozen<tuple<list<{1}>>>'.format(len(values), datatype))
    # tuple<set<...>> columns
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        values.append('v_{0} frozen<tuple<set<{1}>>>'.format(len(values), datatype))
    # tuple<map<...>> columns; blob keys are unhashable client-side, so use ascii keys
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        datatype_1 = datatype_2 = datatype
        if datatype == 'blob':
            datatype_1 = 'ascii'
        values.append('v_{0} frozen<tuple<map<{1}, {2}>>>'.format(len(values), datatype_1, datatype_2))

    # guard: the three loops above must cover every collection type
    if set(COLLECTION_TYPES) != set(['tuple', 'list', 'map', 'set']):
        # Fix: the original raised NotImplemented(...), but NotImplemented is a
        # non-callable singleton — that line itself raised TypeError instead of
        # the intended error. NotImplementedError is the exception class.
        raise NotImplementedError('Missing datatype not implemented: {}'.format(
            set(COLLECTION_TYPES) - set(['tuple', 'list', 'map', 'set'])))

    s.execute('CREATE TABLE tuple_non_primative (k int PRIMARY KEY, %s)' % ', '.join(values))

    i = 0
    # round-trip tuple<list<datatype>>
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        created_tuple = tuple([[get_sample(datatype)]])
        s.execute('INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)', (i, created_tuple))
        result = s.execute('SELECT v_%s FROM tuple_non_primative WHERE k=0', (i,))[0]
        self.assertEqual(created_tuple, result['v_%s' % i])
        i += 1
    # round-trip tuple<set<datatype>>
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        created_tuple = tuple([sortedset([get_sample(datatype)])])
        s.execute('INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)', (i, created_tuple))
        result = s.execute('SELECT v_%s FROM tuple_non_primative WHERE k=0', (i,))[0]
        self.assertEqual(created_tuple, result['v_%s' % i])
        i += 1
    # round-trip tuple<map<datatype, datatype>> (ascii keys for blob values)
    for datatype in PRIMITIVE_DATATYPES_KEYS:
        if datatype == 'blob':
            created_tuple = tuple([{get_sample('ascii'): get_sample(datatype)}])
        else:
            created_tuple = tuple([{get_sample(datatype): get_sample(datatype)}])
        s.execute('INSERT INTO tuple_non_primative (k, v_%s) VALUES (0, %s)', (i, created_tuple))
        result = s.execute('SELECT v_%s FROM tuple_non_primative WHERE k=0', (i,))[0]
        self.assertEqual(created_tuple, result['v_%s' % i])
        i += 1

    c.shutdown()
def nested_tuples_schema_helper(self, depth):
    """Helper method for creating nested tuple schema"""
    # Base case: the innermost element is a plain int column.
    if depth == 0:
        return 'int'
    # Recursive case: wrap the next level down in one more tuple<>.
    return 'tuple<%s>' % self.nested_tuples_schema_helper(depth - 1)
def nested_tuples_creator_helper(self, depth):
    """Helper method for creating nested tuples"""
    # Base case: an arbitrary sentinel int at the innermost level.
    if depth == 0:
        return 303
    # Recursive case: one more level of single-element tuple nesting.
    return (self.nested_tuples_creator_helper(depth - 1),)
def test_can_insert_nested_tuples(self):
    """Ensure nested are appropriately handled."""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name)
    s.row_factory = dict_factory
    s.encoder.mapping[tuple] = s.encoder.cql_encode_tuple

    # one column per nesting depth: 1, 2, 3, and 32 levels
    s.execute('CREATE TABLE nested_tuples (k int PRIMARY KEY, v_1 frozen<%s>,v_2 frozen<%s>,v_3 frozen<%s>,v_32 frozen<%s>)'
              % (self.nested_tuples_schema_helper(1), self.nested_tuples_schema_helper(2),
                 self.nested_tuples_schema_helper(3), self.nested_tuples_schema_helper(32)))

    for i in (1, 2, 3, 32):
        created_tuple = self.nested_tuples_creator_helper(i)
        s.execute('INSERT INTO nested_tuples (k, v_%s) VALUES (%s, %s)', (i, i, created_tuple))
        result = s.execute('SELECT v_%s FROM nested_tuples WHERE k=%s', (i, i))[0]
        self.assertEqual(created_tuple, result['v_%s' % i])

    c.shutdown()
def test_can_insert_tuples_with_nulls(self):
    """Test tuples with null and empty string fields."""
    if self.cass_version < (2, 1, 0):
        raise unittest.SkipTest('The tuple type was introduced in Cassandra 2.1')

    s = self.session
    s.execute('CREATE TABLE tuples_nulls (k int PRIMARY KEY, t frozen<tuple<text, int, uuid, blob>>)')

    insert = s.prepare('INSERT INTO tuples_nulls (k, t) VALUES (0, ?)')

    # all-None tuple round-trips via simple and prepared reads
    s.execute(insert, [(None, None, None, None)])
    result = s.execute('SELECT * FROM tuples_nulls WHERE k=0')
    self.assertEqual((None, None, None, None), result[0].t)
    read = s.prepare('SELECT * FROM tuples_nulls WHERE k=0')
    self.assertEqual((None, None, None, None), s.execute(read)[0].t)

    # empty strings survive where the field type allows them
    s.execute(insert, [('', None, None, '')])
    result = s.execute('SELECT * FROM tuples_nulls WHERE k=0')
    self.assertEqual(('', None, None, ''), result[0].t)
    self.assertEqual(('', None, None, ''), s.execute(read)[0].t)
def test_can_insert_unicode_query_string(self):
    """Test to ensure unicode strings can be used in a query"""
    s = self.session
    # non-ASCII characters both inline in the query text and as a bound parameter
    s.execute(u"SELECT * FROM system.local WHERE key = 'ef\u2052ef'")
    s.execute(u'SELECT * FROM system.local WHERE key = %s', (u'fe\u2051fe',))
def test_can_read_composite_type(self):
    """Test to ensure that CompositeTypes can be used in a query"""
    s = self.session
    s.execute("\n CREATE TABLE composites (\n a int PRIMARY KEY,\n b 'org.apache.cassandra.db.marshal.CompositeType(AsciiType, Int32Type)'\n )")

    # CQL string literals are parsed into the composite's components
    s.execute("INSERT INTO composites (a, b) VALUES (0, 'abc:123')")
    result = s.execute('SELECT * FROM composites WHERE a = 0')[0]
    self.assertEqual(0, result.a)
    self.assertEqual(('abc', 123), result.b)

    # a single-component literal yields a one-element composite
    s.execute("INSERT INTO composites (a, b) VALUES (0, 'abc')")
    result = s.execute('SELECT * FROM composites WHERE a = 0')[0]
    self.assertEqual(0, result.a)
    self.assertEqual(('abc',), result.b)
@notprotocolv1
def test_special_float_cql_encoding(self):
    """Test to insure that Infinity -Infinity and NaN are supported by the python driver.

    @since 3.0.0
    @jira_ticket PYTHON-282
    @expected_result nan, inf and -inf can be inserted and selected correctly.

    @test_category data_types
    """
    s = self.session
    s.execute('\n CREATE TABLE float_cql_encoding (\n f float PRIMARY KEY,\n d double\n )')
    items = (float('nan'), float('inf'), float('-inf'))

    def verify_insert_select(ins_statement, sel_statement):
        # write all three specials, then read each one back by key
        execute_concurrent_with_args(s, ins_statement, ((f, f) for f in items))
        for f in items:
            row = s.execute(sel_statement, (f,))[0]
            if math.isnan(f):
                # NaN != NaN, so compare via isnan
                self.assertTrue(math.isnan(row.f))
                self.assertTrue(math.isnan(row.d))
            else:
                self.assertEqual(row.f, f)
                self.assertEqual(row.d, f)

    # simple statements
    verify_insert_select('INSERT INTO float_cql_encoding (f, d) VALUES (%s, %s)',
                         'SELECT * FROM float_cql_encoding WHERE f=%s')
    s.execute('TRUNCATE float_cql_encoding')
    # prepared statements
    verify_insert_select(s.prepare('INSERT INTO float_cql_encoding (f, d) VALUES (?, ?)'),
                         s.prepare('SELECT * FROM float_cql_encoding WHERE f=?'))
@cythontest
def test_cython_decimal(self):
    """Test to validate that decimal deserialization works correctly in with our cython extensions

    @since 3.0.0
    @jira_ticket PYTHON-212
    @expected_result no exceptions are thrown, decimal is decoded correctly

    @test_category data_types serialization
    """
    self.session.execute('CREATE TABLE {0} (dc decimal PRIMARY KEY)'.format(self.function_table_name))
    try:
        self.session.execute('INSERT INTO {0} (dc) VALUES (-1.08430792318105707)'.format(self.function_table_name))
        results = self.session.execute('SELECT * FROM {0}'.format(self.function_table_name))
        # full precision must survive the decode path
        self.assertTrue(str(results[0].dc) == '-1.08430792318105707')
    finally:
        self.session.execute('DROP TABLE {0}'.format(self.function_table_name))
@greaterthanorequalcass3_10
def test_smoke_duration_values(self):
    """Test to write several Duration values to the database and verify
    they can be read correctly. The verify than an exception is arisen
    if the value is too big

    @since 3.10
    @jira_ticket PYTHON-747
    @expected_result the read value in C* matches the written one

    @test_category data_types serialization
    """
    self.session.execute('\n CREATE TABLE duration_smoke (k int primary key, v duration)\n ')
    self.addCleanup(self.session.execute, 'DROP TABLE duration_smoke')
    prepared = self.session.prepare('\n INSERT INTO duration_smoke (k, v)\n VALUES (?, ?)\n ')

    nanosecond_smoke_values = [0, -1, 1, 100, 1000, 1000000, 1000000000,
                               10000000000000, -9223372036854775807, 9223372036854775807,
                               int('7FFFFFFFFFFFFFFF', 16), int('-7FFFFFFFFFFFFFFF', 16)]
    month_day_smoke_values = [0, -1, 1, 100, 1000, 1000000, 1000000000,
                              int('7FFFFFFF', 16), int('-7FFFFFFF', 16)]

    for nanosecond_value in nanosecond_smoke_values:
        for month_day_value in month_day_smoke_values:
            # all components of a Duration must share a sign; skip mixed pairs
            if (month_day_value <= 0) != (nanosecond_value <= 0):
                continue
            self.session.execute(prepared,
                                 (1, Duration(month_day_value, month_day_value, nanosecond_value)))
            results = self.session.execute('SELECT * FROM duration_smoke')
            v = results[0][1]
            self.assertEqual(Duration(month_day_value, month_day_value, nanosecond_value), v,
                             'Error encoding value {0},{0},{1}'.format(month_day_value, nanosecond_value))

    # components beyond the wire-format range must be rejected client-side
    self.assertRaises(ValueError, self.session.execute, prepared,
                      (1, Duration(0, 0, int('8FFFFFFFFFFFFFF0', 16))))
    self.assertRaises(ValueError, self.session.execute, prepared,
                      (1, Duration(0, int('8FFFFFFFFFFFFFF0', 16), 0)))
    self.assertRaises(ValueError, self.session.execute, prepared,
                      (1, Duration(int('8FFFFFFFFFFFFFF0', 16), 0, 0)))
@greaterthancass21
@lessthancass30
def test_nested_types_with_protocol_version(self):
    """Test to validate that nested type serialization works on various protocol versions. Provided
    the version of cassandra is greater the 2.1.3 we would expect to nested to types to work at all
    protocol versions.

    @since 3.0.0
    @jira_ticket PYTHON-215
    @expected_result no exceptions are thrown

    @test_category data_types serialization
    """
    # one table per nested-collection shape
    ddl = 'CREATE TABLE {0}.t (\n k int PRIMARY KEY,\n v list<frozen<set<int>>>)'.format(self.keyspace_name)
    self.session.execute(ddl)
    ddl = 'CREATE TABLE {0}.u (\n k int PRIMARY KEY,\n v set<frozen<list<int>>>)'.format(self.keyspace_name)
    self.session.execute(ddl)
    ddl = 'CREATE TABLE {0}.v (\n k int PRIMARY KEY,\n v map<frozen<set<int>>, frozen<list<int>>>,\n v1 frozen<tuple<int, text>>)'.format(self.keyspace_name)
    self.session.execute(ddl)
    self.session.execute('CREATE TYPE {0}.typ (v0 frozen<map<int, frozen<list<int>>>>, v1 frozen<list<int>>)'.format(self.keyspace_name))
    ddl = 'CREATE TABLE {0}.w (\n k int PRIMARY KEY,\n v frozen<typ>)'.format(self.keyspace_name)
    self.session.execute(ddl)

    # write at each protocol version, then read back at every protocol version
    for pvi in range(1, 5):
        self.run_inserts_at_version(pvi)
        for pvr in range(1, 5):
            self.read_inserts_at_level(pvr)
@greaterthanorequalcass36
def test_non_frozen_udts(self):
    """Test to ensure that non frozen udt's work with C* >3.6.

    @since 3.7.0
    @jira_ticket PYTHON-498
    @expected_result Non frozen UDT's are supported

    @test_category data_types, udt
    """
    self.session.execute('USE {0}'.format(self.keyspace_name))
    self.session.execute('CREATE TYPE user (state text, has_corn boolean)')
    self.session.execute('CREATE TABLE {0} (a int PRIMARY KEY, b user)'.format(self.function_table_name))
    User = namedtuple('user', ('state', 'has_corn'))
    self.cluster.register_user_type(self.keyspace_name, 'user', User)
    self.session.execute('INSERT INTO {0} (a, b) VALUES (%s, %s)'.format(self.function_table_name),
                         (0, User('Nebraska', True)))
    # single-field updates are only possible on non-frozen UDTs
    self.session.execute('UPDATE {0} SET b.has_corn = False where a = 0'.format(self.function_table_name))
    result = self.session.execute('SELECT * FROM {0}'.format(self.function_table_name))
    self.assertFalse(result[0].b.has_corn)
    # the generated DDL must not mark the column frozen
    table_sql = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].as_cql_query()
    self.assertNotIn('<frozen>', table_sql)
def test_can_insert_unprepared_registered_udts(self):
    """Test the insertion of unprepared, registered UDTs"""
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)

    # first keyspace: user(age, name)
    s.execute('CREATE TYPE user (age int, name text)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    User = namedtuple('user', ('age', 'name'))
    c.register_user_type(self.keyspace_name, 'user', User)
    s.execute('INSERT INTO mytable (a, b) VALUES (%s, %s)', (0, User(42, 'bob')))
    result = s.execute('SELECT b FROM mytable WHERE a=0')
    row = result[0]
    self.assertEqual(42, row.b.age)
    self.assertEqual('bob', row.b.name)
    self.assertTrue(type(row.b) is User)

    # second keyspace: same UDT name, different shape, different mapped class
    s.execute("\n CREATE KEYSPACE udt_test_unprepared_registered2\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    s.set_keyspace('udt_test_unprepared_registered2')
    s.execute('CREATE TYPE user (state text, is_cool boolean)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    User = namedtuple('user', ('state', 'is_cool'))
    c.register_user_type('udt_test_unprepared_registered2', 'user', User)
    s.execute('INSERT INTO mytable (a, b) VALUES (%s, %s)', (0, User('Texas', True)))
    result = s.execute('SELECT b FROM mytable WHERE a=0')
    row = result[0]
    self.assertEqual('Texas', row.b.state)
    self.assertEqual(True, row.b.is_cool)
    self.assertTrue(type(row.b) is User)

    s.execute('DROP KEYSPACE udt_test_unprepared_registered2')
    c.shutdown()
def test_can_register_udt_before_connecting(self):
    """Test the registration of UDTs before session creation"""
    # set up two keyspaces, each with its own shape of the 'user' UDT
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(wait_for_all_pools=True)
    s.execute("\n CREATE KEYSPACE udt_test_register_before_connecting\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    s.set_keyspace('udt_test_register_before_connecting')
    s.execute('CREATE TYPE user (age int, name text)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    s.execute("\n CREATE KEYSPACE udt_test_register_before_connecting2\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    s.set_keyspace('udt_test_register_before_connecting2')
    s.execute('CREATE TYPE user (state text, is_cool boolean)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    c.shutdown()

    # register both mappings on a brand-new, not-yet-connected cluster
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    User1 = namedtuple('user', ('age', 'name'))
    User2 = namedtuple('user', ('state', 'is_cool'))
    c.register_user_type('udt_test_register_before_connecting', 'user', User1)
    c.register_user_type('udt_test_register_before_connecting2', 'user', User2)
    s = c.connect(wait_for_all_pools=True)

    # first keyspace round-trips with User1
    s.set_keyspace('udt_test_register_before_connecting')
    s.execute('INSERT INTO mytable (a, b) VALUES (%s, %s)', (0, User1(42, 'bob')))
    result = s.execute('SELECT b FROM mytable WHERE a=0')
    row = result[0]
    self.assertEqual(42, row.b.age)
    self.assertEqual('bob', row.b.name)
    self.assertTrue(type(row.b) is User1)

    # second keyspace round-trips with User2
    s.set_keyspace('udt_test_register_before_connecting2')
    s.execute('INSERT INTO mytable (a, b) VALUES (%s, %s)', (0, User2('Texas', True)))
    result = s.execute('SELECT b FROM mytable WHERE a=0')
    row = result[0]
    self.assertEqual('Texas', row.b.state)
    self.assertEqual(True, row.b.is_cool)
    self.assertTrue(type(row.b) is User2)

    s.execute('DROP KEYSPACE udt_test_register_before_connecting')
    s.execute('DROP KEYSPACE udt_test_register_before_connecting2')
    c.shutdown()
def test_can_insert_prepared_unregistered_udts(self):
    """Test the insertion of prepared, unregistered UDTs"""
    c = Cluster(protocol_version=PROTOCOL_VERSION)
    s = c.connect(self.keyspace_name, wait_for_all_pools=True)

    # first keyspace: user(age, name) — no register_user_type call
    s.execute('CREATE TYPE user (age int, name text)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    User = namedtuple('user', ('age', 'name'))
    insert = s.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    s.execute(insert, (0, User(42, 'bob')))
    select = s.prepare('SELECT b FROM mytable WHERE a=?')
    result = s.execute(select, (0,))
    row = result[0]
    self.assertEqual(42, row.b.age)
    self.assertEqual('bob', row.b.name)

    # second keyspace: same UDT name, different shape
    s.execute("\n CREATE KEYSPACE udt_test_prepared_unregistered2\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    s.set_keyspace('udt_test_prepared_unregistered2')
    s.execute('CREATE TYPE user (state text, is_cool boolean)')
    s.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    User = namedtuple('user', ('state', 'is_cool'))
    insert = s.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    s.execute(insert, (0, User('Texas', True)))
    select = s.prepare('SELECT b FROM mytable WHERE a=?')
    result = s.execute(select, (0,))
    row = result[0]
    self.assertEqual('Texas', row.b.state)
    self.assertEqual(True, row.b.is_cool)

    s.execute('DROP KEYSPACE udt_test_prepared_unregistered2')
    c.shutdown()
def test_can_insert_prepared_registered_udts(self):
    """UDTs inserted via prepared statements map to their registered classes on read."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.execute('CREATE TYPE user (age int, name text)')
    User = namedtuple('user', ('age', 'name'))
    cluster.register_user_type(self.keyspace_name, 'user', User)
    session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    insert = session.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    session.execute(insert, (0, User(42, 'bob')))
    select = session.prepare('SELECT b FROM mytable WHERE a=?')
    row = session.execute(select, (0,))[0]
    self.assertEqual(42, row.b.age)
    self.assertEqual('bob', row.b.name)
    self.assertTrue(type(row.b) is User)

    # Second keyspace with a differently-shaped 'user' type and its own mapping.
    session.execute("\n CREATE KEYSPACE udt_test_prepared_registered2\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    session.set_keyspace('udt_test_prepared_registered2')
    session.execute('CREATE TYPE user (state text, is_cool boolean)')
    User = namedtuple('user', ('state', 'is_cool'))
    cluster.register_user_type('udt_test_prepared_registered2', 'user', User)
    session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    insert = session.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    session.execute(insert, (0, User('Texas', True)))
    select = session.prepare('SELECT b FROM mytable WHERE a=?')
    row = session.execute(select, (0,))[0]
    self.assertEqual('Texas', row.b.state)
    self.assertEqual(True, row.b.is_cool)
    self.assertTrue(type(row.b) is User)

    session.execute('DROP KEYSPACE udt_test_prepared_registered2')
    cluster.shutdown()
def test_can_insert_udts_with_nulls(self):
    """UDT fields round-trip null, empty-string, and empty-blob values."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.execute('CREATE TYPE user (a text, b int, c uuid, d blob)')
    User = namedtuple('user', ('a', 'b', 'c', 'd'))
    cluster.register_user_type(self.keyspace_name, 'user', User)
    session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    insert = session.prepare('INSERT INTO mytable (a, b) VALUES (0, ?)')

    # All-null UDT, read back via both simple and prepared selects.
    session.execute(insert, [User(None, None, None, None)])
    results = session.execute('SELECT b FROM mytable WHERE a=0')
    self.assertEqual((None, None, None, None), results[0].b)
    select = session.prepare('SELECT b FROM mytable WHERE a=0')
    self.assertEqual((None, None, None, None), session.execute(select)[0].b)

    # Empty (non-null) text and blob are preserved as-is.
    session.execute(insert, [User('', None, None, six.binary_type())])
    results = session.execute('SELECT b FROM mytable WHERE a=0')
    self.assertEqual(('', None, None, six.binary_type()), results[0].b)
    cluster.shutdown()
def test_can_insert_udts_with_varying_lengths(self):
    """Extra-lengthy UDTs (up to 254 fields) insert and read back correctly."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    MAX_TEST_LENGTH = 254
    # Type with fields v_0 .. v_253, all int.
    session.execute('CREATE TYPE lengthy_udt ({0})'.format(
        ', '.join(['v_{0} int'.format(i) for i in range(MAX_TEST_LENGTH)])))
    session.execute('CREATE TABLE mytable (k int PRIMARY KEY, v frozen<lengthy_udt>)')
    udt = namedtuple('lengthy_udt', tuple(['v_{0}'.format(i) for i in range(MAX_TEST_LENGTH)]))
    cluster.register_user_type(self.keyspace_name, 'lengthy_udt', udt)
    for i in (0, 1, 2, 3, MAX_TEST_LENGTH):
        # First i fields populated with their index, the rest left null.
        params = [j for j in range(i)] + [None] * (MAX_TEST_LENGTH - i)
        created_udt = udt(*params)
        session.execute('INSERT INTO mytable (k, v) VALUES (0, %s)', (created_udt,))
        result = session.execute('SELECT v FROM mytable WHERE k=0')[0]
        self.assertEqual(created_udt, result.v)
    cluster.shutdown()
def test_can_insert_nested_registered_udts(self):
    """UDTs nested to depth 16 insert correctly when every level is registered."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.row_factory = dict_factory
    MAX_NESTING_DEPTH = 16
    self.nested_udt_schema_helper(session, MAX_NESTING_DEPTH)
    # depth_0 is the leaf; each depth_{i+1} wraps the previous level in 'value'.
    udts = [namedtuple('depth_0', ('age', 'name'))]
    cluster.register_user_type(self.keyspace_name, 'depth_0', udts[0])
    for i in range(MAX_NESTING_DEPTH):
        udts.append(namedtuple('depth_{0}'.format(i + 1), 'value'))
        cluster.register_user_type(self.keyspace_name, 'depth_{0}'.format(i + 1), udts[i + 1])
    self.nested_udt_verification_helper(session, MAX_NESTING_DEPTH, udts)
    cluster.shutdown()
def test_can_insert_nested_unregistered_udts(self):
    """UDTs nested to depth 16 insert correctly with no mapping classes registered."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.row_factory = dict_factory
    MAX_NESTING_DEPTH = 16
    self.nested_udt_schema_helper(session, MAX_NESTING_DEPTH)
    udts = [namedtuple('depth_0', ('age', 'name'))]
    for i in range(MAX_NESTING_DEPTH):
        udts.append(namedtuple('depth_{0}'.format(i + 1), 'value'))
    # Insert at several nesting depths and verify the round-trip at each.
    for depth in (0, 1, 2, 3, MAX_NESTING_DEPTH):
        udt = self.nested_udt_creation_helper(udts, depth)
        insert = session.prepare('INSERT INTO mytable (k, v_{0}) VALUES (0, ?)'.format(depth))
        session.execute(insert, [udt])
        result = session.execute('SELECT v_{0} FROM mytable WHERE k=0'.format(depth))[0]
        self.assertEqual(udt, result['v_{0}'.format(depth)])
    cluster.shutdown()
def test_can_insert_nested_registered_udts_with_different_namedtuples(self):
    """Nested UDTs work when the namedtuple names differ from the CQL type names."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.row_factory = dict_factory
    MAX_NESTING_DEPTH = 16
    self.nested_udt_schema_helper(session, MAX_NESTING_DEPTH)
    # Python classes are named level_N while the CQL types are depth_N.
    udts = [namedtuple('level_0', ('age', 'name'))]
    cluster.register_user_type(self.keyspace_name, 'depth_0', udts[0])
    for i in range(MAX_NESTING_DEPTH):
        udts.append(namedtuple('level_{0}'.format(i + 1), 'value'))
        cluster.register_user_type(self.keyspace_name, 'depth_{0}'.format(i + 1), udts[i + 1])
    self.nested_udt_verification_helper(session, MAX_NESTING_DEPTH, udts)
    cluster.shutdown()
def test_raise_error_on_nonexisting_udts(self):
    """Registering against a missing keyspace/type or using an unknown type raises."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    User = namedtuple('user', ('age', 'name'))
    with self.assertRaises(UserTypeDoesNotExist):
        cluster.register_user_type('some_bad_keyspace', 'user', User)
    with self.assertRaises(UserTypeDoesNotExist):
        cluster.register_user_type('system', 'user', User)
    # Server-side rejection: the 'user' type was never created here.
    with self.assertRaises(InvalidRequest):
        session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    cluster.shutdown()
def test_can_insert_udt_all_datatypes(self):
    """Every member of PRIMITIVE_DATATYPES round-trips through a single UDT."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    # One single-letter field per primitive type: a, b, c, ...
    start_index = ord('a')
    alpha_type_list = ['{0} {1}'.format(chr(start_index + i), datatype)
                       for i, datatype in enumerate(PRIMITIVE_DATATYPES)]
    session.execute('\n CREATE TYPE alldatatypes ({0})\n '.format(', '.join(alpha_type_list)))
    session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)')
    alphabet_list = ['{0}'.format(chr(i))
                     for i in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES))]
    Alldatatypes = namedtuple('alldatatypes', alphabet_list)
    cluster.register_user_type(self.keyspace_name, 'alldatatypes', Alldatatypes)
    params = [get_sample(datatype) for datatype in PRIMITIVE_DATATYPES]
    insert = session.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    session.execute(insert, (0, Alldatatypes(*params)))
    row = session.execute('SELECT * FROM mytable')[0].b
    for expected, actual in zip(params, row):
        self.assertEqual(expected, actual)
    cluster.shutdown()
def test_can_insert_udt_all_collection_datatypes(self):
    """Every COLLECTION_TYPES x PRIMITIVE_DATATYPES_KEYS combination round-trips through a UDT."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    start_index = ord('a')
    alpha_type_list = []
    for i, collection_type in enumerate(COLLECTION_TYPES):
        for j, datatype in enumerate(PRIMITIVE_DATATYPES_KEYS):
            if collection_type == 'map':
                # Maps need both a key and a value type.
                type_string = '{0}_{1} {2}<{3}, {3}>'.format(chr(start_index + i), chr(start_index + j), collection_type, datatype)
            elif collection_type == 'tuple':
                # Tuples must be frozen inside a UDT.
                type_string = '{0}_{1} frozen<{2}<{3}>>'.format(chr(start_index + i), chr(start_index + j), collection_type, datatype)
            else:
                type_string = '{0}_{1} {2}<{3}>'.format(chr(start_index + i), chr(start_index + j), collection_type, datatype)
            alpha_type_list.append(type_string)
    session.execute('\n CREATE TYPE alldatatypes ({0})\n '.format(', '.join(alpha_type_list)))
    session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<alldatatypes>)')
    alphabet_list = ['{0}_{1}'.format(chr(i), chr(j))
                     for i in range(ord('a'), ord('a') + len(COLLECTION_TYPES))
                     for j in range(ord('a'), ord('a') + len(PRIMITIVE_DATATYPES_KEYS))]
    Alldatatypes = namedtuple('alldatatypes', alphabet_list)
    cluster.register_user_type(self.keyspace_name, 'alldatatypes', Alldatatypes)
    params = [get_collection_sample(collection_type, datatype)
              for collection_type in COLLECTION_TYPES
              for datatype in PRIMITIVE_DATATYPES_KEYS]
    insert = session.prepare('INSERT INTO mytable (a, b) VALUES (?, ?)')
    session.execute(insert, (0, Alldatatypes(*params)))
    row = session.execute('SELECT * FROM mytable')[0].b
    for expected, actual in zip(params, row):
        self.assertEqual(expected, actual)
    cluster.shutdown()
def test_can_insert_nested_collections(self):
    """Nested COLLECTION_TYPES insert into tables and UDTs (requires C* >= 2.1.3)."""
    if self.cass_version < (2, 1, 3):
        raise unittest.SkipTest('Support for nested collections was introduced in Cassandra 2.1.3')
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(self.keyspace_name, wait_for_all_pools=True)
    session.encoder.mapping[tuple] = session.encoder.cql_encode_tuple
    name = self._testMethodName
    session.execute('\n CREATE TYPE %s (\n m frozen<map<int,text>>,\n t tuple<int,text>,\n l frozen<list<int>>,\n s frozen<set<int>>\n )' % name)
    session.execute('\n CREATE TYPE %s_nested (\n m frozen<map<int,text>>,\n t tuple<int,text>,\n l frozen<list<int>>,\n s frozen<set<int>>,\n u frozen<%s>\n )' % (name, name))
    session.execute('\n CREATE TABLE %s (\n k int PRIMARY KEY,\n map_map map<frozen<map<int,int>>, frozen<map<int,int>>>,\n map_set map<frozen<set<int>>, frozen<set<int>>>,\n map_list map<frozen<list<int>>, frozen<list<int>>>,\n map_tuple map<frozen<tuple<int, int>>, frozen<tuple<int>>>,\n map_udt map<frozen<%s_nested>, frozen<%s>>,\n )' % (name, name, name))
    # insert_select_column(session, table, column, value) inserts then re-reads the value.
    validate = partial(self.insert_select_column, session, name)
    validate('map_map', OrderedMap([({1: 1, 2: 2}, {3: 3, 4: 4}), ({5: 5, 6: 6}, {7: 7, 8: 8})]))
    validate('map_set', OrderedMap([(set((1, 2)), set((3, 4))), (set((5, 6)), set((7, 8)))]))
    validate('map_list', OrderedMap([([1, 2], [3, 4]), ([5, 6], [7, 8])]))
    validate('map_tuple', OrderedMap([((1, 2), (3,)), ((4, 5), (6,))]))
    value = nested_collection_udt({1: 'v1', 2: 'v2'}, (3, 'v3'), [4, 5, 6, 7], set((8, 9, 10)))
    key = nested_collection_udt_nested(value.m, value.t, value.l, value.s, value)
    key2 = nested_collection_udt_nested({3: 'v3'}, value.t, value.l, value.s, value)
    validate('map_udt', OrderedMap([(key, value), (key2, value)]))
    cluster.shutdown()
def test_non_alphanum_identifiers(self):
    """Quoted identifiers containing non-alphanumeric characters work (PYTHON-413)."""
    s = self.session
    non_alphanum_name = 'test.field@#$%@%#!'
    type_name = 'type2'
    s.execute('CREATE TYPE "%s" ("%s" text)' % (non_alphanum_name, non_alphanum_name))
    s.execute('CREATE TYPE %s ("%s" text)' % (type_name, non_alphanum_name))
    s.execute('CREATE TABLE %s (k int PRIMARY KEY, non_alphanum_type_map map<frozen<"%s">, int>, alphanum_type_map map<frozen<%s>, int>)' % (self.table_name, non_alphanum_name, type_name))
    s.execute('INSERT INTO %s (k, non_alphanum_type_map, alphanum_type_map) VALUES (%s, {{"%s": \'nonalphanum\'}: 0}, {{"%s": \'alphanum\'}: 1})' % (self.table_name, 0, non_alphanum_name, non_alphanum_name))
    row = s.execute('SELECT * FROM %s' % (self.table_name,))[0]

    # A type whose *name* is non-alphanumeric maps to a plain tuple.
    k, v = row.non_alphanum_type_map.popitem()
    self.assertEqual(v, 0)
    self.assertEqual(k.__class__, tuple)
    self.assertEqual(k[0], 'nonalphanum')

    # A valid type name maps to a namedtuple-like class; the invalid field
    # name is exposed through a positional alias (field_0_).
    k, v = row.alphanum_type_map.popitem()
    self.assertEqual(v, 1)
    self.assertNotEqual(k.__class__, tuple)
    self.assertEqual(k[0], 'alphanum')
    self.assertEqual(k.field_0_, 'alphanum')
'Test to ensure that altered UDT\'s are properly surfaced without needing to restart the underlying session. @since 3.0.0 @jira_ticket PYTHON-226 @expected_result UDT\'s will reflect added columns without a session restart. @test_category data_types, udt'
@lessthancass30 def test_alter_udt(self):
self.session.set_keyspace(self.keyspace_name) self.session.execute('CREATE TYPE typetoalter (a int)') typetoalter = namedtuple('typetoalter', 'a') self.session.execute('CREATE TABLE {0} (pk int primary key, typetoalter frozen<typetoalter>)'.format(self.function_table_name)) insert_statement = self.session.prepare('INSERT INTO {0} (pk, typetoalter) VALUES (?, ?)'.format(self.function_table_name)) self.session.execute(insert_statement, [1, typetoalter(1)]) results = self.session.execute('SELECT * from {0}'.format(self.function_table_name)) for result in results: self.assertTrue(hasattr(result.typetoalter, 'a')) self.assertFalse(hasattr(result.typetoalter, 'b')) self.session.execute('ALTER TYPE typetoalter add b int') typetoalter = namedtuple('typetoalter', ('a', 'b')) self.session.execute(insert_statement, [2, typetoalter(2, 2)]) results = self.session.execute('SELECT * from {0}'.format(self.function_table_name)) for result in results: self.assertTrue(hasattr(result.typetoalter, 'a')) self.assertTrue(hasattr(result.typetoalter, 'b'))
def test_paging_state(self):
    """Validate the paging state API (PYTHON-200).

    Inserts 100 rows, pages through them 3 at a time — resuming each page
    from the previous page's ``paging_state`` — and verifies every row is
    seen exactly once.

    @since 3.7.0
    @jira_ticket PYTHON-200
    @test_category queries
    """
    statements_and_params = zip(cycle(['INSERT INTO test3rf.test (k, v) VALUES (%s, 0)']),
                                [(i,) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))
    list_all_results = []
    self.session.default_fetch_size = 3
    result_set = self.session.execute('SELECT * FROM test3rf.test')
    while result_set.has_more_pages:
        for row in result_set.current_rows:
            self.assertNotIn(row, list_all_results)
        list_all_results.extend(result_set.current_rows)
        page_state = result_set.paging_state
        result_set = self.session.execute('SELECT * FROM test3rf.test', paging_state=page_state)
    if len(result_set.current_rows) > 0:
        # BUG FIX: merge the final page with extend(), not append().
        # append() added the page's row *list* as a single element, so the
        # results contained a nested list and the count of 100 only held by
        # accident (99 flattened rows + 1 list element).
        list_all_results.extend(result_set.current_rows)
    self.assertEqual(len(list_all_results), 100)
def test_paging_callbacks(self):
    """Validate the paging callback API (PYTHON-733).

    Callbacks must fire exactly once per page, and fetch_size must be
    handled transparently for string, SimpleStatement, and prepared queries.
    """
    statements_and_params = zip(cycle(['INSERT INTO test3rf.test (k, v) VALUES (%s, 0)']),
                                [(i,) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))
    prepared = self.session.prepare('SELECT * FROM test3rf.test')

    for fetch_size in (2, 3, 7, 10, 99, 100, 101, 10000):
        self.session.default_fetch_size = fetch_size
        event = Event()

        def handle_page(rows, future, counter, number_of_calls):
            # One call per page; count rows, then chain the next fetch.
            next(number_of_calls)
            for row in rows:
                next(counter)
            if future.has_more_pages:
                future.start_fetching_next_page()
            else:
                event.set()

        def handle_error(err):
            event.set()
            self.fail(err)

        # Exercise all three statement forms at this fetch size.
        for statement in ('SELECT * FROM test3rf.test',
                          SimpleStatement('SELECT * FROM test3rf.test'),
                          prepared):
            event.clear()
            counter = count()
            number_of_calls = count()
            future = self.session.execute_async(statement, timeout=20)
            future.add_callbacks(callback=handle_page,
                                 callback_args=(future, counter, number_of_calls),
                                 errback=handle_error)
            event.wait()
            self.assertEqual(next(number_of_calls), (100 // fetch_size) + 1)
            self.assertEqual(next(counter), 100)
def test_fetch_size(self):
    """Per-statement fetch_size settings override the session default."""
    statements_and_params = zip(cycle(['INSERT INTO test3rf.test (k, v) VALUES (%s, 0)']),
                                [(i,) for i in range(100)])
    execute_concurrent(self.session, list(statements_and_params))
    prepared = self.session.prepare('SELECT * FROM test3rf.test')

    def expect_pages(statement, more):
        # Execute and assert whether the result set is paged.
        result = self.session.execute(statement, [])
        if more:
            self.assertTrue(result.has_more_pages)
        else:
            self.assertFalse(result.has_more_pages)

    # Session default alone controls paging (None disables paging).
    self.session.default_fetch_size = 10
    expect_pages(prepared, True)
    self.session.default_fetch_size = 2000
    expect_pages(prepared, False)
    self.session.default_fetch_size = None
    expect_pages(prepared, False)

    # PreparedStatement.fetch_size overrides the session default.
    self.session.default_fetch_size = 10
    prepared.fetch_size = 2000
    expect_pages(prepared, False)
    prepared.fetch_size = None
    expect_pages(prepared, False)
    prepared.fetch_size = 10
    expect_pages(prepared, True)

    # The fetch_size in effect at bind time is captured by the bound statement.
    prepared.fetch_size = 2000
    expect_pages(prepared.bind([]), False)
    prepared.fetch_size = None
    expect_pages(prepared.bind([]), False)
    prepared.fetch_size = 10
    bound = prepared.bind([])
    expect_pages(bound, True)

    # BoundStatement.fetch_size can be changed after binding.
    bound.fetch_size = 2000
    expect_pages(bound, False)
    bound.fetch_size = None
    expect_pages(bound, False)
    bound.fetch_size = 10
    expect_pages(bound, True)

    # SimpleStatement fetch_size, set at construction or via the attribute.
    expect_pages(SimpleStatement('SELECT * FROM test3rf.test', fetch_size=None), False)
    expect_pages(SimpleStatement('SELECT * FROM test3rf.test'), True)
    simple = SimpleStatement('SELECT * FROM test3rf.test')
    simple.fetch_size = None
    expect_pages(simple, False)
def test_trace_prints_okay(self):
    """A query trace and each of its events stringify without raising."""
    statement = SimpleStatement('SELECT * FROM system.local')
    rs = self.session.execute(statement, trace=True)
    trace = rs.get_query_trace()
    self.assertTrue(trace.events)
    # Exercise __str__ on the trace and every event; failure here would raise.
    str(trace)
    for event in trace.events:
        str(event)
def test_row_error_message(self):
    """Column deserialization failure raises DriverException with a decoding message (PYTHON-361)."""
    self.session.execute('CREATE TABLE {0}.{1} (k int PRIMARY KEY, v timestamp)'.format(self.keyspace_name, self.function_table_name))
    # An out-of-range timestamp value that the driver cannot decode on read.
    ss = SimpleStatement('INSERT INTO {0}.{1} (k, v) VALUES (1, 1000000000000000)'.format(self.keyspace_name, self.function_table_name))
    self.session.execute(ss)
    with self.assertRaises(DriverException) as context:
        self.session.execute('SELECT * FROM {0}.{1}'.format(self.keyspace_name, self.function_table_name))
    self.assertIn('Failed decoding result column', str(context.exception))
@local
@greaterthanprotocolv3
def test_client_ip_in_trace(self):
    """Query traces include the client IP on C* >= 2.2 (PYTHON-435)."""
    statement = SimpleStatement('SELECT * FROM system.local')
    response_future = self.session.execute_async(statement, trace=True)
    response_future.result()
    trace = response_future.get_query_trace(max_wait=10.0)
    client_ip = trace.client
    # Loopback address, possibly with a per-node suffix.
    pat = re.compile('127.0.0.\\d{1,3}')
    self.assertIsNotNone(client_ip, 'Client IP was not set in trace with C* >= 2.2')
    self.assertTrue(pat.match(client_ip), 'Client IP from trace did not match the expected value')
def test_trace_cl(self):
    """The consistency level passed to get_query_trace is honored (PYTHON-435)."""
    statement = SimpleStatement('SELECT * FROM system.local')
    response_future = self.session.execute_async(statement, trace=True)
    response_future.result()
    # CL THREE cannot be satisfied by the test cluster; TWO can.
    with self.assertRaises(Unavailable):
        response_future.get_query_trace(query_cl=ConsistencyLevel.THREE)
    self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.TWO).trace_id)

    response_future = self.session.execute_async(statement, trace=True)
    response_future.result()
    self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ONE).trace_id)

    response_future = self.session.execute_async(statement, trace=True)
    response_future.result()
    # ANY is invalid for reads; QUORUM succeeds.
    with self.assertRaises(InvalidRequest):
        self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.ANY).trace_id)
    self.assertIsNotNone(response_future.get_query_trace(max_wait=2.0, query_cl=ConsistencyLevel.QUORUM).trace_id)
@notwindows
def test_incomplete_query_trace(self):
    """Partial traces populate with wait_for_complete=False and raise otherwise (PYTHON-438)."""
    self.session.execute('CREATE TABLE {0} (k INT, i INT, PRIMARY KEY(k, i))'.format(self.keyspace_table_name))
    self.session.execute('INSERT INTO {0} (k, i) VALUES (0, 1)'.format(self.keyspace_table_name))
    response_future = self.session.execute_async('SELECT i FROM {0} WHERE k=0'.format(self.keyspace_table_name), trace=True)
    response_future.result()
    self.assertEqual(len(response_future._query_traces), 1)
    trace = response_future._query_traces[0]
    self.assertTrue(self._wait_for_trace_to_populate(trace.trace_id))
    # Delete the duration column to simulate an incomplete trace.
    delete_statement = SimpleStatement('DELETE duration FROM system_traces.sessions WHERE session_id = {0}'.format(trace.trace_id), consistency_level=ConsistencyLevel.ALL)
    self.session.execute(delete_statement)
    self.assertTrue(self._wait_for_trace_to_delete(trace.trace_id))
    # Waiting for completion must now fail ...
    self.assertRaises(TraceUnavailable, trace.populate, max_wait=0.2, wait_for_complete=True)
    self.assertFalse(trace.events)
    # ... while a non-blocking populate returns everything except duration.
    trace.populate(wait_for_complete=False)
    self.assertIsNone(trace.duration)
    self.assertIsNotNone(trace.trace_id)
    self.assertIsNotNone(trace.request_type)
    self.assertIsNotNone(trace.parameters)
    self.assertTrue(trace.events)
    self.assertIsNotNone(trace.started_at)
def test_query_by_id(self):
    """ResultSet.column_types is populated, including collection subtypes (PYTHON-648)."""
    create_table = 'CREATE TABLE {0}.{1} (id int primary key, m map<int, text>)'.format(self.keyspace_name, self.function_table_name)
    self.session.execute(create_table)
    self.session.execute((((('insert into ' + self.keyspace_name) + '.') + self.function_table_name) + " (id, m) VALUES ( 1, {1: 'one', 2: 'two', 3:'three'})"))
    results = self.session.execute('select id, m from {0}.{1}'.format(self.keyspace_name, self.function_table_name))
    types = results.column_types
    self.assertIsNotNone(types)
    # Top-level columns: int key, map value.
    self.assertEqual(types[0].typename, 'int')
    self.assertEqual(types[1].typename, 'map')
    self.assertEqual(types[0].cassname, 'Int32Type')
    self.assertEqual(types[1].cassname, 'MapType')
    # Only the map carries subtypes: its key and value types.
    self.assertEqual(len(types[0].subtypes), 0)
    self.assertEqual(len(types[1].subtypes), 2)
    self.assertEqual(types[1].subtypes[0].typename, 'int')
    self.assertEqual(types[1].subtypes[1].typename, 'varchar')
    self.assertEqual(types[1].subtypes[0].cassname, 'Int32Type')
    self.assertEqual(types[1].subtypes[1].cassname, 'VarcharType')
def test_column_names(self):
    """ResultSet.column_names matches the table definition order (PYTHON-439)."""
    create_table = 'CREATE TABLE {0}.{1}(\n user TEXT,\n game TEXT,\n year INT,\n month INT,\n day INT,\n score INT,\n PRIMARY KEY (user, game, year, month, day)\n )'.format(self.keyspace_name, self.function_table_name)
    self.session.execute(create_table)
    result_set = self.session.execute('SELECT * FROM {0}.{1}'.format(self.keyspace_name, self.function_table_name))
    self.assertIsNotNone(result_set.column_types)
    self.assertEqual(result_set.column_names,
                     [u'user', u'game', u'year', u'month', u'day', u'score'])
def test_routing_key(self):
    """BoundStatement.routing_key is derived from the bound partition key."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1, None))
    # k=1 serialized as a 4-byte big-endian int.
    self.assertEqual(bound.routing_key, '\x00\x00\x00\x01')
def test_empty_routing_key_indexes(self):
    """With routing_key_indexes cleared, the routing key is None."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    prepared.routing_key_indexes = None
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1, None))
    self.assertEqual(bound.routing_key, None)
def test_predefined_routing_key(self):
    """_set_routing_key() overrides the computed routing key."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1, None))
    bound._set_routing_key('fake_key')
    self.assertEqual(bound.routing_key, 'fake_key')
def test_multiple_routing_key_indexes(self):
    """A multi-element routing_key_indexes produces a composite routing key, in index order."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    prepared.routing_key_indexes = [0, 1]
    bound = prepared.bind((1, 2))
    self.assertEqual(bound.routing_key, '\x00\x04\x00\x00\x00\x01\x00\x00\x04\x00\x00\x00\x02\x00')
    # Reversing the indexes reverses the component order.
    prepared.routing_key_indexes = [1, 0]
    bound = prepared.bind((1, 2))
    self.assertEqual(bound.routing_key, '\x00\x04\x00\x00\x00\x02\x00\x00\x04\x00\x00\x00\x01\x00')
def test_bound_keyspace(self):
    """BoundStatement.keyspace reflects the prepared statement's keyspace."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1, 2))
    self.assertEqual(bound.keyspace, 'test3rf')
def test_prepared_metadata_generation(self):
    """Result metadata is consistent across protocol versions (PYTHON-71).

    Protocol v1 retrieves result metadata on every execution; later versions
    set it once at prepare time and reuse it. The observable row shape must
    be identical either way.
    """
    base_line = None
    for proto_version in get_supported_protocol_versions():
        beta_flag = proto_version in ProtocolVersion.BETA_VERSIONS
        cluster = Cluster(protocol_version=proto_version, allow_beta_protocol_version=beta_flag)
        session = cluster.connect()
        select_statement = session.prepare('SELECT * FROM system.local')
        if proto_version == 1:
            # v1 does not populate metadata at prepare time.
            self.assertEqual(select_statement.result_metadata, None)
        else:
            self.assertNotEqual(select_statement.result_metadata, None)
        results = session.execute_async(select_statement).result()
        if base_line is None:
            base_line = results[0]._asdict().keys()
        else:
            self.assertEqual(base_line, results[0]._asdict().keys())
        cluster.shutdown()
def test_prepare_on_all_hosts(self):
    """With prepare_on_all_hosts=False, other hosts re-prepare on demand (PYTHON-556)."""
    white_list = ForcedHostSwitchPolicy()
    clus = Cluster(load_balancing_policy=white_list, protocol_version=PROTOCOL_VERSION,
                   prepare_on_all_hosts=False, reprepare_on_up=False)
    self.addCleanup(clus.shutdown)
    session = clus.connect(wait_for_all_pools=True)
    # Capture driver debug logs to count re-preparation messages.
    mock_handler = MockLoggingHandler()
    logging.getLogger(cluster.__name__).addHandler(mock_handler)
    select_statement = session.prepare('SELECT * FROM system.local')
    # The policy forces each execution onto a different host.
    for _ in range(3):
        session.execute(select_statement)
    self.assertEqual(2, mock_handler.get_message_count('debug', 'Re-preparing'))
def test_prepare_batch_statement(self):
    """A prepared statement inside a BatchStatement works across host switches (PYTHON-706)."""
    white_list = ForcedHostSwitchPolicy()
    clus = Cluster(load_balancing_policy=white_list, protocol_version=PROTOCOL_VERSION,
                   prepare_on_all_hosts=False, reprepare_on_up=False)
    self.addCleanup(clus.shutdown)
    table = ('test3rf.%s' % self._testMethodName.lower())
    session = clus.connect(wait_for_all_pools=True)
    session.execute(('DROP TABLE IF EXISTS %s' % table))
    session.execute(('CREATE TABLE %s (k int PRIMARY KEY, v int )' % table))
    insert_statement = session.prepare(('INSERT INTO %s (k, v) VALUES (?, ?)' % table))
    batch = BatchStatement(consistency_level=ConsistencyLevel.ONE)
    batch.add(insert_statement, (1, 2))
    session.execute(batch)
    select_results = session.execute(SimpleStatement(('SELECT * FROM %s WHERE k = 1' % table),
                                                     consistency_level=ConsistencyLevel.ALL))
    self.assertEqual((1, 2), select_results[0][:2])
def test_prepare_batch_statement_after_alter(self):
    """Prepared statements in batches survive an ALTER TABLE on the target (PYTHON-706)."""
    white_list = ForcedHostSwitchPolicy()
    clus = Cluster(load_balancing_policy=white_list, protocol_version=PROTOCOL_VERSION,
                   prepare_on_all_hosts=False, reprepare_on_up=False)
    self.addCleanup(clus.shutdown)
    table = ('test3rf.%s' % self._testMethodName.lower())
    session = clus.connect(wait_for_all_pools=True)
    session.execute(('DROP TABLE IF EXISTS %s' % table))
    session.execute(('CREATE TABLE %s (k int PRIMARY KEY, a int, b int, d int)' % table))
    insert_statement = session.prepare(('INSERT INTO %s (k, b, d) VALUES (?, ?, ?)' % table))
    # Alter the table *after* preparing so the statement metadata is stale.
    session.execute(('ALTER TABLE %s ADD c int' % table))
    values_to_insert = [(1, 2, 3), (2, 3, 4), (3, 4, 5), (4, 5, 6)]
    # Ten executions cycle through the four value tuples and multiple hosts.
    for i in range(10):
        value_to_insert = values_to_insert[i % len(values_to_insert)]
        batch = BatchStatement(consistency_level=ConsistencyLevel.ONE)
        batch.add(insert_statement, value_to_insert)
        session.execute(batch)
    select_results = session.execute(('SELECT * FROM %s' % table))
    # Columns come back as (k, a, b, c, d); only k, b, d were written.
    expected_results = [(1, None, 2, None, 3), (2, None, 3, None, 4),
                        (3, None, 4, None, 5), (4, None, 5, None, 6)]
    self.assertEqual(set(expected_results), set(select_results._current_rows))
def test_simple_statement(self):
    """Highlight the format of printing SimpleStatements"""
    stmt = SimpleStatement('SELECT * FROM test3rf.test',
                           consistency_level=ConsistencyLevel.ONE)
    expected = '<SimpleStatement query="SELECT * FROM test3rf.test", consistency=ONE>'
    self.assertEqual(str(stmt), expected)
def test_prepared_statement(self):
    """Highlight the difference between Prepared and Bound statements"""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()

    prepared = session.prepare('INSERT INTO test3rf.test (k, v) VALUES (?, ?)')
    prepared.consistency_level = ConsistencyLevel.ONE
    self.assertEqual(
        str(prepared),
        '<PreparedStatement query="INSERT INTO test3rf.test (k, v) VALUES (?, ?)", consistency=ONE>')

    # Binding values changes the repr class and surfaces the values.
    bound = prepared.bind((1, 2))
    self.assertEqual(
        str(bound),
        '<BoundStatement query="INSERT INTO test3rf.test (k, v) VALUES (?, ?)", values=(1, 2), consistency=ONE>')

    cluster.shutdown()
def setUp(self):
    """Test is skipped if run with cql version < 2"""
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest(
            'Protocol 2.0+ is required for Lightweight transactions, currently testing against %r'
            % (PROTOCOL_VERSION,))
    self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    self.session = self.cluster.connect()
    # Scratch table used by the LWT tests; dropped again in tearDown.
    ddl = '\n CREATE TABLE test3rf.lwt (\n k int PRIMARY KEY,\n v int )'
    self.session.execute(ddl)
def tearDown(self):
    """Drop the scratch table and shut the cluster down."""
    self.session.execute('DROP TABLE test3rf.lwt')
    self.cluster.shutdown()
def test_no_connection_refused_on_timeout(self):
    """
    Test for PYTHON-91 "Connection closed after LWT timeout"

    Verifies that the connection to the cluster is not shut down when a
    timeout occurs. Number of iterations can be specified with the
    LWT_ITERATIONS environment variable. Default value is 1000.
    """
    insert_statement = self.session.prepare(
        'INSERT INTO test3rf.lwt (k, v) VALUES (0, 0) IF NOT EXISTS')
    delete_statement = self.session.prepare(
        'DELETE FROM test3rf.lwt WHERE k = 0 IF EXISTS')

    iterations = int(os.getenv('LWT_ITERATIONS', 1000))

    # Alternate conditional inserts and deletes on the same key to provoke
    # Paxos contention and, with enough iterations, write timeouts.
    statements_and_params = []
    for _ in range(iterations):
        statements_and_params.append((insert_statement, ()))
        statements_and_params.append((delete_statement, ()))

    received_timeout = False
    for success, result in execute_concurrent(
            self.session, statements_and_params, raise_on_first_error=False):
        if success:
            continue
        failure_name = type(result).__name__
        if failure_name == 'NoHostAvailable':
            # This is the PYTHON-91 regression: the connection was dropped.
            self.fail('PYTHON-91: Disconnected from Cassandra: %s' % result.message)
        elif failure_name in ('WriteTimeout', 'WriteFailure'):
            received_timeout = True
        elif failure_name in ('ReadTimeout', 'ReadFailure'):
            # Read-side timeouts are tolerated; they don't prove the point.
            pass
        else:
            self.fail('Unexpected exception %s: %s' % (failure_name, result.message))

    # The test is only meaningful if at least one write actually timed out.
    self.assertTrue(received_timeout)
def test_rk_from_bound(self):
    """batch routing key is inherited from BoundStatement"""
    bound_stmt = self.prepared.bind((1, None))
    batch = BatchStatement()
    batch.add(bound_stmt)
    self.assertIsNotNone(batch.routing_key)
    self.assertEqual(batch.routing_key, bound_stmt.routing_key)
def test_rk_from_simple(self):
    """batch routing key is inherited from SimpleStatement"""
    batch = BatchStatement()
    batch.add(self.simple_statement)
    self.assertIsNotNone(batch.routing_key)
    self.assertEqual(batch.routing_key, self.simple_statement.routing_key)
def test_inherit_first_rk_bound(self):
    """compound batch inherits the routing key of the first statement that has one (bound statement first)"""
    bound_stmt = self.prepared.bind((100000000, None))
    batch = BatchStatement()
    # A plain string has no routing key, so it must be skipped over.
    batch.add('ss with no rk')
    batch.add(bound_stmt)
    batch.add(self.simple_statement)
    for i in range(3):
        batch.add(self.prepared, (i, i))
    self.assertIsNotNone(batch.routing_key)
    self.assertEqual(batch.routing_key, bound_stmt.routing_key)
def test_inherit_first_rk_simple_statement(self):
    """compound batch inherits the routing key of the first statement that has one (SimpleStatement first)"""
    bound_stmt = self.prepared.bind((1, None))
    batch = BatchStatement()
    # A plain string has no routing key, so it must be skipped over.
    batch.add('ss with no rk')
    batch.add(self.simple_statement)
    batch.add(bound_stmt)
    for i in range(10):
        batch.add(self.prepared, (i, i))
    self.assertIsNotNone(batch.routing_key)
    self.assertEqual(batch.routing_key, self.simple_statement.routing_key)
def test_inherit_first_rk_prepared_param(self):
    """compound batch inherits the routing key of the first statement that has one (prepared statement first)"""
    bound_stmt = self.prepared.bind((2, None))
    batch = BatchStatement()
    # A plain string has no routing key, so it must be skipped over.
    batch.add('ss with no rk')
    batch.add(self.prepared, (1, 0))
    batch.add(bound_stmt)
    batch.add(self.simple_statement)
    self.assertIsNotNone(batch.routing_key)
    self.assertEqual(batch.routing_key, self.prepared.bind((1, 0)).routing_key)
def test_mv_filtering(self):
    """
    Test to ensure that cql filtering where clauses are properly supported in the python driver.

    test_mv_filtering tests that various complex MV where clauses produce the correct results.
    It also validates that these results and the grammar is supported appropriately.

    @since 3.0.0
    @jira_ticket PYTHON-399
    @expected_result Materialized view where clauses should produce the appropriate results.

    @test_category materialized_view
    """
    create_table = 'CREATE TABLE {0}.scores(\n user TEXT,\n game TEXT,\n year INT,\n month INT,\n day INT,\n score INT,\n PRIMARY KEY (user, game, year, month, day)\n )'.format(self.keyspace_name)
    self.session.execute(create_table)

    create_mv_alltime = 'CREATE MATERIALIZED VIEW {0}.alltimehigh AS\n SELECT * FROM {0}.scores\n WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY (game, score, user, year, month, day)\n WITH CLUSTERING ORDER BY (score DESC)'.format(self.keyspace_name)
    create_mv_dailyhigh = 'CREATE MATERIALIZED VIEW {0}.dailyhigh AS\n SELECT * FROM {0}.scores\n WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL\n PRIMARY KEY ((game, year, month, day), score, user)\n WITH CLUSTERING ORDER BY (score DESC)'.format(self.keyspace_name)
    create_mv_monthlyhigh = 'CREATE MATERIALIZED VIEW {0}.monthlyhigh AS\n SELECT * FROM {0}.scores\n WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY ((game, year, month), score, user, day)\n WITH CLUSTERING ORDER BY (score DESC)'.format(self.keyspace_name)
    create_mv_filtereduserhigh = "CREATE MATERIALIZED VIEW {0}.filtereduserhigh AS\n SELECT * FROM {0}.scores\n WHERE user in ('jbellis', 'pcmanus') AND game IS NOT NULL AND score IS NOT NULL AND year is NOT NULL AND day is not NULL and month IS NOT NULL\n PRIMARY KEY (game, score, user, year, month, day)\n WITH CLUSTERING ORDER BY (score DESC)".format(self.keyspace_name)
    self.session.execute(create_mv_alltime)
    self.session.execute(create_mv_dailyhigh)
    self.session.execute(create_mv_monthlyhigh)
    self.session.execute(create_mv_filtereduserhigh)

    # NOTE(review): the original INSERT literal was split across source lines
    # inside the quotes (a syntax error as extracted); normalized to one line.
    # Whitespace differences are irrelevant to CQL.
    prepared_insert = self.session.prepare(
        'INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ?, ?, ?, ?)'.format(self.keyspace_name))

    # Same data as the original hand-unrolled bind/execute pairs, table-driven.
    score_rows = [
        ('pcmanus', 'Coup', 2015, 5, 1, 4000),
        ('jbellis', 'Coup', 2015, 5, 3, 1750),
        ('yukim', 'Coup', 2015, 5, 3, 2250),
        ('tjake', 'Coup', 2015, 5, 3, 500),
        ('iamaleksey', 'Coup', 2015, 6, 1, 2500),
        ('tjake', 'Coup', 2015, 6, 2, 1000),
        ('pcmanus', 'Coup', 2015, 6, 2, 2000),
        ('jmckenzie', 'Coup', 2015, 6, 9, 2700),
        ('jbellis', 'Coup', 2015, 6, 20, 3500),
        ('jbellis', 'Checkers', 2015, 6, 20, 1200),
        ('jbellis', 'Chess', 2015, 6, 21, 3500),
        ('pcmanus', 'Chess', 2015, 1, 25, 3200),
    ]
    for row in score_rows:
        self.session.execute(prepared_insert.bind(row))

    def check_row(row, user, game, year, month, day, score):
        # Assert every column of a single result row at once.
        self.assertEqual(row.user, user)
        self.assertEqual(row.game, game)
        self.assertEqual(row.year, year)
        self.assertEqual(row.month, month)
        self.assertEqual(row.day, day)
        self.assertEqual(row.score, score)

    # All-time high: top score for 'Coup' is first (score DESC clustering).
    query_statement = SimpleStatement(
        "SELECT * FROM {0}.alltimehigh WHERE game='Coup'".format(self.keyspace_name),
        consistency_level=ConsistencyLevel.QUORUM)
    results = self.session.execute(query_statement)
    check_row(results[0], 'pcmanus', 'Coup', 2015, 5, 1, 4000)

    # Daily high: both scores for 'Coup' on 2015-06-02, highest first.
    # (This prepared-query literal was also split across source lines; normalized.)
    prepared_query = self.session.prepare(
        'SELECT * FROM {0}.dailyhigh WHERE game=? AND year=? AND month=? and day=?'.format(self.keyspace_name))
    results = self.session.execute(prepared_query.bind(('Coup', 2015, 6, 2)))
    check_row(results[0], 'pcmanus', 'Coup', 2015, 6, 2, 2000)
    check_row(results[1], 'tjake', 'Coup', 2015, 6, 2, 1000)

    # Monthly high with a score range filter, highest first.
    prepared_query = self.session.prepare(
        'SELECT * FROM {0}.monthlyhigh WHERE game=? AND year=? AND month=? and score >= ? and score <= ?'.format(self.keyspace_name))
    results = self.session.execute(prepared_query.bind(('Coup', 2015, 6, 2500, 3500)))
    check_row(results[0], 'jbellis', 'Coup', 2015, 6, 20, 3500)
    check_row(results[1], 'jmckenzie', 'Coup', 2015, 6, 9, 2700)
    check_row(results[2], 'iamaleksey', 'Coup', 2015, 6, 1, 2500)

    # Filtered view: only jbellis and pcmanus rows exist in this MV.
    query_statement = SimpleStatement(
        "SELECT * FROM {0}.filtereduserhigh WHERE game='Chess'".format(self.keyspace_name),
        consistency_level=ConsistencyLevel.QUORUM)
    results = self.session.execute(query_statement)
    check_row(results[0], 'jbellis', 'Chess', 2015, 6, 21, 3500)
    check_row(results[1], 'pcmanus', 'Chess', 2015, 1, 25, 3200)
def test_unicode(self):
    """
    Test to validate that unicode query strings are handled appropriately by various query types

    @since 3.0.0
    @jira_ticket PYTHON-334
    @expected_result no unicode exceptions are thrown

    @test_category query
    """
    unicode_text = u'Fran\xe7ois'
    insert_cql = u'INSERT INTO {0}.{1} (k, v) VALUES (%s, %s)'.format(
        self.keyspace_name, self.function_table_name)

    # Batch statement with a unicode query string.
    batch = BatchStatement(BatchType.LOGGED)
    batch.add(insert_cql, (0, unicode_text))
    self.session.execute(batch)

    # Plain (unprepared) unicode statement.
    self.session.execute(insert_cql, (0, unicode_text))

    # Prepared + bound unicode statement.
    prepared = self.session.prepare(
        u'INSERT INTO {0}.{1} (k, v) VALUES (?, ?)'.format(
            self.keyspace_name, self.function_table_name))
    self.session.execute(prepared.bind((1, unicode_text)))
def test_execute_concurrent_with_args_generator(self):
    """
    Test to validate that generator based results are surfaced correctly

    Repeatedly inserts data into a a table and attempts to query it. It then validates that the
    results are returned in the order expected

    @since 2.7.0
    @jira_ticket PYTHON-123
    @expected_result all data should be returned in order.

    @test_category queries:async
    """
    # Sizes straddle internal batching boundaries (0/1, around 100, around 200).
    for num_statements in (0, 1, 2, 7, 10, 99, 100, 101, 199, 200, 201):
        # Pass 1: concurrent inserts; the generator yields (success, result)
        # pairs; an INSERT has an empty result set.
        statement = SimpleStatement('INSERT INTO test3rf.test (k, v) VALUES (%s, %s)', consistency_level=ConsistencyLevel.QUORUM)
        parameters = [(i, i) for i in range(num_statements)]
        results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True)
        for (success, result) in results:
            self.assertTrue(success)
            self.assertFalse(result)
        # Pass 2: same inserts, consumed as ExecutionResult objects this time.
        results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True)
        for result in results:
            self.assertTrue(isinstance(result, ExecutionResult))
            self.assertTrue(result.success)
            self.assertFalse(result.result_or_exc)
        # Pass 3: read the rows back; results must come back in submission
        # order, and the generator must be exhausted after exactly
        # num_statements items (hence the StopIteration check).
        statement = SimpleStatement('SELECT v FROM test3rf.test WHERE k=%s', consistency_level=ConsistencyLevel.QUORUM)
        parameters = [(i,) for i in range(num_statements)]
        results = self.execute_concurrent_args_helper(self.session, statement, parameters, results_generator=True)
        for i in range(num_statements):
            result = next(results)
            self.assertEqual((True, [(i,)]), result)
        self.assertRaises(StopIteration, next, results)
def test_execute_concurrent_paged_result_generator(self):
    """
    Test to validate that generator based results are surfaced correctly when paging is used

    Inserts data into a a table and attempts to query it. It then validates that the results
    are returned as expected (no order specified)

    @since 2.7.0
    @jira_ticket PYTHON-123
    @expected_result all data should be returned in order.

    @test_category paging
    """
    if PROTOCOL_VERSION < 2:
        raise unittest.SkipTest('Protocol 2+ is required for Paging, currently testing against %r' % (PROTOCOL_VERSION,))

    num_statements = 201
    write_stmt = SimpleStatement('INSERT INTO test3rf.test (k, v) VALUES (%s, %s)',
                                 consistency_level=ConsistencyLevel.QUORUM)
    write_params = [(i, i) for i in range(num_statements)]
    write_results = self.execute_concurrent_args_helper(
        self.session, write_stmt, write_params, results_generator=True)
    self.assertEqual(num_statements, sum(1 for _ in write_results))

    # fetch_size below the row count forces the single SELECT to span pages.
    read_stmt = SimpleStatement('SELECT * FROM test3rf.test LIMIT %s',
                                consistency_level=ConsistencyLevel.QUORUM,
                                fetch_size=int(num_statements / 2))
    paged_results_gen = self.execute_concurrent_args_helper(
        self.session, read_stmt, [(num_statements,)], results_generator=True)

    # Walk every page of every result and count the rows seen.
    found_results = 0
    for result_tuple in paged_results_gen:
        for _ in result_tuple[1]:
            found_results += 1
    self.assertEqual(found_results, num_statements)
def test_custom_raw_uuid_row_results(self):
    """
    Test to validate that custom protocol handlers work with raw row results

    Connect and validate that the normal protocol handler is used.
    Re-connect and validate that the custom protocol handler is used.
    Re-connect and validate that the normal protocol handler is used.

    @since 2.7
    @jira_ticket PYTHON-313
    @expected_result custom protocol handler is invoked appropriately.

    @test_category data_types:serialization
    """
    test_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = test_cluster.connect(keyspace='custserdes')
    session.row_factory = tuple_factory

    # Default handler deserializes the column into a uuid.UUID.
    result = session.execute('SELECT schema_version FROM system.local')
    self.assertEqual(type(result[0][0]), uuid.UUID)

    # Custom handler leaves the raw 16-byte UUID payload untouched.
    session.client_protocol_handler = CustomTestRawRowType
    session.row_factory = tuple_factory
    result_set = session.execute('SELECT schema_version FROM system.local')
    raw_value = result_set[0][0]
    self.assertTrue(isinstance(raw_value, binary_type))
    self.assertEqual(len(raw_value), 16)

    # Restoring the default handler restores deserialization.
    session.client_protocol_handler = ProtocolHandler
    result_set = session.execute('SELECT schema_version FROM system.local')
    self.assertEqual(type(result_set[0][0]), uuid.UUID)

    test_cluster.shutdown()
def test_custom_raw_row_results_all_types(self):
    """
    Test to validate that custom protocol handlers work with varying types of results

    Connect, create a table with all sorts of data. Query the data, make sure the
    custom results handler is used correctly.

    @since 2.7
    @jira_ticket PYTHON-313
    @expected_result custom protocol handler is invoked with various result types

    @test_category data_types:serialization
    """
    test_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = test_cluster.connect(keyspace='custserdes')
    session.client_protocol_handler = CustomProtocolHandlerResultMessageTracked
    session.row_factory = tuple_factory

    colnames = create_table_with_all_types('alltypes', session, 1)
    columns_string = ', '.join(colnames)

    # Read the row back and compare column-by-column against the params
    # that were written.
    params = get_all_primitive_params(0)
    results = session.execute(
        'SELECT {0} FROM alltypes WHERE primkey=0'.format(columns_string))[0]
    for expected, actual in zip(params, results):
        self.assertEqual(actual, expected)

    # The tracking handler should have visited every primitive type but one.
    self.assertEqual(len(CustomResultMessageTracked.checked_rev_row_set),
                     len(PRIMITIVE_DATATYPES) - 1)
    test_cluster.shutdown()
@greaterthanorequalcass30
def test_protocol_divergence_v4_fail_by_flag_uses_int(self):
    """
    Test to validate that the _PAGE_SIZE_FLAG is not treated correctly in V4 if the
    flags are written using write_uint instead of write_int

    @since 3.9
    @jira_ticket PYTHON-713
    @expected_result the fetch_size=1 parameter will be ignored

    @test_category connection
    """
    self._protocol_divergence_fail_by_flag_uses_int(ProtocolVersion.V4,
                                                    uses_int_query_flag=False,
                                                    int_flag=True)
def test_warning_basic(self):
    """
    Test to validate that client warnings can be surfaced

    @since 2.6.0
    @jira_ticket PYTHON-315
    @expected_result valid warnings returned

    @test_assumptions
        - batch_size_warn_threshold_in_kb: 5

    @test_category queries:client_warning
    """
    response_future = self.session.execute_async(self.warn_batch)
    response_future.result()
    warnings = response_future.warnings
    self.assertEqual(len(warnings), 1)
    self.assertRegexpMatches(warnings[0], 'Batch.*exceeding.*')
def test_warning_with_trace(self):
    """
    Test to validate client warning with tracing

    @since 2.6.0
    @jira_ticket PYTHON-315
    @expected_result valid warnings returned

    @test_assumptions
        - batch_size_warn_threshold_in_kb: 5

    @test_category queries:client_warning
    """
    response_future = self.session.execute_async(self.warn_batch, trace=True)
    response_future.result()
    warnings = response_future.warnings
    self.assertEqual(len(warnings), 1)
    self.assertRegexpMatches(warnings[0], 'Batch.*exceeding.*')
    # Tracing must coexist with warnings.
    self.assertIsNotNone(response_future.get_query_trace())
@local
def test_warning_with_custom_payload(self):
    """
    Test to validate client warning with custom payload

    @since 2.6.0
    @jira_ticket PYTHON-315
    @expected_result valid warnings returned

    @test_assumptions
        - batch_size_warn_threshold_in_kb: 5

    @test_category queries:client_warning
    """
    payload = {'key': 'value'}
    response_future = self.session.execute_async(self.warn_batch, custom_payload=payload)
    response_future.result()
    warnings = response_future.warnings
    self.assertEqual(len(warnings), 1)
    self.assertRegexpMatches(warnings[0], 'Batch.*exceeding.*')
    # The payload must round-trip unchanged alongside the warning.
    self.assertDictEqual(response_future.custom_payload, payload)
@local
def test_warning_with_trace_and_custom_payload(self):
    """
    Test to validate client warning with tracing and custom payload together

    @since 2.6.0
    @jira_ticket PYTHON-315
    @expected_result valid warnings returned

    @test_assumptions
        - batch_size_warn_threshold_in_kb: 5

    @test_category queries:client_warning
    """
    payload = {'key': 'value'}
    response_future = self.session.execute_async(self.warn_batch, trace=True,
                                                 custom_payload=payload)
    response_future.result()
    warnings = response_future.warnings
    self.assertEqual(len(warnings), 1)
    self.assertRegexpMatches(warnings[0], 'Batch.*exceeding.*')
    # Both tracing and the payload must survive alongside the warning.
    self.assertIsNotNone(response_future.get_query_trace())
    self.assertDictEqual(response_future.custom_payload, payload)
@local
def test_broadcast_listen_address(self):
    """
    Check to ensure that the broadcast and listen address is populated correctly

    @since 3.3
    @jira_ticket PYTHON-332
    @expected_result They are populated for C*> 2.1.6, 2.2.0

    @test_category metadata
    """
    # Every host must report a broadcast address.
    for host in self.cluster.metadata.all_hosts():
        self.assertIsNotNone(host.broadcast_address)

    # The control connection's peer must appear among the listen addresses.
    control_conn = self.cluster.control_connection.get_connections()[0]
    local_host = control_conn.host
    listen_addrs = [h.listen_address for h in self.cluster.metadata.all_hosts()]
    self.assertTrue(local_host in listen_addrs)
def test_host_release_version(self):
    """
    Checks the hosts release version and validates that it is equal to the
    Cassandra version we are using in our test harness.

    @since 3.3
    @jira_ticket PYTHON-301
    @expected_result host.release_version should match our specified Cassandra version.

    @test_category metadata
    """
    for host in self.cluster.metadata.all_hosts():
        # startswith: release_version carries the full patch version.
        self.assertTrue(host.release_version.startswith(CASSANDRA_VERSION))
def test_bad_contact_point(self):
    """
    Checks to ensure that hosts that are not resolvable are excluded from the
    contact point list.

    @since 3.6
    @jira_ticket PYTHON-549
    @expected_result Invalid hosts on the contact list should be excluded

    @test_category metadata
    """
    # The unresolvable contact point must have been dropped, leaving the
    # three real nodes.
    self.assertEqual(len(self.cluster.metadata.all_hosts()), 3)
def test_schema_metadata_disable(self):
    """
    Checks to ensure that the schema_metadata_enabled and token_metadata_enabled
    flags work correctly.

    @since 3.3
    @jira_ticket PYTHON-327
    @expected_result schema metadata will not be populated when schema_metadata_enabled is false;
        token_metadata will be missing when token_metadata_enabled is false

    @test_category metadata
    """
    # Connect with schema metadata disabled: no keyspaces, empty schema dump.
    no_schema = Cluster(schema_metadata_enabled=False)
    no_schema_session = no_schema.connect()
    self.assertEqual(len(no_schema.metadata.keyspaces), 0)
    self.assertEqual(no_schema.metadata.export_schema_as_string(), '')

    # Connect with token metadata disabled: token map is empty.
    no_token = Cluster(token_metadata_enabled=False)
    no_token_session = no_token.connect()
    self.assertEqual(len(no_token.metadata.token_map.token_to_host_owner), 0)

    # Both sessions must still be able to run queries.
    query = 'SELECT * FROM system.local'
    self.assertIsNotNone(no_schema_session.execute(query)[0])
    self.assertIsNotNone(no_token_session.execute(query)[0])

    no_schema.shutdown()
    no_token.shutdown()
def test_cluster_column_ordering_reversed_metadata(self):
    """
    Simple test to ensure that the metadata associated with clustering ordering
    is surfaced correctly.

    Creates a table with a few clustering keys, then checks the clustering-order
    flag of each clustering column.

    @since 3.0.0
    @jira_ticket PYTHON-402
    @expected_result is_reversed is set on DESC order, and is False on ASC

    @test_category metadata
    """
    create_statement = self.make_create_statement(['a'], ['b', 'c'], ['d'], compact=True)
    create_statement += ' AND CLUSTERING ORDER BY (b ASC, c DESC)'
    self.session.execute(create_statement)

    tablemeta = self.get_table_metadata()
    # b is ASC -> not reversed; c is DESC -> reversed.
    self.assertFalse(tablemeta.columns['b'].is_reversed)
    self.assertTrue(tablemeta.columns['c'].is_reversed)
def test_non_size_tiered_compaction(self):
    """
    test options for non-size-tiered compaction strategy

    Creates a table with LeveledCompactionStrategy, specifying one non-default
    option. Verifies that the option is present in generated CQL, and that
    other legacy table parameters (min_threshold, max_threshold) are not included.

    @since 2.6.0
    @jira_ticket PYTHON-352
    @expected_result the options map for LeveledCompactionStrategy does not
        contain min_threshold, max_threshold

    @test_category metadata
    """
    create_statement = self.make_create_statement(['a'], [], ['b', 'c'])
    create_statement += "WITH COMPACTION = {'class': 'LeveledCompactionStrategy', 'tombstone_threshold': '0.3'}"
    self.session.execute(create_statement)

    cql = self.get_table_metadata().export_as_string()
    self.assertIn("'tombstone_threshold': '0.3'", cql)
    self.assertIn('LeveledCompactionStrategy', cql)
    # Size-tiered-only options must not leak into the generated CQL.
    self.assertNotIn('min_threshold', cql)
    self.assertNotIn('max_threshold', cql)
def test_refresh_schema_metadata(self):
    """
    test for synchronously refreshing all cluster metadata

    test_refresh_schema_metadata tests all cluster metadata is refreshed when calling
    refresh_schema_metadata(). It creates a second cluster object with
    schema_event_refresh_window=-1 such that schema refreshes are disabled for schema
    change push events. It then alters the cluster, creating a new keyspace, using the
    first cluster object, and verifies that the cluster metadata has not changed in the
    second cluster object. It then calls refresh_schema_metadata() and verifies that
    the cluster metadata is updated in the second cluster object. Similarly, it then
    proceeds to altering keyspace, table, UDT, UDF, and UDA metadata and subsequently
    verifies that these metadata is updated when refresh_schema_metadata() is called.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result Cluster, keyspace, table, UDT, UDF, and UDA metadata should be
        refreshed when refresh_schema_metadata() is called.

    @test_category metadata
    """
    # schema_event_refresh_window=-1 disables automatic refresh on push
    # events, so only explicit refresh_schema_metadata() calls update it.
    cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=(-1))
    cluster2.connect()

    self.assertNotIn('new_keyspace', cluster2.metadata.keyspaces)

    # Cluster-level metadata: a new keyspace.
    self.session.execute("CREATE KEYSPACE new_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}")
    self.assertNotIn('new_keyspace', cluster2.metadata.keyspaces)
    cluster2.refresh_schema_metadata()
    self.assertIn('new_keyspace', cluster2.metadata.keyspaces)

    # Keyspace-level metadata: durable_writes flag.
    self.session.execute('ALTER KEYSPACE {0} WITH durable_writes = false'.format(self.keyspace_name))
    self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
    cluster2.refresh_schema_metadata()
    self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)

    # Table-level metadata: a new column.
    table_name = 'test'
    self.session.execute('CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)'.format(self.keyspace_name, table_name))
    cluster2.refresh_schema_metadata()

    self.session.execute('ALTER TABLE {0}.{1} ADD c double'.format(self.keyspace_name, table_name))
    self.assertNotIn('c', cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
    cluster2.refresh_schema_metadata()
    self.assertIn('c', cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)

    if (PROTOCOL_VERSION >= 3):
        # UDT metadata (protocol v3+).
        self.session.execute('CREATE TYPE {0}.user (age int, name text)'.format(self.keyspace_name))
        self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].user_types, {})
        cluster2.refresh_schema_metadata()
        self.assertIn('user', cluster2.metadata.keyspaces[self.keyspace_name].user_types)

    if (PROTOCOL_VERSION >= 4):
        # UDF metadata (protocol v4+).
        self.session.execute("CREATE FUNCTION {0}.sum_int(key int, val int)\n RETURNS NULL ON NULL INPUT\n RETURNS int\n LANGUAGE javascript AS 'key + val';".format(self.keyspace_name))
        self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].functions, {})
        cluster2.refresh_schema_metadata()
        self.assertIn('sum_int(int,int)', cluster2.metadata.keyspaces[self.keyspace_name].functions)

        # UDA metadata (depends on sum_int, so it stays in this branch).
        # Fix: the original CREATE AGGREGATE literal was split across source
        # lines inside the quotes (syntax-breaking); normalized here.
        self.session.execute('CREATE AGGREGATE {0}.sum_agg(int)\n SFUNC sum_int\n STYPE int\n INITCOND 0'.format(self.keyspace_name))
        self.assertEqual(cluster2.metadata.keyspaces[self.keyspace_name].aggregates, {})
        cluster2.refresh_schema_metadata()
        self.assertIn('sum_agg(int)', cluster2.metadata.keyspaces[self.keyspace_name].aggregates)

    # Cluster-level metadata again: drop the keyspace.
    self.session.execute('DROP KEYSPACE new_keyspace')
    self.assertIn('new_keyspace', cluster2.metadata.keyspaces)
    cluster2.refresh_schema_metadata()
    self.assertNotIn('new_keyspace', cluster2.metadata.keyspaces)

    cluster2.shutdown()
def test_refresh_keyspace_metadata(self):
    """
    test for synchronously refreshing keyspace metadata

    Uses a second cluster with schema_event_refresh_window=-1 (push-event
    refreshes disabled), alters the keyspace through the first cluster, and
    verifies the second cluster only sees the change after an explicit
    refresh_keyspace_metadata() call.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result Keyspace metadata should be refreshed when
        refresh_keyspace_metadata() is called.

    @test_category metadata
    """
    cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=(-1))
    cluster2.connect()

    self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
    self.session.execute('ALTER KEYSPACE {0} WITH durable_writes = false'.format(self.keyspace_name))
    # Push events are disabled, so the stale value is still visible...
    self.assertTrue(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)
    # ...until an explicit refresh picks up the change.
    cluster2.refresh_keyspace_metadata(self.keyspace_name)
    self.assertFalse(cluster2.metadata.keyspaces[self.keyspace_name].durable_writes)

    cluster2.shutdown()
def test_refresh_table_metadata(self):
    """
    test for synchronously refreshing table metadata

    Uses a second cluster with schema_event_refresh_window=-1 (push-event
    refreshes disabled), adds a column through the first cluster, and verifies
    the second cluster only sees the new column after an explicit
    refresh_table_metadata() call.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result Table metadata should be refreshed when
        refresh_table_metadata() is called.

    @test_category metadata
    """
    table_name = 'test'
    self.session.execute('CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)'.format(self.keyspace_name, table_name))

    cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=(-1))
    cluster2.connect()

    self.assertNotIn('c', cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
    self.session.execute('ALTER TABLE {0}.{1} ADD c double'.format(self.keyspace_name, table_name))
    # Push events are disabled, so the new column is invisible...
    self.assertNotIn('c', cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)
    # ...until an explicit per-table refresh.
    cluster2.refresh_table_metadata(self.keyspace_name, table_name)
    self.assertIn('c', cluster2.metadata.keyspaces[self.keyspace_name].tables[table_name].columns)

    cluster2.shutdown()
def test_refresh_metadata_for_mv(self):
    """
    Test for synchronously refreshing materialized view metadata.

    Uses observer clusters created with ``schema_event_refresh_window=-1``
    (push-event refreshes disabled) to verify that materialized-view metadata
    is only updated when explicitly refreshed: first via
    ``refresh_table_metadata()`` called with the view name, then via
    ``refresh_materialized_view_metadata()``. Also checks that a refresh on
    the primary cluster replaces the view-metadata object (new identity) while
    preserving its CQL representation.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata should be refreshed when
        refresh_table_metadata() is called.
    @test_category metadata
    """
    if (CASS_SERVER_VERSION < (3, 0)):
        raise unittest.SkipTest('Materialized views require Cassandra 3.0+')

    self.session.execute('CREATE TABLE {0}.{1} (a int PRIMARY KEY, b text)'.format(self.keyspace_name, self.function_table_name))

    # Observer cluster with push-event schema refreshes disabled.
    cluster2 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=(-1))
    cluster2.connect()
    try:
        self.assertNotIn('mv1', cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
        self.session.execute('CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)'.format(self.keyspace_name, self.function_table_name))
        # View created through the primary session is not yet visible here.
        self.assertNotIn('mv1', cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
        # refresh_table_metadata accepts a materialized view name as the table.
        cluster2.refresh_table_metadata(self.keyspace_name, 'mv1')
        self.assertIn('mv1', cluster2.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
    finally:
        cluster2.shutdown()

    # The keyspace-level view entry and the table-level view entry are the
    # same object before a refresh...
    original_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
    self.assertIs(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])

    # ...and refreshing replaces that object (new identity, same CQL).
    self.cluster.refresh_materialized_view_metadata(self.keyspace_name, 'mv1')
    current_meta = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']
    self.assertIsNot(current_meta, original_meta)
    self.assertIsNot(original_meta, self.session.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'])
    self.assertEqual(original_meta.as_cql_query(), current_meta.as_cql_query())

    # Second observer cluster: verify refresh_materialized_view_metadata()
    # also surfaces a brand-new view.
    cluster3 = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=(-1))
    cluster3.connect()
    try:
        self.assertNotIn('mv2', cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
        self.session.execute('CREATE MATERIALIZED VIEW {0}.mv2 AS SELECT b FROM {0}.{1} WHERE b IS NOT NULL PRIMARY KEY (a, b)'.format(self.keyspace_name, self.function_table_name))
        self.assertNotIn('mv2', cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
        cluster3.refresh_materialized_view_metadata(self.keyspace_name, 'mv2')
        self.assertIn('mv2', cluster3.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
    finally:
        cluster3.shutdown()
def test_refresh_user_type_metadata(self):
    """
    Test for synchronously refreshing UDT metadata in a keyspace.

    A second cluster is opened with ``schema_event_refresh_window=-1`` so
    push-event schema refreshes are disabled. A UDT is then created through
    the primary session; the second cluster's metadata is shown to remain
    empty until ``refresh_user_type_metadata()`` is invoked.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result UDT metadata in the keyspace should be refreshed when
        refresh_user_type_metadata() is called.
    @test_category metadata
    """
    if PROTOCOL_VERSION < 3:
        raise unittest.SkipTest('Protocol 3+ is required for UDTs, currently testing against {0}'.format(PROTOCOL_VERSION))

    observer_cluster = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
    observer_cluster.connect()
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].user_types, {})

    # Create the type via the primary session; observer stays stale.
    self.session.execute('CREATE TYPE {0}.user (age int, name text)'.format(self.keyspace_name))
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].user_types, {})

    # Explicit refresh makes the UDT visible.
    observer_cluster.refresh_user_type_metadata(self.keyspace_name, 'user')
    self.assertIn('user', observer_cluster.metadata.keyspaces[self.keyspace_name].user_types)
    observer_cluster.shutdown()
def test_refresh_user_type_metadata_proto_2(self):
    """
    Test to insure that protocol v1/v2 surface UDT metadata changes.

    For each of protocol versions 1 and 2, creates/alters/renames/drops a UDT
    and verifies the driver's keyspace metadata tracks every change even
    though these protocol versions do not deliver native schema-change events
    for UDTs.

    @since 3.7.0
    @jira_ticket PYTHON-106
    @expected_result UDT metadata in the keyspace should be updated
        regardless of protocol version.
    @test_category metadata
    """
    supported_versions = get_supported_protocol_versions()
    if (2 not in supported_versions) or (CASSANDRA_VERSION < '2.1'):
        # Fix: the original message called .format() on a string with no
        # placeholder, so the server version was silently dropped.
        raise unittest.SkipTest('Protocol versions 1 and 2 are not supported in Cassandra version {0}'.format(CASSANDRA_VERSION))

    for protocol_version in (1, 2):
        cluster = Cluster(protocol_version=protocol_version)
        session = cluster.connect()

        self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})

        # CREATE: type and its fields appear in metadata.
        session.execute('CREATE TYPE {0}.user (age int, name text)'.format(self.keyspace_name))
        self.assertIn('user', cluster.metadata.keyspaces[self.keyspace_name].user_types)
        self.assertIn('age', cluster.metadata.keyspaces[self.keyspace_name].user_types['user'].field_names)
        self.assertIn('name', cluster.metadata.keyspaces[self.keyspace_name].user_types['user'].field_names)

        # ALTER ... ADD: new field is surfaced.
        session.execute('ALTER TYPE {0}.user ADD flag boolean'.format(self.keyspace_name))
        self.assertIn('flag', cluster.metadata.keyspaces[self.keyspace_name].user_types['user'].field_names)

        # ALTER ... RENAME: renamed field is surfaced.
        session.execute('ALTER TYPE {0}.user RENAME flag TO something'.format(self.keyspace_name))
        self.assertIn('something', cluster.metadata.keyspaces[self.keyspace_name].user_types['user'].field_names)

        # DROP: metadata returns to empty.
        session.execute('DROP TYPE {0}.user'.format(self.keyspace_name))
        self.assertEqual(cluster.metadata.keyspaces[self.keyspace_name].user_types, {})
        cluster.shutdown()
def test_refresh_user_function_metadata(self):
    """
    Test for synchronously refreshing UDF metadata in a keyspace.

    A second cluster is opened with ``schema_event_refresh_window=-1`` so
    push-event schema refreshes are disabled. A UDF is then created through
    the primary session; the second cluster's function metadata is shown to
    remain empty until ``refresh_user_function_metadata()`` is invoked.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result UDF metadata in the keyspace should be refreshed when
        refresh_user_function_metadata() is called.
    @test_category metadata
    """
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest('Protocol 4+ is required for UDFs, currently testing against {0}'.format(PROTOCOL_VERSION))

    observer_cluster = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
    observer_cluster.connect()
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].functions, {})

    # Create the UDF via the primary session; observer stays stale.
    self.session.execute("CREATE FUNCTION {0}.sum_int(key int, val int)\n                            RETURNS NULL ON NULL INPUT\n                            RETURNS int\n                            LANGUAGE javascript AS 'key + val';".format(self.keyspace_name))
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].functions, {})

    # Explicit refresh (keyed by name + argument types) surfaces the UDF.
    observer_cluster.refresh_user_function_metadata(self.keyspace_name, UserFunctionDescriptor('sum_int', ['int', 'int']))
    self.assertIn('sum_int(int,int)', observer_cluster.metadata.keyspaces[self.keyspace_name].functions)
    observer_cluster.shutdown()
def test_refresh_user_aggregate_metadata(self):
    """
    Test for synchronously refreshing UDA metadata in a keyspace.

    A second cluster is opened with ``schema_event_refresh_window=-1`` so
    push-event schema refreshes are disabled. A UDA (with its backing UDF) is
    then created through the primary session; the second cluster's aggregate
    metadata is shown to remain empty until
    ``refresh_user_aggregate_metadata()`` is invoked.

    @since 2.6.0
    @jira_ticket PYTHON-291
    @expected_result UDA metadata in the keyspace should be refreshed when
        refresh_user_aggregate_metadata() is called.
    @test_category metadata
    """
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest('Protocol 4+ is required for UDAs, currently testing against {0}'.format(PROTOCOL_VERSION))

    observer_cluster = Cluster(protocol_version=PROTOCOL_VERSION, schema_event_refresh_window=-1)
    observer_cluster.connect()
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].aggregates, {})

    # Create the state function and the aggregate via the primary session.
    self.session.execute("CREATE FUNCTION {0}.sum_int(key int, val int)\n                            RETURNS NULL ON NULL INPUT\n                            RETURNS int\n                            LANGUAGE javascript AS 'key + val';".format(self.keyspace_name))
    self.session.execute('CREATE AGGREGATE {0}.sum_agg(int)\n                             SFUNC sum_int\n                             STYPE int\n                             INITCOND 0'.format(self.keyspace_name))
    # Observer remains stale until explicitly refreshed.
    self.assertEqual(observer_cluster.metadata.keyspaces[self.keyspace_name].aggregates, {})

    observer_cluster.refresh_user_aggregate_metadata(self.keyspace_name, UserAggregateDescriptor('sum_agg', ['int']))
    self.assertIn('sum_agg(int)', observer_cluster.metadata.keyspaces[self.keyspace_name].aggregates)
    observer_cluster.shutdown()
def test_multiple_indices(self):
    """
    Test multiple indices on the same column.

    Creates a table with a map column and two indices on it (values and
    keys), then checks that IndexMetadata is surfaced for both with the
    expected name, kind, target, table, and keyspace.

    @since 3.0.0
    @jira_ticket PYTHON-276
    @expected_result IndexMetadata is appropriately surfaced.
    @test_category metadata
    """
    if CASS_SERVER_VERSION < (3, 0):
        raise unittest.SkipTest('Materialized views require Cassandra 3.0+')

    self.session.execute('CREATE TABLE {0}.{1} (a int PRIMARY KEY, b map<text, int>)'.format(self.keyspace_name, self.function_table_name))
    self.session.execute('CREATE INDEX index_1 ON {0}.{1}(b)'.format(self.keyspace_name, self.function_table_name))
    self.session.execute('CREATE INDEX index_2 ON {0}.{1}(keys(b))'.format(self.keyspace_name, self.function_table_name))

    index_map = self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].indexes
    self.assertEqual(len(index_map), 2)

    values_index = index_map['index_1']
    keys_index = index_map['index_2']

    # index_1: a COMPOSITES index over the map's values.
    self.assertEqual(values_index.table_name, 'test_multiple_indices')
    self.assertEqual(values_index.name, 'index_1')
    self.assertEqual(values_index.kind, 'COMPOSITES')
    self.assertEqual(values_index.index_options['target'], 'values(b)')
    self.assertEqual(values_index.keyspace_name, 'schemametadatatests')

    # index_2: a COMPOSITES index over the map's keys.
    self.assertEqual(keys_index.table_name, 'test_multiple_indices')
    self.assertEqual(keys_index.name, 'index_2')
    self.assertEqual(keys_index.kind, 'COMPOSITES')
    self.assertEqual(keys_index.index_options['target'], 'keys(b)')
    self.assertEqual(keys_index.keyspace_name, 'schemametadatatests')
def test_export_schema(self):
    """Verify the full cluster schema exports as a string."""
    export_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    export_cluster.connect()
    schema_text = export_cluster.metadata.export_schema_as_string()
    self.assertIsInstance(schema_text, six.string_types)
    export_cluster.shutdown()
def test_export_keyspace_schema(self):
    """Verify every keyspace's schema exports as a string, both the full
    export and the keyspace-only CQL query."""
    export_cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    export_cluster.connect()
    for keyspace_metadata in export_cluster.metadata.keyspaces.values():
        self.assertIsInstance(keyspace_metadata.export_as_string(), six.string_types)
        self.assertIsInstance(keyspace_metadata.as_cql_query(), six.string_types)
    export_cluster.shutdown()
def test_export_keyspace_schema_udts(self):
    """
    Test UDT exports.

    Creates a keyspace with nested UDTs (street/zip/address) and a table
    using them, then compares the exported keyspace and table CQL against
    static expected prefixes.

    NOTE(review): pinned to CPython 2.7 because the expected strings depend
    on dict iteration order when generating CQL.
    """
    if (CASS_SERVER_VERSION < (2, 1, 0)):
        raise unittest.SkipTest('UDTs were introduced in Cassandra 2.1')
    if (PROTOCOL_VERSION < 3):
        raise unittest.SkipTest(('Protocol 3.0+ is required for UDT change events, currently testing against %r' % (PROTOCOL_VERSION,)))
    if (sys.version_info[0:2] != (2, 7)):
        raise unittest.SkipTest('This test compares static strings generated from dict items, which may change orders. Test with 2.7.')

    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()

    # Build the fixture schema: keyspace, three UDTs (address nests the
    # other two), and a table whose column is a map of frozen addresses.
    session.execute("\n CREATE KEYSPACE export_udts\n WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}\n AND durable_writes = true;\n ")
    session.execute('\n CREATE TYPE export_udts.street (\n street_number int,\n street_name text)\n ')
    session.execute('\n CREATE TYPE export_udts.zip (\n zipcode int,\n zip_plus_4 int)\n ')
    session.execute('\n CREATE TYPE export_udts.address (\n street_address frozen<street>,\n zip_code frozen<zip>)\n ')
    session.execute('\n CREATE TABLE export_udts.users (\n user text PRIMARY KEY,\n addresses map<text, frozen<address>>)\n ')

    # Keyspace export must list UDTs in dependency order before the table.
    expected_prefix = "CREATE KEYSPACE export_udts WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'} AND durable_writes = true;\n\nCREATE TYPE export_udts.street (\n street_number int,\n street_name text\n);\n\nCREATE TYPE export_udts.zip (\n zipcode int,\n zip_plus_4 int\n);\n\nCREATE TYPE export_udts.address (\n street_address frozen<street>,\n zip_code frozen<zip>\n);\n\nCREATE TABLE export_udts.users (\n user text PRIMARY KEY,\n addresses map<text, frozen<address>>"
    self.assert_startswith_diff(cluster.metadata.keyspaces['export_udts'].export_as_string(), expected_prefix)

    # Table-level export must reference the UDT column type.
    table_meta = cluster.metadata.keyspaces['export_udts'].tables['users']
    expected_prefix = 'CREATE TABLE export_udts.users (\n user text PRIMARY KEY,\n addresses map<text, frozen<address>>'
    self.assert_startswith_diff(table_meta.export_as_string(), expected_prefix)
    cluster.shutdown()