desc
stringlengths 3
26.7k
| decl
stringlengths 11
7.89k
| bodies
stringlengths 8
553k
|
---|---|---|
@greaterthancass21
def test_case_sensitivity(self):
    """Test that names that need to be escaped in CREATE statements are."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()

    # Mixed-case identifiers must be quoted in generated CQL.
    ksname = 'AnInterestingKeyspace'
    cfname = 'AnInterestingTable'

    session.execute('DROP KEYSPACE IF EXISTS {0}'.format(ksname))
    session.execute('\n CREATE KEYSPACE "%s"\n WITH replication = {\'class\': \'SimpleStrategy\', \'replication_factor\': \'1\'}\n ' % (ksname,))
    session.execute('\n CREATE TABLE "%s"."%s" (\n k int,\n "A" int,\n "B" int,\n "MyColumn" int,\n PRIMARY KEY (k, "A"))\n WITH CLUSTERING ORDER BY ("A" DESC)\n ' % (ksname, cfname))
    session.execute('\n CREATE INDEX myindex ON "%s"."%s" ("MyColumn")\n ' % (ksname, cfname))
    session.execute('\n CREATE INDEX "AnotherIndex" ON "%s"."%s" ("B")\n ' % (ksname, cfname))

    # The exported schema must quote every case-sensitive identifier.
    ksmeta = cluster.metadata.keyspaces[ksname]
    schema = ksmeta.export_as_string()
    self.assertIn('CREATE KEYSPACE "AnInterestingKeyspace"', schema)
    self.assertIn('CREATE TABLE "AnInterestingKeyspace"."AnInterestingTable"', schema)
    self.assertIn('"A" int', schema)
    self.assertIn('"B" int', schema)
    self.assertIn('"MyColumn" int', schema)
    self.assertIn('PRIMARY KEY (k, "A")', schema)
    self.assertIn('WITH CLUSTERING ORDER BY ("A" DESC)', schema)
    self.assertIn('CREATE INDEX myindex ON "AnInterestingKeyspace"."AnInterestingTable" ("MyColumn")', schema)
    self.assertIn('CREATE INDEX "AnotherIndex" ON "AnInterestingKeyspace"."AnInterestingTable" ("B")', schema)
    cluster.shutdown()
|
def test_already_exists_exceptions(self):
    """Ensure AlreadyExists exception is thrown when hit."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()

    # test3rf/test are created by the test harness, so re-creating must raise.
    ksname = 'test3rf'
    cfname = 'test'

    ddl = "\n CREATE KEYSPACE %s\n WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}"
    self.assertRaises(AlreadyExists, session.execute, ddl % ksname)

    ddl = '\n CREATE TABLE %s.%s (\n k int PRIMARY KEY,\n v int )'
    self.assertRaises(AlreadyExists, session.execute, ddl % (ksname, cfname))
    cluster.shutdown()
|
@local
def test_replicas(self):
    """Ensure cluster.metadata.get_replicas returns correctly when not attached to keyspace."""
    if murmur3 is None:
        raise unittest.SkipTest('the murmur3 extension is not available')

    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    # Before connecting there is no token map, so no replicas are known.
    # NOTE(review): this call uses a str key while the post-connect calls use
    # six.b('key'); the result is [] either way pre-connect — confirm intent.
    self.assertEqual(cluster.metadata.get_replicas('test3rf', 'key'), [])

    cluster.connect('test3rf')

    self.assertNotEqual(list(cluster.metadata.get_replicas('test3rf', six.b('key'))), [])
    host = list(cluster.metadata.get_replicas('test3rf', six.b('key')))[0]
    self.assertEqual(host.datacenter, 'dc1')
    self.assertEqual(host.rack, 'r1')
    cluster.shutdown()
|
def test_token_map(self):
    """Test token mappings."""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    cluster.connect('test3rf')
    ring = cluster.metadata.token_map.ring
    owners = list(cluster.metadata.token_map.token_to_host_owner[token] for token in ring)
    get_replicas = cluster.metadata.token_map.get_replicas

    for ksname in ('test1rf', 'test2rf', 'test3rf'):
        self.assertNotEqual(list(get_replicas(ksname, ring[0])), [])

    # With a 3-node ring: RF=3 owns everything, RF=2/RF=1 own the next N owners.
    for i, token in enumerate(ring):
        self.assertEqual(set(get_replicas('test3rf', token)), set(owners))
        self.assertEqual(set(get_replicas('test2rf', token)), set([owners[(i + 1) % 3], owners[(i + 2) % 3]]))
        self.assertEqual(set(get_replicas('test1rf', token)), set([owners[(i + 1) % 3]]))
    cluster.shutdown()
|
def test_keyspace_alter(self):
    """
    Table info is preserved upon keyspace alter:
    Create table
    Verify schema
    Alter ks
    Verify that table metadata is still present

    PYTHON-173
    """
    name = self._testMethodName.lower()
    self.session.execute('CREATE TABLE %s.d (d INT PRIMARY KEY)' % name)
    original_keyspace_meta = self.cluster.metadata.keyspaces[name]
    self.assertEqual(original_keyspace_meta.durable_writes, True)
    self.assertEqual(len(original_keyspace_meta.tables), 1)

    self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % name)
    # The alter must produce a new keyspace meta object with the new option.
    new_keyspace_meta = self.cluster.metadata.keyspaces[name]
    self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
    self.assertEqual(new_keyspace_meta.durable_writes, False)
|
def setUp(self):
    """Tests are skipped if run with native protocol version < 4."""
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest('Function metadata requires native protocol version 4+')
|
def test_functions_after_udt(self):
    """
    Test to ensure functions come after UDTs in keyspace dump.

    test_functions_after_udt creates a basic function, then dumps the keyspace
    and verifies that UDTs are listed before any corresponding functions.

    Ideally we would make a function that takes a udt type, but this presently
    fails because C* c059a56 requires udt to be frozen to create, but does not
    store meta indicating frozen.
    SEE https://issues.apache.org/jira/browse/CASSANDRA-9186
    Maybe update this after release.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result UDT's should come before any functions
    @test_category function
    """
    self.assertNotIn(self.function_name, self.keyspace_function_meta)

    udt_name = 'udtx'
    self.session.execute('CREATE TYPE %s (x int)' % udt_name)

    with self.VerifiedFunction(self, **self.make_function_kwargs()):
        keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
        # Last TYPE must still precede the first FUNCTION in the dump.
        type_idx = keyspace_cql.rfind('CREATE TYPE')
        func_idx = keyspace_cql.find('CREATE FUNCTION')
        self.assertNotIn(-1, (type_idx, func_idx), 'TYPE or FUNCTION not found in keyspace_cql: ' + keyspace_cql)
        self.assertGreater(func_idx, type_idx)
|
def test_function_same_name_diff_types(self):
    """
    Test to verify that functions with different signatures are differentiated in metadata.

    Creates two functions with the same name but slightly different signatures,
    then ensures that both are surfaced separately in our metadata.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result function with the same name but different signatures should be surfaced separately
    @test_category function
    """
    kwargs = self.make_function_kwargs()
    with self.VerifiedFunction(self, **kwargs):
        # Sanity-check we have multiple args to truncate.
        self.assertGreater(len(kwargs['argument_types']), 1)
        self.assertGreater(len(kwargs['argument_names']), 1)

        # Same name, shorter signature.
        kwargs['argument_types'] = kwargs['argument_types'][:1]
        kwargs['argument_names'] = kwargs['argument_names'][:1]
        with self.VerifiedFunction(self, **kwargs):
            functions = [f for f in self.keyspace_function_meta.values() if f.name == self.function_name]
            self.assertEqual(len(functions), 2)
            self.assertNotEqual(functions[0].argument_types, functions[1].argument_types)
|
def test_function_no_parameters(self):
    """
    Test to verify CQL output for functions with zero parameters.

    Creates a function with no input parameters, verify that CQL output is correct.

    @since 2.7.1
    @jira_ticket PYTHON-392
    @expected_result function with no parameters should generate proper CQL
    @test_category function
    """
    kwargs = self.make_function_kwargs()
    kwargs['argument_types'] = []
    kwargs['argument_names'] = []
    kwargs['return_type'] = 'bigint'
    kwargs['body'] = 'return System.currentTimeMillis() / 1000L;'

    with self.VerifiedFunction(self, **kwargs) as vf:
        fn_meta = self.keyspace_function_meta[vf.signature]
        # An empty parameter list must render as "name()".
        self.assertRegexpMatches(fn_meta.as_cql_query(), 'CREATE FUNCTION.*%s\\(\\) .*' % kwargs['name'])
|
def test_functions_follow_keyspace_alter(self):
    """
    Test to verify that functions maintain equality after a keyspace is altered.

    Creates a function, then alters the keyspace associated with that function.
    After the alter we validate that the function maintains the same metadata.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result functions are the same after parent keyspace is altered
    @test_category function
    """
    with self.VerifiedFunction(self, **self.make_function_kwargs()):
        original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
        self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
        try:
            # Keyspace meta is rebuilt, but the functions mapping is carried over.
            new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
            self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
            self.assertIs(original_keyspace_meta.functions, new_keyspace_meta.functions)
        finally:
            # Restore the keyspace for subsequent tests.
            self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
|
def test_function_cql_called_on_null(self):
    """
    Test to verify that "called on null" argument is honored on function creation.

    Creates two functions: one with called_on_null_input set to true, the other
    with it set to false, and verifies the metadata reflects each correctly.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result functions metadata correctly reflects called_on_null_input flag.
    @test_category function
    """
    kwargs = self.make_function_kwargs()

    kwargs['called_on_null_input'] = True
    with self.VerifiedFunction(self, **kwargs) as vf:
        fn_meta = self.keyspace_function_meta[vf.signature]
        self.assertRegexpMatches(fn_meta.as_cql_query(), 'CREATE FUNCTION.*\\) CALLED ON NULL INPUT RETURNS .*')

    kwargs['called_on_null_input'] = False
    with self.VerifiedFunction(self, **kwargs) as vf:
        fn_meta = self.keyspace_function_meta[vf.signature]
        self.assertRegexpMatches(fn_meta.as_cql_query(), 'CREATE FUNCTION.*\\) RETURNS NULL ON NULL INPUT RETURNS .*')
|
def test_return_type_meta(self):
    """
    Test to verify that the return type of an aggregate is honored in the metadata.

    Creates an aggregate then ensures the return type of the created aggregate
    is correctly surfaced in the metadata.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result aggregate has the correct return type in the metadata
    @test_category aggregate
    """
    with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='1')) as va:
        self.assertEqual(self.keyspace_aggregate_meta[va.signature].return_type, 'int')
|
def test_init_cond(self):
    """
    Test to verify that various initial conditions are correctly surfaced in various aggregate functions.

    Creates several different types of aggregates and, given various initial
    conditions, verifies that they correctly impact the aggregate's execution.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result initial conditions are correctly evaluated as part of the aggregates
    @test_category aggregate
    """
    # This test requires the protocol-v3 representation of init_cond.
    c = Cluster(protocol_version=3)
    s = c.connect(self.keyspace_name)

    encoder = Encoder()
    expected_values = range(4)

    # Integer aggregate: result is init_cond + sum of the stored values.
    for init_cond in (-1, 0, 1):
        cql_init = encoder.cql_encode_all_types(init_cond)
        with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond=cql_init)) as va:
            sum_res = s.execute('SELECT %s(v) AS sum FROM t' % va.function_kwargs['name'])[0].sum
            self.assertEqual(sum_res, int(init_cond) + sum(expected_values))

    # List aggregate: the init_cond elements must appear first, then the values.
    for init_cond in ([], ['1', '2']):
        cql_init = encoder.cql_encode_all_types(init_cond)
        with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>', init_cond=cql_init)) as va:
            list_res = s.execute('SELECT %s(v) AS list_res FROM t' % va.function_kwargs['name'])[0].list_res
            self.assertListEqual(list_res[:len(init_cond)], init_cond)
            self.assertEqual(set(i for i in list_res[len(init_cond):]),
                             set(str(i) for i in expected_values))

    # Map aggregate: stored values overwrite colliding init keys; non-colliding
    # init entries must survive untouched.
    expected_map_values = dict((i, i) for i in expected_values)
    expected_key_set = set(expected_values)
    for init_cond in ({}, {1: 2, 3: 4}, {5: 5}):
        cql_init = encoder.cql_encode_all_types(init_cond)
        with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('update_map', 'map<int, int>', init_cond=cql_init)) as va:
            map_res = s.execute('SELECT %s(v) AS map_res FROM t' % va.function_kwargs['name'])[0].map_res
            self.assertDictContainsSubset(expected_map_values, map_res)
            init_not_updated = dict((k, init_cond[k]) for k in set(init_cond) - expected_key_set)
            self.assertDictContainsSubset(init_not_updated, map_res)
    c.shutdown()
|
def test_aggregates_after_functions(self):
    """
    Test to verify that aggregates are listed after functions in metadata.

    Creates an aggregate and then verifies that aggregates are listed after any
    function creations when the keyspace dump is performed.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result aggregates are declared after any functions
    @test_category aggregate
    """
    with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('extend_list', 'list<text>')):
        keyspace_cql = self.cluster.metadata.keyspaces[self.keyspace_name].export_as_string()
        # The first FUNCTION must precede the last AGGREGATE.
        func_idx = keyspace_cql.find('CREATE FUNCTION')
        aggregate_idx = keyspace_cql.rfind('CREATE AGGREGATE')
        self.assertNotIn(-1, (aggregate_idx, func_idx), 'AGGREGATE or FUNCTION not found in keyspace_cql: ' + keyspace_cql)
        self.assertGreater(aggregate_idx, func_idx)
|
def test_same_name_diff_types(self):
    """
    Test to verify that aggregates with different signatures are differentiated in metadata.

    Creates two aggregates with the same name but slightly different signatures,
    then ensures that both are surfaced separately in our metadata.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result aggregates with the same name but different signatures should be surfaced separately
    @test_category function
    """
    kwargs = self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')
    with self.VerifiedAggregate(self, **kwargs):
        # Same name, different state function and argument list.
        kwargs['state_func'] = 'sum_int_two'
        kwargs['argument_types'] = ['int', 'int']
        with self.VerifiedAggregate(self, **kwargs):
            aggregates = [a for a in self.keyspace_aggregate_meta.values() if a.name == kwargs['name']]
            self.assertEqual(len(aggregates), 2)
            self.assertNotEqual(aggregates[0].argument_types, aggregates[1].argument_types)
|
def test_aggregates_follow_keyspace_alter(self):
    """
    Test to verify that aggregates maintain equality after a keyspace is altered.

    Creates an aggregate, then alters the keyspace associated with it. After the
    alter we validate that the aggregate maintains the same metadata.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result aggregates are the same after parent keyspace is altered
    @test_category function
    """
    with self.VerifiedAggregate(self, **self.make_aggregate_kwargs('sum_int', 'int', init_cond='0')):
        original_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
        self.session.execute('ALTER KEYSPACE %s WITH durable_writes = false' % self.keyspace_name)
        try:
            # Keyspace meta is rebuilt, but the aggregates mapping is carried over.
            new_keyspace_meta = self.cluster.metadata.keyspaces[self.keyspace_name]
            self.assertNotEqual(original_keyspace_meta, new_keyspace_meta)
            self.assertIs(original_keyspace_meta.aggregates, new_keyspace_meta.aggregates)
        finally:
            # Restore the keyspace for subsequent tests.
            self.session.execute('ALTER KEYSPACE %s WITH durable_writes = true' % self.keyspace_name)
|
def test_cql_optional_params(self):
    """
    Test to verify that the initial_cond and final_func parameters are correctly honored.

    Creates various aggregates with different combinations of initial_condition
    and final_func parameters set, then ensures they are correctly honored.

    @since 2.6.0
    @jira_ticket PYTHON-211
    @expected_result initial_condition and final_func parameters are honored correctly
    @test_category function
    """
    kwargs = self.make_aggregate_kwargs('extend_list', 'list<text>')
    encoder = Encoder()

    # Both unset: neither INITCOND nor FINALFUNC appears in the CQL.
    self.assertIsNone(kwargs['initial_condition'])
    self.assertIsNone(kwargs['final_func'])
    with self.VerifiedAggregate(self, **kwargs) as va:
        meta = self.keyspace_aggregate_meta[va.signature]
        self.assertIsNone(meta.initial_condition)
        self.assertIsNone(meta.final_func)
        cql = meta.as_cql_query()
        self.assertEqual(cql.find('INITCOND'), -1)
        self.assertEqual(cql.find('FINALFUNC'), -1)

    # INITCOND only.
    kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
    with self.VerifiedAggregate(self, **kwargs) as va:
        meta = self.keyspace_aggregate_meta[va.signature]
        self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
        self.assertIsNone(meta.final_func)
        cql = meta.as_cql_query()
        search_string = 'INITCOND %s' % kwargs['initial_condition']
        self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))
        self.assertEqual(cql.find('FINALFUNC'), -1)

    # FINALFUNC only.
    kwargs['initial_condition'] = None
    kwargs['final_func'] = 'List_As_String'
    with self.VerifiedAggregate(self, **kwargs) as va:
        meta = self.keyspace_aggregate_meta[va.signature]
        self.assertIsNone(meta.initial_condition)
        self.assertEqual(meta.final_func, kwargs['final_func'])
        cql = meta.as_cql_query()
        self.assertEqual(cql.find('INITCOND'), -1)
        search_string = 'FINALFUNC "%s"' % kwargs['final_func']
        self.assertGreater(cql.find(search_string), 0, '"%s" search string not found in cql:\n%s' % (search_string, cql))

    # Both set: FINALFUNC precedes INITCOND in the generated CQL.
    kwargs['initial_condition'] = encoder.cql_encode_all_types(['init', 'cond'])
    kwargs['final_func'] = 'List_As_String'
    with self.VerifiedAggregate(self, **kwargs) as va:
        meta = self.keyspace_aggregate_meta[va.signature]
        self.assertEqual(meta.initial_condition, kwargs['initial_condition'])
        self.assertEqual(meta.final_func, kwargs['final_func'])
        cql = meta.as_cql_query()
        init_cond_idx = cql.find('INITCOND %s' % kwargs['initial_condition'])
        final_func_idx = cql.find('FINALFUNC "%s"' % kwargs['final_func'])
        self.assertNotIn(-1, (init_cond_idx, final_func_idx))
        self.assertGreater(init_cond_idx, final_func_idx)
|
def test_dct_alias(self):
    """
    Tests to make sure DCT's have correct string formatting.

    Constructs a DCT and checks the format as generated, to ensure it matches
    what is expected.

    @since 3.6.0
    @jira_ticket PYTHON-579
    @expected_result DCT subtypes should always have fully qualified names
    @test_category metadata
    """
    self.session.execute("CREATE TABLE {0}.{1} (k int PRIMARY KEY,c1 'DynamicCompositeType(s => UTF8Type, i => Int32Type)',c2 Text)".format(self.ks_name, self.function_table_name))
    dct_table = self.cluster.metadata.keyspaces.get(self.ks_name).tables.get(self.function_table_name)
    # Compare whitespace-stripped CQL so formatting differences don't matter;
    # the subtypes must be fully qualified.
    self.assertTrue("c1'org.apache.cassandra.db.marshal.DynamicCompositeType(s=>org.apache.cassandra.db.marshal.UTF8Type,i=>org.apache.cassandra.db.marshal.Int32Type)'" in dct_table.as_cql_query().replace(' ', ''))
|
def test_materialized_view_metadata_creation(self):
    """
    Test for materialized view metadata creation.

    Tests that materialized view metadata is properly created implicitly in both
    keyspace and table metadata under "views". It creates a simple base table and
    then creates a view based on that table. It then checks that the materialized
    view metadata is contained in the keyspace and table metadata. Finally, it
    checks that the keyspace_name and the base_table_name in the view metadata
    are properly set.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata in both the ks and table should be created with a new view is created.
    @test_category metadata
    """
    self.assertIn('mv1', self.cluster.metadata.keyspaces[self.keyspace_name].views)
    self.assertIn('mv1', self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
    self.assertEqual(self.keyspace_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'].keyspace_name)
    self.assertEqual(self.function_table_name, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'].base_table_name)
|
def test_materialized_view_metadata_alter(self):
    """
    Test for materialized view metadata alteration.

    Tests that materialized view metadata is properly updated implicitly in the
    table metadata once that view is updated. It alters a materialized view and
    checks that the materialized view metadata is altered in the table metadata.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata should be updated with the view is altered.
    @test_category metadata
    """
    self.assertIn('SizeTieredCompactionStrategy', self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'].options['compaction']['class'])

    self.session.execute("ALTER MATERIALIZED VIEW {0}.mv1 WITH compaction = {{ 'class' : 'LeveledCompactionStrategy' }}".format(self.keyspace_name))

    self.assertIn('LeveledCompactionStrategy', self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views['mv1'].options['compaction']['class'])
|
def test_materialized_view_metadata_drop(self):
    """
    Test for materialized view metadata dropping.

    Tests that materialized view metadata is properly removed implicitly from
    both keyspace and table metadata once that view is dropped.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata in both the ks and table should be removed with the view is dropped.
    @test_category metadata
    """
    self.session.execute('DROP MATERIALIZED VIEW {0}.mv1'.format(self.keyspace_name))

    self.assertNotIn('mv1', self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
    self.assertNotIn('mv1', self.cluster.metadata.keyspaces[self.keyspace_name].views)
    self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].tables[self.function_table_name].views)
    self.assertDictEqual({}, self.cluster.metadata.keyspaces[self.keyspace_name].views)

    # Re-create the view so fixture teardown / later tests find it in place.
    self.session.execute('CREATE MATERIALIZED VIEW {0}.mv1 AS SELECT c FROM {0}.{1} WHERE c IS NOT NULL PRIMARY KEY (pk, c)'.format(self.keyspace_name, self.function_table_name))
|
def test_create_view_metadata(self):
    """
    Test to ensure that materialized view metadata is properly constructed.

    Runs a simple query to construct a materialized view, then proceeds to
    inspect the metadata associated with that MV. Columns are inspected to
    ensure that all are of the proper type, and in the proper order.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata should be constructed appropriately.
    @test_category metadata
    """
    create_table = 'CREATE TABLE {0}.scores(\n user TEXT,\n game TEXT,\n year INT,\n month INT,\n day INT,\n score INT,\n PRIMARY KEY (user, game, year, month, day)\n )'.format(self.keyspace_name)
    self.session.execute(create_table)

    create_mv = 'CREATE MATERIALIZED VIEW {0}.monthlyhigh AS\n SELECT game, year, month, score, user, day FROM {0}.scores\n WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY ((game, year, month), score, user, day)\n WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)'.format(self.keyspace_name)
    self.session.execute(create_mv)

    score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
    mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh']

    # Base table metadata.
    self.assertIsNotNone(score_table.views['monthlyhigh'])
    # BUGFIX: was assertIsNotNone(len(...), 1) — second arg is a failure
    # message, not an expected value, so the count was never checked.
    self.assertEqual(len(score_table.views), 1)

    # Base table partition/clustering keys reference the same column objects
    # as the columns mapping (was assertTrue(a, b), which always passed).
    self.assertEqual(len(score_table.partition_key), 1)
    self.assertIsNotNone(score_table.columns['user'])
    self.assertIs(score_table.columns['user'], score_table.partition_key[0])

    self.assertEqual(len(score_table.clustering_key), 4)
    self.assertIsNotNone(score_table.columns['game'])
    self.assertIs(score_table.columns['game'], score_table.clustering_key[0])
    self.assertIsNotNone(score_table.columns['year'])
    self.assertIs(score_table.columns['year'], score_table.clustering_key[1])
    self.assertIsNotNone(score_table.columns['month'])
    self.assertIs(score_table.columns['month'], score_table.clustering_key[2])
    self.assertIsNotNone(score_table.columns['day'])
    self.assertIs(score_table.columns['day'], score_table.clustering_key[3])
    self.assertIsNotNone(score_table.columns['score'])

    # View metadata.
    self.assertEqual(mv.keyspace_name, self.keyspace_name)
    self.assertEqual(mv.name, 'monthlyhigh')
    self.assertEqual(mv.base_table_name, 'scores')
    self.assertFalse(mv.include_all_columns)

    # Columns appear in primary-key order: partition key first...
    mv_columns = list(mv.columns.values())
    self.assertEqual(len(mv_columns), 6)

    game_column = mv_columns[0]
    self.assertIsNotNone(game_column)
    self.assertEqual(game_column.name, 'game')
    self.assertEqual(game_column, mv.partition_key[0])

    year_column = mv_columns[1]
    self.assertIsNotNone(year_column)
    self.assertEqual(year_column.name, 'year')
    self.assertEqual(year_column, mv.partition_key[1])

    month_column = mv_columns[2]
    self.assertIsNotNone(month_column)
    self.assertEqual(month_column.name, 'month')
    self.assertEqual(month_column, mv.partition_key[2])

    def compare_columns(a, b, name):
        # Field-by-field comparison of two column metadata objects.
        self.assertEqual(a.name, name)
        self.assertEqual(a.name, b.name)
        self.assertEqual(a.table, b.table)
        self.assertEqual(a.cql_type, b.cql_type)
        self.assertEqual(a.is_static, b.is_static)
        self.assertEqual(a.is_reversed, b.is_reversed)

    # ...then the clustering columns.
    score_column = mv_columns[3]
    compare_columns(score_column, mv.clustering_key[0], 'score')
    user_column = mv_columns[4]
    compare_columns(user_column, mv.clustering_key[1], 'user')
    day_column = mv_columns[5]
    compare_columns(day_column, mv.clustering_key[2], 'day')
|
def test_base_table_column_addition_mv(self):
    """
    Test to ensure that materialized view metadata is properly updated when base columns are added.

    Tests that materialized views metadata is properly updated when columns are
    added to the base table.

    @since 3.0.0
    @jira_ticket PYTHON-419
    @expected_result Materialized view metadata should be updated correctly
    @test_category metadata
    """
    create_table = 'CREATE TABLE {0}.scores(\n user TEXT,\n game TEXT,\n year INT,\n month INT,\n day INT,\n score TEXT,\n PRIMARY KEY (user, game, year, month, day)\n )'.format(self.keyspace_name)
    self.session.execute(create_table)

    create_mv = 'CREATE MATERIALIZED VIEW {0}.monthlyhigh AS\n SELECT game, year, month, score, user, day FROM {0}.scores\n WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY ((game, year, month), score, user, day)\n WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)'.format(self.keyspace_name)
    # alltimehigh uses SELECT *, so new base columns must propagate to it.
    create_mv_alltime = 'CREATE MATERIALIZED VIEW {0}.alltimehigh AS\n SELECT * FROM {0}.scores\n WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY (game, score, user, year, month, day)\n WITH CLUSTERING ORDER BY (score DESC)'.format(self.keyspace_name)

    self.session.execute(create_mv)
    self.session.execute(create_mv_alltime)

    score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
    self.assertIsNotNone(score_table.views['monthlyhigh'])
    self.assertIsNotNone(score_table.views['alltimehigh'])
    self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)

    insert_fouls = 'ALTER TABLE {0}.scores ADD fouls INT'.format(self.keyspace_name)
    self.session.execute(insert_fouls)
    self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 2)

    score_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores']
    self.assertIn('fouls', score_table.columns)

    # The view column may appear asynchronously; poll briefly (up to ~2s).
    for i in range(10):
        mv_alltime = self.cluster.metadata.keyspaces[self.keyspace_name].views['alltimehigh']
        if 'fouls' in mv_alltime.columns:
            break
        time.sleep(0.2)

    self.assertIn('fouls', mv_alltime.columns)
    mv_alltime_fouls_comumn = self.cluster.metadata.keyspaces[self.keyspace_name].views['alltimehigh'].columns['fouls']
    self.assertEqual(mv_alltime_fouls_comumn.cql_type, 'int')
|
@lessthancass30
def test_base_table_type_alter_mv(self):
    """
    Test to ensure that materialized view metadata is properly updated when a type in the base table is updated.

    Tests that materialized views metadata is properly updated when the type of
    a base table column is changed.

    @since 3.0.0
    @jira_ticket CASSANDRA-10424
    @expected_result Materialized view metadata should be updated correctly
    @test_category metadata
    """
    create_table = 'CREATE TABLE {0}.scores(\n user TEXT,\n game TEXT,\n year INT,\n month INT,\n day INT,\n score TEXT,\n PRIMARY KEY (user, game, year, month, day)\n )'.format(self.keyspace_name)
    self.session.execute(create_table)

    create_mv = 'CREATE MATERIALIZED VIEW {0}.monthlyhigh AS\n SELECT game, year, month, score, user, day FROM {0}.scores\n WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL\n PRIMARY KEY ((game, year, month), score, user, day)\n WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)'.format(self.keyspace_name)
    self.session.execute(create_mv)
    self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)

    alter_scores = 'ALTER TABLE {0}.scores ALTER score TYPE blob'.format(self.keyspace_name)
    self.session.execute(alter_scores)
    self.assertEqual(len(self.cluster.metadata.keyspaces[self.keyspace_name].views), 1)

    score_column = self.cluster.metadata.keyspaces[self.keyspace_name].tables['scores'].columns['score']
    self.assertEqual(score_column.cql_type, 'blob')

    # The view column type may update asynchronously; poll briefly (up to ~2s).
    for i in range(10):
        score_mv_column = self.cluster.metadata.keyspaces[self.keyspace_name].views['monthlyhigh'].columns['score']
        if 'blob' == score_mv_column.cql_type:
            break
        time.sleep(0.2)

    self.assertEqual(score_mv_column.cql_type, 'blob')
|
def test_metadata_with_quoted_identifiers(self):
    """
    Test to ensure that materialized view metadata is properly constructed when quoted identifiers are used.

    Runs a simple query to construct a materialized view, then proceeds to
    inspect the metadata associated with that MV. The caveat here is that the
    table and the materialized view both have quoted identifiers. Columns are
    inspected to ensure that all are of the proper type, and in the proper order.

    @since 3.0.0
    @jira_ticket PYTHON-371
    @expected_result Materialized view metadata should be constructed appropriately even with quoted identifiers.
    @test_category metadata
    """
    create_table = 'CREATE TABLE {0}.t1 (\n "theKey" int,\n "the;Clustering" int,\n "the Value" int,\n PRIMARY KEY ("theKey", "the;Clustering"))'.format(self.keyspace_name)
    self.session.execute(create_table)

    create_mv = 'CREATE MATERIALIZED VIEW {0}.mv1 AS\n SELECT "theKey", "the;Clustering", "the Value"\n FROM {0}.t1\n WHERE "theKey" IS NOT NULL AND "the;Clustering" IS NOT NULL AND "the Value" IS NOT NULL\n PRIMARY KEY ("theKey", "the;Clustering")'.format(self.keyspace_name)
    self.session.execute(create_mv)

    t1_table = self.cluster.metadata.keyspaces[self.keyspace_name].tables['t1']
    mv = self.cluster.metadata.keyspaces[self.keyspace_name].views['mv1']

    # Base table metadata.
    self.assertIsNotNone(t1_table.views['mv1'])
    # BUGFIX: was assertIsNotNone(len(...), 1) — second arg is a failure
    # message, not an expected value, so the count was never checked.
    self.assertEqual(len(t1_table.views), 1)

    # Key columns reference the same objects as the columns mapping
    # (was assertTrue(a, b), which always passed).
    self.assertEqual(len(t1_table.partition_key), 1)
    self.assertIsNotNone(t1_table.columns['theKey'])
    self.assertIs(t1_table.columns['theKey'], t1_table.partition_key[0])

    self.assertEqual(len(t1_table.clustering_key), 1)
    self.assertIsNotNone(t1_table.columns['the;Clustering'])
    self.assertIs(t1_table.columns['the;Clustering'], t1_table.clustering_key[0])

    self.assertIsNotNone(t1_table.columns['the Value'])

    # View metadata.
    self.assertEqual(mv.keyspace_name, self.keyspace_name)
    self.assertEqual(mv.name, 'mv1')
    self.assertEqual(mv.base_table_name, 't1')
    self.assertFalse(mv.include_all_columns)

    mv_columns = list(mv.columns.values())
    self.assertEqual(len(mv_columns), 3)

    theKey_column = mv_columns[0]
    self.assertIsNotNone(theKey_column)
    self.assertEqual(theKey_column.name, 'theKey')
    self.assertEqual(theKey_column, mv.partition_key[0])

    cluster_column = mv_columns[1]
    self.assertIsNotNone(cluster_column)
    self.assertEqual(cluster_column.name, 'the;Clustering')
    self.assertEqual(cluster_column.name, mv.clustering_key[0].name)
    self.assertEqual(cluster_column.table, mv.clustering_key[0].table)
    self.assertEqual(cluster_column.is_static, mv.clustering_key[0].is_static)
    self.assertEqual(cluster_column.is_reversed, mv.clustering_key[0].is_reversed)

    value_column = mv_columns[2]
    self.assertIsNotNone(value_column)
    self.assertEqual(value_column.name, 'the Value')
|
'Test to ensure DSE metadata is populated appropriately.
@since 3.4
@jira_ticket PYTHON-555
@expected_result metadata for dse_version, and dse_workload should be populated on dse clusters
@test_category metadata'
def test_dse_specific_meta(self):
    """Verify DSE-specific host metadata (dse_version, dse_workload) is populated on every host."""
    for host in self.cluster.metadata.all_hosts():
        # dse_version must be present and match the DSE version under test.
        self.assertIsNotNone(host.dse_version, 'Dse version not populated as expected')
        self.assertEqual(host.dse_version, DSE_VERSION)
        # The default workload reports as a Cassandra workload.
        self.assertTrue(('Cassandra' in host.dse_workload))
|
'Test basic PreparedStatement usage'
def test_basic(self):
    """End-to-end PreparedStatement flow: prepare, bind (tuple and dict), execute, read back."""
    self.session.execute('\n DROP KEYSPACE IF EXISTS preparedtests\n ')
    self.session.execute("\n CREATE KEYSPACE preparedtests\n WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}\n ")
    self.session.set_keyspace('preparedtests')
    self.session.execute('\n CREATE TABLE cf0 (\n a text,\n b text,\n c text,\n PRIMARY KEY (a, b)\n )\n ')
    prepared = self.session.prepare('\n INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    # Positional (tuple) binding.
    bound = prepared.bind(('a', 'b', 'c'))
    self.session.execute(bound)
    prepared = self.session.prepare('\n SELECT * FROM cf0 WHERE a=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind('a')
    results = self.session.execute(bound)
    self.assertEqual(results, [('a', 'b', 'c')])
    prepared = self.session.prepare('\n INSERT INTO cf0 (a, b, c) VALUES (?, ?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    # Named (dict) binding.
    bound = prepared.bind({'a': 'x', 'b': 'y', 'c': 'z'})
    self.session.execute(bound)
    prepared = self.session.prepare('\n SELECT * FROM cf0 WHERE a=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind({'a': 'x'})
    results = self.session.execute(bound)
    self.assertEqual(results, [('x', 'y', 'z')])
|
'Ensure an InvalidRequest is thrown
when prepared statements are missing the primary key'
def test_missing_primary_key(self):
    """InvalidRequest is expected when a prepared insert omits the primary key (delegates to shared helper)."""
    self._run_missing_primary_key(self.session)
|
'Ensure an InvalidRequest is thrown
when prepared statements are missing the primary key
with dict bindings'
def test_missing_primary_key_dicts(self):
    """Same as test_missing_primary_key, but with dict bindings (delegates to shared helper)."""
    self._run_missing_primary_key_dicts(self.session)
|
'Ensure a ValueError is thrown when attempting to bind too many variables'
def test_too_many_bind_values(self):
    """ValueError is expected when binding more values than the statement has markers (delegates to shared helper)."""
    self._run_too_many_bind_values(self.session)
|
'Ensure an error is thrown when attempting to bind the wrong values
with dict bindings'
def test_imprecise_bind_values_dicts(self):
    """Dict binding with extra/missing keys: extras don't raise; missing keys raise pre-v4, are tolerated on v4+."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    # An extra key ('v2') alongside all required keys does not raise.
    prepared.bind({'k': 1, 'v': 2, 'v2': 3})
    if (PROTOCOL_VERSION < 4):
        # Pre-v4: a missing required key is a KeyError.
        self.assertRaises(KeyError, prepared.bind, {'k': 1, 'v2': 3})
    else:
        # v4+: the missing key is accepted without raising.
        prepared.bind({'k': 1, 'v2': 3})
    self.assertIsInstance(prepared, PreparedStatement)
    if (PROTOCOL_VERSION < 4):
        self.assertRaises(KeyError, prepared.bind, {})
    else:
        # v4+: an entirely empty binding is a ValueError instead.
        self.assertRaises(ValueError, prepared.bind, {})
|
'Ensure binding None is handled correctly'
def test_none_values(self):
    """Binding None for a column stores a null that reads back as None."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1, None))
    self.session.execute(bound)
    prepared = self.session.prepare('\n SELECT * FROM test3rf.test WHERE k=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind((1,))
    results = self.session.execute(bound)
    self.assertEqual(results[0].v, None)
|
'Test to validate that UNSET_VALUEs are bound, and have the expected effect
Prepare a statement and insert all values. Then follow with execute excluding
parameters. Verify that the original values are unaffected.
@since 2.6.0
@jira_ticket PYTHON-317
@expected_result UNSET_VALUE is implicitly added to bind parameters, and properly encoded, leaving unset values unaffected.
@test_category prepared_statements:binding'
def test_unset_values(self):
    """UNSET_VALUE bindings (explicit or implied by omitted dict keys) leave existing column values untouched (PYTHON-317)."""
    if (PROTOCOL_VERSION < 4):
        raise unittest.SkipTest('Binding UNSET values is not supported in protocol version < 4')
    self.session.execute('CREATE TABLE IF NOT EXISTS test1rf.test_unset_values (k int PRIMARY KEY, v0 int, v1 int)')
    insert = self.session.prepare('INSERT INTO test1rf.test_unset_values (k, v0, v1) VALUES (?, ?, ?)')
    select = self.session.prepare('SELECT * FROM test1rf.test_unset_values WHERE k=?')
    # Each pair is (bind parameters, full expected row after that insert).
    bind_expected = [((0, 0, 0), (0, 0, 0)), ((0, 1), (0, 1, 0)), ({'k': 0, 'v0': 2}, (0, 2, 0)), ({'k': 0, 'v1': 1}, (0, 2, 1)), ((0, 3, UNSET_VALUE), (0, 3, 1)), ((0, UNSET_VALUE, 2), (0, 3, 2)), ({'k': 0, 'v0': 4, 'v1': UNSET_VALUE}, (0, 4, 2)), ({'k': 0, 'v0': UNSET_VALUE, 'v1': 3}, (0, 4, 3)), ((0, None, None), (0, None, None))]
    for (params, expected) in bind_expected:
        self.session.execute(insert, params)
        results = self.session.execute(select, (0,))
        self.assertEqual(results[0], expected)
    # UNSET_VALUE is never legal for a primary-key / where-clause parameter.
    self.assertRaises(ValueError, self.session.execute, select, (UNSET_VALUE, 0, 0))
|
'Ensure binding None is handled correctly with dict bindings'
def test_none_values_dicts(self):
    """Binding None via a dict stores a null that reads back as None."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind({'k': 1, 'v': None})
    self.session.execute(bound)
    prepared = self.session.prepare('\n SELECT * FROM test3rf.test WHERE k=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    bound = prepared.bind({'k': 1})
    results = self.session.execute(bound)
    self.assertEqual(results[0].v, None)
|
'Ensure None binding over async queries'
def test_async_binding(self):
    """Binding None works through execute_async with positional parameters."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    future = self.session.execute_async(prepared, (873, None))
    # Block for completion before reading back.
    future.result()
    prepared = self.session.prepare('\n SELECT * FROM test3rf.test WHERE k=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    future = self.session.execute_async(prepared, (873,))
    results = future.result()
    self.assertEqual(results[0].v, None)
|
'Ensure None binding over async queries with dict bindings'
def test_async_binding_dicts(self):
    """Binding None works through execute_async with dict parameters."""
    prepared = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    future = self.session.execute_async(prepared, {'k': 873, 'v': None})
    # Block for completion before reading back.
    future.result()
    prepared = self.session.prepare('\n SELECT * FROM test3rf.test WHERE k=?\n ')
    self.assertIsInstance(prepared, PreparedStatement)
    future = self.session.execute_async(prepared, {'k': 873})
    results = future.result()
    self.assertEqual(results[0].v, None)
|
'test for error in executing prepared statement on a dropped table
test_raise_error_on_execute_prepared_statement_dropped_table tests that an InvalidRequest is raised when a
prepared statement is executed after its corresponding table is dropped. This happens because if a prepared
statement is invalid, the driver attempts to automatically re-prepare it on a non-existing table.
@expected_errors InvalidRequest If a prepared statement is executed on a dropped table
@since 2.6.0
@jira_ticket PYTHON-207
@expected_result InvalidRequest error should be raised upon prepared statement execution.
@test_category prepared_statements'
def test_raise_error_on_prepared_statement_execution_dropped_table(self):
    """Executing a prepared statement after its table is dropped raises InvalidRequest (PYTHON-207).

    The driver attempts to re-prepare an invalidated statement; against a
    dropped table that re-preparation fails with InvalidRequest.
    """
    self.session.execute('CREATE TABLE test3rf.error_test (k int PRIMARY KEY, v int)')
    prepared = self.session.prepare('SELECT * FROM test3rf.error_test WHERE k=?')
    self.session.execute('DROP TABLE test3rf.error_test')
    with self.assertRaises(InvalidRequest):
        self.session.execute(prepared, [0])
|
'Tests to make sure cached metadata is updated when an invalidated prepared statement is reprepared.
@since 2.7.0
@jira_ticket PYTHON-621
Prior to this fix, the request would blow up with a protocol error when the result was decoded expecting a different
number of columns.'
@unittest.skip
def test_invalidated_result_metadata(self):
    """Cached result metadata must be refreshed when an invalidated prepared statement is re-prepared (PYTHON-621)."""
    s = self.session
    # NOTE(review): Session normally exposes `row_factory`; confirm `result_factory` is intended here.
    s.result_factory = tuple_factory
    table = ('test1rf.%s' % self._testMethodName.lower())
    s.execute(('DROP TABLE IF EXISTS %s' % table))
    s.execute(('CREATE TABLE %s (k int PRIMARY KEY, a int, b int, c int)' % table))
    s.execute(('INSERT INTO %s (k, a, b, c) VALUES (0, 0, 0, 0)' % table))
    wildcard_prepared = s.prepare(('SELECT * FROM %s' % table))
    original_result_metadata = wildcard_prepared.result_metadata
    self.assertEqual(len(original_result_metadata), 4)
    r = s.execute(wildcard_prepared)
    self.assertEqual(r[0], (0, 0, 0, 0))
    # Dropping a column invalidates the server-side prepared statement.
    s.execute(('ALTER TABLE %s DROP c' % table))
    futures = set((s.execute_async(wildcard_prepared.bind(None)) for _ in range(200)))
    for f in futures:
        self.assertEqual(f.result()[0], (0, 0, 0))
    # The metadata object must have been replaced by the re-prepare.
    self.assertIsNot(wildcard_prepared.result_metadata, original_result_metadata)
    s.execute(('DROP TABLE %s' % table))
|
'Test to ensure that same named results are surfaced in the NamedTupleFactory
Creates a table with a few different text fields. Inserts a few values in that table.
It then fetches the values and confirms that despite all be being selected as the same name
they are propagated in the result set differently.
@since 3.3
@jira_ticket PYTHON-467
@expected_result duplicate named results have unique row names.
@test_category queries'
def test_sanitizing(self):
    """Duplicate SELECT aliases are de-duplicated with trailing underscores in named-tuple rows (PYTHON-467)."""
    for x in range(5):
        insert1 = "\n INSERT INTO {0}.{1}\n ( k , v1, v2, v3 )\n VALUES\n ( 1 , 'v1{2}', 'v2{2}','v3{2}' )\n ".format(self.keyspace_name, self.function_table_name, str(x))
        self.session.execute(insert1)
    # NOTE(review): insert uses keyspace_name while select uses ks_name — presumably
    # aliases for the same keyspace on the test base class; confirm.
    query = 'SELECT v1 AS duplicate, v2 AS duplicate, v3 AS duplicate from {0}.{1}'.format(self.ks_name, self.function_table_name)
    rs = self.session.execute(query)
    row = rs[0]
    # Three same-named aliases surface as duplicate, duplicate_, duplicate__.
    self.assertTrue(hasattr(row, 'duplicate'))
    self.assertTrue(hasattr(row, 'duplicate_'))
    self.assertTrue(hasattr(row, 'duplicate__'))
|
'no exception on SELECT for numeric column name'
def test_no_exception_on_select(self):
    """SELECT * on a table with a numeric column name must not raise ValueError."""
    try:
        self.session.execute('SELECT * FROM test1rf.table_num_col')
    except ValueError as e:
        # Format the exception directly rather than via e.message:
        # BaseException.message is deprecated since Python 2.6 (PEP 352)
        # and removed in Python 3.
        self.fail(('Unexpected ValueError exception: %s' % e))
|
'can SELECT "<numeric col name>" AS aliases'
def test_can_select_using_alias(self):
    """Selecting a quoted numeric column name with an alias must not raise ValueError."""
    if (self._cass_version < (2, 0, 0)):
        raise unittest.SkipTest('Alias in SELECT not supported before 2.0')
    try:
        self.session.execute('SELECT key, "626972746864617465" AS my_col from test1rf.table_num_col')
    except ValueError as e:
        # Format the exception directly rather than via e.message:
        # BaseException.message is deprecated since Python 2.6 (PEP 352)
        # and removed in Python 3.
        self.fail(('Unexpected ValueError exception: %s' % e))
|
'can SELECT numeric column using dict_factory'
def test_can_select_with_dict_factory(self):
    """Selecting a numeric column name with dict_factory rows must not raise ValueError."""
    self.session.row_factory = dict_factory
    try:
        self.session.execute('SELECT * FROM test1rf.table_num_col')
    except ValueError as e:
        # Format the exception directly rather than via e.message:
        # BaseException.message is deprecated since Python 2.6 (PEP 352)
        # and removed in Python 3.
        self.fail(('Unexpected ValueError exception: %s' % e))
|
'Trigger and ensure connection_errors are counted
Stop all node with the driver knowing about the "DOWN" states.'
def test_connection_error(self):
    """connection_errors metric increments when all nodes are hard-stopped."""
    # Queue up a burst of async inserts, then hard-stop every node.
    for i in range(0, 100):
        self.session.execute_async('INSERT INTO test (k, v) VALUES ({0}, {1})'.format(i, i))
    get_cluster().stop(wait=True, gently=False)
    try:
        query = SimpleStatement('SELECT * FROM test', consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(NoHostAvailable):
            self.session.execute(query)
    finally:
        # Always restart the cluster so later tests have live nodes.
        get_cluster().start(wait_for_binary_proto=True, wait_other_notice=True)
        time.sleep(5)
    self.assertGreater(self.cluster.metrics.stats.connection_errors, 0)
|
'Trigger and ensure write_timeouts are counted
Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
Attempt a write at cl.ALL and receive a WriteTimeout.'
def test_write_timeout(self):
    """write_timeouts metric increments on a WriteTimeout at CL.ALL with a paused node."""
    self.session.execute('INSERT INTO test (k, v) VALUES (1, 1)')
    query = SimpleStatement('SELECT * FROM test WHERE k=1', consistency_level=ConsistencyLevel.ALL)
    results = execute_until_pass(self.session, query)
    self.assertTrue(results)
    # Pause (not stop) so the coordinator still believes the node is up.
    get_node(1).pause()
    try:
        query = SimpleStatement('INSERT INTO test (k, v) VALUES (2, 2)', consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(WriteTimeout):
            self.session.execute(query, timeout=None)
        self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
    finally:
        get_node(1).resume()
|
'Trigger and ensure read_timeouts are counted
Write a key, value pair. Pause a node without the coordinator node knowing about the "DOWN" state.
Attempt a read at cl.ALL and receive a ReadTimeout.'
def test_read_timeout(self):
    """read_timeouts metric increments on a ReadTimeout at CL.ALL with a paused node."""
    self.session.execute('INSERT INTO test (k, v) VALUES (1, 1)')
    query = SimpleStatement('SELECT * FROM test WHERE k=1', consistency_level=ConsistencyLevel.ALL)
    results = execute_until_pass(self.session, query)
    self.assertTrue(results)
    # Pause (not stop) so the coordinator still believes the node is up.
    get_node(1).pause()
    try:
        query = SimpleStatement('SELECT * FROM test', consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(ReadTimeout):
            self.session.execute(query, timeout=None)
        self.assertEqual(1, self.cluster.metrics.stats.read_timeouts)
    finally:
        get_node(1).resume()
|
'Trigger and ensure unavailables are counted
Write a key, value pair. Stop a node with the coordinator node knowing about the "DOWN" state.
Attempt an insert/read at cl.ALL and receive a Unavailable Exception.'
def test_unavailable(self):
    """unavailables metric increments when CL.ALL requests hit a known-down node."""
    self.session.execute('INSERT INTO test (k, v) VALUES (1, 1)')
    query = SimpleStatement('SELECT * FROM test WHERE k=1', consistency_level=ConsistencyLevel.ALL)
    results = execute_until_pass(self.session, query)
    self.assertTrue(results)
    # Stop (not pause) so the coordinator knows the node is down.
    get_node(1).stop(wait=True, wait_other_notice=True)
    time.sleep(5)
    try:
        query = SimpleStatement('INSERT INTO test (k, v) VALUES (2, 2)', consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(Unavailable):
            self.session.execute(query)
        self.assertEqual(self.cluster.metrics.stats.unavailables, 1)
        query = SimpleStatement('SELECT * FROM test', consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(Unavailable):
            self.session.execute(query, timeout=None)
        self.assertEqual(self.cluster.metrics.stats.unavailables, 2)
    finally:
        # Bring the node back before shutting down the test cluster.
        get_node(1).start(wait_other_notice=True, wait_for_binary_proto=True)
        time.sleep(5)
    self.cluster.shutdown()
|
'Test to validate that metrics can be scoped to individual clusters
@since 3.6.0
@jira_ticket PYTHON-561
@expected_result metrics should be scoped to a cluster level
@test_category metrics'
@local
def test_metrics_per_cluster(self):
    """Metrics are scoped per Cluster instance: errors on one cluster don't bleed into another (PYTHON-561)."""
    cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, default_retry_policy=FallthroughRetryPolicy())
    cluster2.connect(self.ks_name, wait_for_all_pools=True)
    self.assertEqual(len(cluster2.metadata.all_hosts()), 3)
    query = SimpleStatement('SELECT * FROM {0}.{0}'.format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
    self.session.execute(query)
    # Pause one node so a CL.ALL write through the primary cluster times out.
    get_node(1).pause()
    try:
        query = SimpleStatement('INSERT INTO {0}.{0} (k, v) VALUES (2, 2)'.format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
        with self.assertRaises(WriteTimeout):
            self.session.execute(query, timeout=None)
    finally:
        get_node(1).resume()
    # Only the primary cluster should have recorded the timeout; cluster2 stays clean.
    cluster2.metrics.set_stats_name('cluster2-metrics')
    stats_cluster1 = self.cluster.metrics.get_stats()
    stats_cluster2 = cluster2.metrics.get_stats()
    self.assertEqual(1, self.cluster.metrics.stats.write_timeouts)
    self.assertEqual(0, cluster2.metrics.stats.write_timeouts)
    self.assertNotEqual(0.0, self.cluster.metrics.request_timer['mean'])
    self.assertEqual(0.0, cluster2.metrics.request_timer['mean'])
    self.assertNotEqual(0.0, stats_cluster1['request_timer']['mean'])
    self.assertEqual(0.0, stats_cluster2['request_timer']['mean'])
    self.assertEqual(0.0, scales.getStats()['cluster2-metrics']['request_timer']['mean'])
    cluster2.shutdown()
|
'Test to validate that cluster metrics names can\'t overlap.
@since 3.6.0
@jira_ticket PYTHON-561
@expected_result metric names should not be allowed to be the same.
@test_category metrics'
def test_duplicate_metrics_per_cluster(self):
    """Duplicate metrics stats names across clusters are rejected; distinct names keep counters separate (PYTHON-561)."""
    cluster2 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, default_retry_policy=FallthroughRetryPolicy())
    cluster3 = Cluster(metrics_enabled=True, protocol_version=PROTOCOL_VERSION, default_retry_policy=FallthroughRetryPolicy())
    cluster2.metrics.set_stats_name('appcluster')
    # Setting the same name again on the same cluster does not raise...
    cluster2.metrics.set_stats_name('appcluster')
    # ...but reusing it from another cluster must fail.
    with self.assertRaises(ValueError):
        cluster3.metrics.set_stats_name('appcluster')
    cluster3.metrics.set_stats_name('devops')
    session2 = cluster2.connect(self.ks_name, wait_for_all_pools=True)
    session3 = cluster3.connect(self.ks_name, wait_for_all_pools=True)
    for i in range(10):
        query = SimpleStatement('SELECT * FROM {0}.{0}'.format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
        session2.execute(query)
    for i in range(5):
        query = SimpleStatement('SELECT * FROM {0}.{0}'.format(self.ks_name), consistency_level=ConsistencyLevel.ALL)
        session3.execute(query)
    # Each named registry counted only its own cluster's requests.
    self.assertEqual(cluster2.metrics.get_stats()['request_timer']['count'], 10)
    self.assertEqual(cluster3.metrics.get_stats()['request_timer']['count'], 5)
    self.assertTrue(('appcluster' in scales._Stats.stats.keys()))
    self.assertTrue(('devops' in scales._Stats.stats.keys()))
    cluster2.shutdown()
    cluster3.shutdown()
|
'Test to validate request listeners.
This test creates a simple metrics based request listener to track request size, it then
check to ensure that on_success and on_error methods are invoked appropriately.
@since 3.7.0
@jira_ticket PYTHON-284
@expected_result on_error and on_success should be invoked appropriately
@test_category metrics'
def test_metrics_per_cluster(self):
    """Request listeners: on_success and on_error fire the expected number of times (PYTHON-284).

    NOTE(review): this method shares its name with another test in this dump;
    if both live in the same class the later definition shadows the earlier
    one — confirm and rename if so.
    """
    ra = RequestAnalyzer(self.session)
    for _ in range(10):
        self.session.execute('SELECT release_version FROM system.local')
    for _ in range(3):
        try:
            self.session.execute('nonesense')
        except SyntaxException:
            continue
    self.assertTrue(self.wait_for_count(ra, 10))
    self.assertTrue(self.wait_for_count(ra, 3, error=True))
    ra.remove_ra(self.session)
    # Listeners that raise from their callbacks must not break request processing.
    ra = RequestAnalyzer(self.session, throw_on_success=False, throw_on_fail=True)
    self.session.execute('SELECT release_version FROM system.local')
    ra.remove_ra(self.session)
    RequestAnalyzer(self.session, throw_on_success=True)
    try:
        self.session.execute('nonesense')
    except SyntaxException:
        pass
|
'Test to ensure that connection id fetching will block when max_id is reached.
In previous versions of the driver this test will cause a
NoHostAvailable exception to be thrown, when the max_id is restricted
@since 3.3
@jira_ticket PYTHON-514
@expected_result When many requests are run on a single node connection acquisition should block
until connection is available or the request times out.
@test_category connection timeout'
def test_in_flight_timeout(self):
    """Connection-id acquisition should block (not fail) under many concurrent requests (PYTHON-514)."""
    query = 'SELECT * FROM system.local'
    # Fire off 100 concurrent requests, then wait on each result in turn.
    pending = [self.session.execute_async(query) for _ in range(100)]
    for pending_future in pending:
        pending_future.result()
|
'Helper method to solve automated testing issues within Jenkins.
Officially patched under the 2.0 branch through
17998ef72a2fe2e67d27dd602b6ced33a58ad8ef, but left as is for the
1.0 branch due to possible regressions for fixing an
automated testing edge-case.'
def get_connection(self, timeout=5):
    """Open a raw Connection to CASSANDRA_IP, retrying up to 5 times on transient errors.

    Raises the last connection error if every attempt fails.
    """
    conn = None
    e = None
    for i in range(5):
        try:
            contact_point = CASSANDRA_IP
            conn = self.klass.factory(host=contact_point, timeout=timeout, protocol_version=PROTOCOL_VERSION)
            break
        except (OperationTimedOut, NoHostAvailable, ConnectionShutdown) as e:
            continue
    if conn:
        return conn
    else:
        # Re-raise the last connection error. NOTE: relies on Python 2 keeping
        # `e` bound after the except block; Python 3 deletes it.
        raise e
|
'Test a single connection with sequential requests.'
def test_single_connection(self):
    """Ten sequential requests over one raw connection, chained via callbacks."""
    conn = self.get_connection()
    query = 'SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1'
    event = Event()

    def cb(count, *args, **kwargs):
        # Each response triggers the next request until 10 have completed.
        count += 1
        if (count >= 10):
            conn.close()
            event.set()
        else:
            # Requests are strictly sequential, so request_id 0 can be reused.
            conn.send_msg(QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE), request_id=0, cb=partial(cb, count))
    conn.send_msg(QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE), request_id=0, cb=partial(cb, 0))
    event.wait()
|
'Test a single connection with pipelined requests.'
def test_single_connection_pipelined_requests(self):
    """100 pipelined requests on one connection, each with its own request_id."""
    conn = self.get_connection()
    query = 'SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1'
    responses = ([False] * 100)
    event = Event()

    def cb(response_list, request_num, *args, **kwargs):
        response_list[request_num] = True
        # Close once every pipelined request has been answered.
        if all(response_list):
            conn.close()
            event.set()
    for i in range(100):
        conn.send_msg(QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE), request_id=i, cb=partial(cb, responses, i))
    event.wait()
|
'Test multiple connections with pipelined requests.'
def test_multiple_connections(self):
    """Five independent connections, each chaining ten callback-driven requests."""
    conns = [self.get_connection() for i in range(5)]
    events = [Event() for i in range(5)]
    query = 'SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1'

    def cb(event, conn, count, *args, **kwargs):
        count += 1
        if (count >= 10):
            conn.close()
            event.set()
        else:
            # Chain the next request on the same connection.
            conn.send_msg(QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE), request_id=count, cb=partial(cb, event, conn, count))
    for (event, conn) in zip(events, conns):
        conn.send_msg(QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE), request_id=0, cb=partial(cb, event, conn, 0))
    for event in events:
        event.wait()
|
'Test sharing a single connections across multiple threads,
which will result in pipelined requests.'
def test_multiple_threads_shared_connection(self):
    """Five threads pipeline requests over one shared connection."""
    num_requests_per_conn = 25
    num_threads = 5
    event = Event()
    conn = self.get_connection()
    query = 'SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1'

    def cb(all_responses, thread_responses, request_num, *args, **kwargs):
        thread_responses[request_num] = True
        # Close only when every thread's every request has completed.
        if all(map(all, all_responses)):
            conn.close()
            event.set()

    def send_msgs(all_responses, thread_responses):
        for i in range(num_requests_per_conn):
            qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
            # Request ids must be allocated under the connection lock when shared.
            with conn.lock:
                request_id = conn.get_request_id()
            conn.send_msg(qmsg, request_id, cb=partial(cb, all_responses, thread_responses, i))
    all_responses = []
    threads = []
    for i in range(num_threads):
        thread_responses = ([False] * num_requests_per_conn)
        all_responses.append(thread_responses)
        t = Thread(target=send_msgs, args=(all_responses, thread_responses))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
    event.wait()
|
'Test several threads, each with their own Connection and pipelined
requests.'
def test_multiple_threads_multiple_connections(self):
    """Each of five threads pipelines requests over its own private connection."""
    num_requests_per_conn = 25
    num_conns = 5
    events = [Event() for i in range(5)]
    query = 'SELECT keyspace_name FROM system.schema_keyspaces LIMIT 1'

    def cb(conn, event, thread_responses, request_num, *args, **kwargs):
        thread_responses[request_num] = True
        # All of this thread's requests answered -> close its connection.
        if all(thread_responses):
            conn.close()
            event.set()

    def send_msgs(conn, event):
        thread_responses = ([False] * num_requests_per_conn)
        for i in range(num_requests_per_conn):
            qmsg = QueryMessage(query=query, consistency_level=ConsistencyLevel.ONE)
            with conn.lock:
                request_id = conn.get_request_id()
            conn.send_msg(qmsg, request_id, cb=partial(cb, conn, event, thread_responses, i))
        # Each thread waits for its own completion event before exiting.
        event.wait()
    threads = []
    for i in range(num_conns):
        conn = self.get_connection()
        t = Thread(target=send_msgs, args=(conn, events[i]))
        threads.append(t)
    for t in threads:
        t.start()
    for t in threads:
        t.join()
|
'Return correct authentication provider based on protocol version.
There is a difference in the semantics of authentication provider argument with protocol versions 1 and 2
For protocol version 2 and higher it should be a PlainTextAuthProvider object.
For protocol version 1 it should be a function taking hostname as an argument and returning a dictionary
containing username and password.
:param username: authentication username
:param password: authentication password
:return: authentication object suitable for Cluster.connect()'
def get_authentication_provider(self, username, password):
    """Return the auth provider appropriate for the negotiated protocol version.

    Protocol v2+ expects a PlainTextAuthProvider object; v1 expects a
    callable mapping a hostname to a credentials dict.

    :param username: authentication username
    :param password: authentication password
    :return: authentication object suitable for Cluster.connect()
    """
    if (PROTOCOL_VERSION >= 2):
        return PlainTextAuthProvider(username=username, password=password)
    return (lambda hostname: dict(username=username, password=password))
|
'Test to validate that dropping a keyspace with user defined types doesn\'t kill the control connection.
Creates a keyspace, and populates with a user defined type. It then records the control_connection\'s id. It
will then drop the keyspace and get the id of the control_connection again. They should be the same. If they are
not dropping the keyspace likely caused the control connection to be rebuilt.
@since 2.7.0
@jira_ticket PYTHON-358
@expected_result the control connection is not killed
@test_category connection'
def test_drop_keyspace(self):
    """Dropping a keyspace containing UDTs must not force a control-connection rebuild (PYTHON-358)."""
    self.session = self.cluster.connect()
    self.session.execute("\n CREATE KEYSPACE keyspacetodrop\n WITH replication = { 'class' : 'SimpleStrategy', 'replication_factor': '1' }\n ")
    self.session.set_keyspace('keyspacetodrop')
    self.session.execute('CREATE TYPE user (age int, name text)')
    self.session.execute('CREATE TABLE mytable (a int PRIMARY KEY, b frozen<user>)')
    # Same connection object id before and after the DROP means it survived.
    cc_id_pre_drop = id(self.cluster.control_connection._connection)
    self.session.execute('DROP KEYSPACE keyspacetodrop')
    cc_id_post_drop = id(self.cluster.control_connection._connection)
    self.assertEqual(cc_id_post_drop, cc_id_pre_drop)
|
'Test to validate Cluster.get_control_connection_host() metadata
@since 3.5.0
@jira_ticket PYTHON-583
@expected_result the control connection metadata should accurately reflect cluster state.
@test_category metadata'
def test_get_control_connection_host(self):
    """get_control_connection_host reflects the live control connection (PYTHON-583)."""
    # Before connecting there is no control connection host.
    host = self.cluster.get_control_connection_host()
    self.assertEqual(host, None)
    self.session = self.cluster.connect()
    cc_host = self.cluster.control_connection._connection.host
    host = self.cluster.get_control_connection_host()
    self.assertEqual(host.address, cc_host)
    self.assertEqual(host.is_up, True)
    # Force a reconnect; the reported host should change.
    self.cluster.control_connection._reconnect()
    new_host = self.cluster.get_control_connection_host()
    self.assertNotEqual(host, new_host)
|
'Test to validate that custom payloads work with simple queries
creates a simple query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload'
def test_custom_query_basic(self):
    """Custom payloads round-trip on a plain SimpleStatement (PYTHON-280)."""
    statement = SimpleStatement('SELECT * FROM system.local')
    self.validate_various_custom_payloads(statement=statement)
|
'Test to validate that custom payloads work with batch queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload'
def test_custom_query_batching(self):
    """Custom payloads round-trip on a logged batch statement (PYTHON-280)."""
    batch = BatchStatement(BatchType.LOGGED)
    insert_cql = 'INSERT INTO test3rf.test (k, v) VALUES (%s, %s)'
    for value in range(10):
        batch.add(SimpleStatement(insert_cql), (value, value))
    self.validate_various_custom_payloads(statement=batch)
|
'Test to validate that custom payloads work with prepared queries
creates a batch query and ensures that custom payloads are passed to C*. A custom
query provider is used with C* so we can validate that same custom payloads are sent back
with the results
@since 2.6
@jira_ticket PYTHON-280
@expected_result valid custom payloads should be sent and received
@test_category queries:custom_payload'
def test_custom_query_prepared(self):
    """Custom payloads round-trip on a bound prepared statement (PYTHON-280)."""
    insert = self.session.prepare('\n INSERT INTO test3rf.test (k, v) VALUES (?, ?)\n ')
    self.validate_various_custom_payloads(statement=insert.bind((1, None)))
|
'This is a utility method that given a statement will attempt
to submit the statement with various custom payloads. It will
validate that the custom payloads are sent and received correctly.
@param statement The statement to validate the custom queries in conjunction with'
def validate_various_custom_payloads(self, statement):
    """Submit *statement* with a range of payloads, including entry-count limits.

    :param statement: the statement to validate custom payloads against
    """
    custom_payload = {'test': 'test_return'}
    self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
    # Empty and whitespace-only keys/values are legal.
    custom_payload = {'': ''}
    self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
    custom_payload = {' ': ' '}
    self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
    key_value = ('x' * 10)
    custom_payload = {key_value: six.b(key_value)}
    self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
    # Grow to 65535 entries total — still accepted...
    for i in range(65534):
        custom_payload[str(i)] = six.b('x')
    self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
    # ...one more entry exceeds the limit and must raise ValueError.
    custom_payload[str(65535)] = six.b('x')
    with self.assertRaises(ValueError):
        self.execute_async_validate_custom_payload(statement=statement, custom_payload=custom_payload)
|
'This is just a simple method that submits a statement with a payload, and validates
that the custom payload we submitted matches the one that we got back
@param statement The statement to execute
@param custom_payload The custom payload to submit with'
def execute_async_validate_custom_payload(self, statement, custom_payload):
    """Execute *statement* with *custom_payload* and assert the server echoes it back.

    :param statement: the statement to execute
    :param custom_payload: the custom payload to submit with it
    """
    future = self.session.execute_async(statement, custom_payload=custom_payload)
    future.result()
    self.assertEqual(custom_payload, future.custom_payload)
|
'USE test1rf;
DROP TABLE IF EXISTS race;
CREATE TABLE race (x int PRIMARY KEY);'
def setUp(self):
    """Connect to test1rf and (re)create the scratch table `race`."""
    self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    self.session = self.cluster.connect('test1rf')
    ddl1 = '\n DROP TABLE IF EXISTS race'
    self.session.execute(ddl1)
    ddl2 = '\n CREATE TABLE race (x int PRIMARY KEY)'
    self.session.execute(ddl2)
|
'DROP TABLE test1rf.race
Shutdown cluster'
def tearDown(self):
    """Drop test1rf.race and shut the cluster down.

    The shutdown runs in a finally block so cluster resources (connections,
    threads) are released even if the DROP itself fails.
    """
    try:
        self.session.execute('DROP TABLE race')
    finally:
        self.cluster.shutdown()
|
'Verify that in_flight value stays equal to one while doing multiple inserts.
The number of inserts can be set through the INSERT_ITERATIONS environment variable.
Default value is 1000000.'
def test_in_flight_is_one(self):
    """in_flight must stay at or below 1 while executing synchronous inserts one at a time.

    Iteration count comes from the INSERT_ITERATIONS environment variable
    (default 1000000).
    """
    prepared = self.session.prepare('INSERT INTO race (x) VALUES (?)')
    iterations = int(os.getenv('INSERT_ITERATIONS', 1000000))
    i = 0
    leaking_connections = False
    while ((i < iterations) and (not leaking_connections)):
        bound = prepared.bind((i,))
        self.session.execute(bound)
        # After each synchronous execute, no connection should still report
        # more than one in-flight request.
        for pool in self.session._pools.values():
            if leaking_connections:
                break
            for conn in pool.get_connections():
                if (conn.in_flight > 1):
                    print self.session.get_pool_state()
                    leaking_connections = True
                    break
        i = (i + 1)
    self.assertFalse(leaking_connections, ('Detected leaking connection after %s iterations' % i))
|
'Code coverage for interface-style base class'
def test_non_implemented(self):
    """Every hook of the abstract LoadBalancingPolicy raises NotImplementedError."""
    policy = LoadBalancingPolicy()
    host = Host('ip1', SimpleConvictionPolicy)
    host.set_location_info('dc1', 'rack1')
    abstract_calls = (
        (policy.distance, (host,)),
        (policy.populate, (None, host)),
        (policy.make_query_plan, ()),
        (policy.on_up, (host,)),
        (policy.on_down, (host,)),
        (policy.on_add, (host,)),
        (policy.on_remove, (host,)),
    )
    for method, args in abstract_calls:
        self.assertRaises(NotImplementedError, method, *args)
|
'Ensure query plan for a downed cluster will execute without errors'
def test_no_live_nodes(self):
    """A query plan on a fully-down cluster is empty but raises nothing."""
    policy = RoundRobinPolicy()
    policy.populate(None, [0, 1, 2, 3])
    # Mark every host down before asking for a plan.
    for node in (0, 1, 2, 3):
        policy.on_down(node)
    self.assertEqual(list(policy.make_query_plan()), [])
|
'Ensure query plan for a downed cluster will execute without errors'
def test_no_live_nodes(self):
    """DC-aware: a query plan on a fully-down cluster is empty but raises nothing."""
    hosts = [Host(addr, SimpleConvictionPolicy) for addr in range(4)]
    for host in hosts:
        host.set_location_info('dc1', 'rack1')
    policy = DCAwareRoundRobinPolicy('dc1', used_hosts_per_remote_dc=1)
    policy.populate(Mock(), hosts)
    # Mark every host down before asking for a plan.
    for host in hosts:
        policy.on_down(host)
    self.assertEqual(list(policy.make_query_plan()), [])
|
'Ensure query plan for an empty cluster will execute without errors'
def test_no_nodes(self):
    """An empty cluster yields an empty query plan without raising."""
    empty_policy = DCAwareRoundRobinPolicy('dc1', used_hosts_per_remote_dc=1)
    empty_policy.populate(None, [])
    self.assertEqual(list(empty_policy.make_query_plan()), [])
|
'Same test as DCAwareRoundRobinPolicyTest.test_get_distance()
Except a FakeCluster is needed for the metadata variable and
policy.child_policy is needed to change child policy settings'
def test_get_distance(self):
    """TokenAwarePolicy.distance defers to the wrapped DC-aware child policy."""
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy('dc1', used_hosts_per_remote_dc=0))
    host = Host('ip1', SimpleConvictionPolicy)
    host.set_location_info('dc1', 'rack1')
    policy.populate(self.FakeCluster(), [host])
    self.assertEqual(policy.distance(host), HostDistance.LOCAL)
    remote_host = Host('ip2', SimpleConvictionPolicy)
    remote_host.set_location_info('dc2', 'rack1')
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
    # Changing the child setting alone is not enough: the host is only
    # promoted to REMOTE after populate() runs again with it included.
    policy._child_policy.used_hosts_per_remote_dc = 1
    self.assertEqual(policy.distance(remote_host), HostDistance.IGNORED)
    policy.populate(self.FakeCluster(), [host, remote_host])
    self.assertEqual(policy.distance(remote_host), HostDistance.REMOTE)
    second_remote_host = Host('ip3', SimpleConvictionPolicy)
    second_remote_host.set_location_info('dc2', 'rack1')
    policy.populate(self.FakeCluster(), [host, remote_host, second_remote_host])
    # With used_hosts_per_remote_dc=1, exactly one of the two dc2 hosts is
    # REMOTE and the other is IGNORED.
    distances = set([policy.distance(remote_host), policy.distance(second_remote_host)])
    self.assertEqual(distances, set([HostDistance.REMOTE, HostDistance.IGNORED]))
|
'Same test as DCAwareRoundRobinPolicyTest.test_status_updates()'
def test_status_updates(self):
    """Token-aware wrapper forwards up/down/add/remove events to the DC-aware child."""
    hosts = [Host(i, SimpleConvictionPolicy) for i in range(4)]
    for h in hosts[:2]:
        h.set_location_info('dc1', 'rack1')
    for h in hosts[2:]:
        h.set_location_info('dc2', 'rack1')
    policy = TokenAwarePolicy(DCAwareRoundRobinPolicy('dc1', used_hosts_per_remote_dc=1))
    policy.populate(self.FakeCluster(), hosts)
    # Knock out one local host, remove one remote, then add replacements.
    policy.on_down(hosts[0])
    policy.on_remove(hosts[2])
    new_local_host = Host(4, SimpleConvictionPolicy)
    new_local_host.set_location_info('dc1', 'rack1')
    policy.on_up(new_local_host)
    new_remote_host = Host(5, SimpleConvictionPolicy)
    new_remote_host.set_location_info('dc9000', 'rack1')
    policy.on_add(new_remote_host)
    # Local hosts are planned first, then one remote per remote DC.
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan[:2]), set([hosts[1], new_local_host]))
    self.assertEqual(set(qplan[2:]), set([hosts[3], new_remote_host]))
    self.assertEqual(policy.distance(new_remote_host), HostDistance.REMOTE)
    policy.on_down(new_local_host)
    policy.on_down(hosts[1])
    qplan = list(policy.make_query_plan())
    self.assertEqual(set(qplan), set([hosts[3], new_remote_host]))
    policy.on_down(new_remote_host)
    policy.on_down(hosts[3])
    qplan = list(policy.make_query_plan())
    self.assertEqual(qplan, [])
|
def test_shuffles_if_given_keyspace_and_routing_key(self):
    """
    Test to validate the hosts are shuffled when `shuffle_replicas` is truthy

    @since 3.8
    @jira_ticket PYTHON-676
    @expected_result shuffle should be called, because the keyspace and the
    routing key are set

    @test_category policy
    """
    # Both pieces of routing information are present, so replicas shuffle.
    self._assert_shuffle(keyspace='keyspace', routing_key='routing_key')
|
def test_no_shuffle_if_given_no_keyspace(self):
    """
    Test to validate the hosts are not shuffled when no keyspace is provided

    @since 3.8
    @jira_ticket PYTHON-676
    @expected_result shuffle should not be called, because keyspace is None

    @test_category policy
    """
    # Missing keyspace prevents replica lookup, so no shuffling happens.
    self._assert_shuffle(keyspace=None, routing_key='routing_key')
|
def test_no_shuffle_if_given_no_routing_key(self):
    """
    Test to validate the hosts are not shuffled when no routing_key is provided

    @since 3.8
    @jira_ticket PYTHON-676
    @expected_result shuffle should not be called, because routing_key is None

    @test_category policy
    """
    # Missing routing key prevents replica lookup, so no shuffling happens.
    self._assert_shuffle(keyspace='keyspace', routing_key=None)
|
def test_not_implemented(self):
    """Code coverage for interface-style base class"""
    base_policy = ConvictionPolicy(1)
    # The abstract base must refuse both operations.
    with self.assertRaises(NotImplementedError):
        base_policy.add_failure(1)
    with self.assertRaises(NotImplementedError):
        base_policy.reset()
|
def test_basic_responses(self):
    """Code coverage for SimpleConvictionPolicy"""
    conviction = SimpleConvictionPolicy(1)
    # A failure report always convicts; reset has no return value.
    self.assertEqual(conviction.add_failure(1), True)
    self.assertEqual(conviction.reset(), None)
|
def test_basic_responses(self):
    """Code coverage for interface-style base class"""
    base_policy = ReconnectionPolicy()
    # The abstract base must not provide a schedule.
    with self.assertRaises(NotImplementedError):
        base_policy.new_schedule()
|
def test_bad_vals(self):
    """Test initialization values"""
    # A negative delay must be rejected at construction time.
    with self.assertRaises(ValueError):
        ConstantReconnectionPolicy(-1, 0)
|
def test_schedule(self):
    """Test ConstantReconnectionPolicy schedule.

    The policy must yield exactly ``max_attempts`` entries, and every
    entry must equal the configured constant delay.
    """
    delay = 2
    max_attempts = 100
    policy = ConstantReconnectionPolicy(delay=delay, max_attempts=max_attempts)
    schedule = list(policy.new_schedule())
    self.assertEqual(len(schedule), max_attempts)
    # BUG FIX: the original loop shadowed `delay` with its loop variable and
    # asserted `assertEqual(delay, delay)` -- a self-comparison that could
    # never fail. Compare each scheduled value against the configured delay.
    for scheduled_delay in schedule:
        self.assertEqual(scheduled_delay, delay)
|
def test_schedule_negative_max_attempts(self):
    """Test how negative max_attempts are handled.

    Construction must fail with ValueError rather than silently accepting
    a nonsensical attempt count.
    """
    # Idiom fix: replaced the manual try / self.fail / except ValueError
    # dance with the standard assertRaises context manager.
    with self.assertRaises(ValueError):
        ConstantReconnectionPolicy(delay=2, max_attempts=-100)
|
def test_schedule_overflow(self):
    """
    Test to verify an OverflowError is handled correctly
    in the ExponentialReconnectionPolicy

    @since 3.10
    @jira_ticket PYTHON-707
    @expected_result all numbers should be less than sys.float_info.max
    since that's the biggest max we can possibly have as that argument must be a float.
    Note that is possible for a float to be inf.

    @test_category policy
    """
    # Values right at the float ceiling would overflow during
    # exponentiation unless the policy clamps internally.
    max_delay = sys.float_info.max
    policy = ExponentialReconnectionPolicy(base_delay=max_delay - 1,
                                           max_delay=max_delay,
                                           max_attempts=2 ** 12)
    for generated_delay in list(policy.new_schedule()):
        self.assertLessEqual(generated_delay, sys.float_info.max)
|
def test_unavailable(self):
    """Use the same tests for test_write_timeout, but ensure they only RETHROW"""
    policy = RetryPolicy()

    # Any retry after the first attempt is rethrown with no consistency.
    decision = policy.on_unavailable(query=None, consistency=ONE,
                                     required_replicas=1, alive_replicas=2, retry_num=1)
    self.assertEqual(decision, (RetryPolicy.RETHROW, None))

    # A first failure is retried on the next host at the same consistency,
    # regardless of how short the cluster is on replicas.
    decision = policy.on_unavailable(query=None, consistency=ONE,
                                     required_replicas=1, alive_replicas=2, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETRY_NEXT_HOST, ONE))
    decision = policy.on_unavailable(query=None, consistency=ONE,
                                     required_replicas=10000, alive_replicas=1, retry_num=0)
    self.assertEqual(decision, (RetryPolicy.RETRY_NEXT_HOST, ONE))
|
def test_exception_types(self):
    """
    PYTHON-443

    Sanity check to ensure we don't unintentionally change class hierarchy of exception types
    """
    # Table of exception class -> expected base classes; pin the whole
    # hierarchy in one data-driven pass.
    expected_bases = (
        (Unavailable, (DriverException, RequestExecutionException)),
        (ReadTimeout, (DriverException, RequestExecutionException, Timeout)),
        (WriteTimeout, (DriverException, RequestExecutionException, Timeout)),
        (CoordinationFailure, (DriverException, RequestExecutionException)),
        (ReadFailure, (DriverException, RequestExecutionException, CoordinationFailure)),
        (WriteFailure, (DriverException, RequestExecutionException, CoordinationFailure)),
        (FunctionFailure, (DriverException, RequestExecutionException)),
        (RequestValidationException, (DriverException,)),
        (ConfigurationException, (DriverException, RequestValidationException)),
        (AlreadyExists, (DriverException, RequestValidationException, ConfigurationException)),
        (InvalidRequest, (DriverException, RequestValidationException)),
        (Unauthorized, (DriverException, RequestValidationException)),
        (AuthenticationFailed, (DriverException,)),
        (OperationTimedOut, (DriverException,)),
        (UnsupportedOperation, (DriverException,)),
    )
    for exc_type, bases in expected_bases:
        for base in bases:
            self.assertTrue(issubclass(exc_type, base))
|
@patch('time.time', return_value=3)
@patch('cassandra.cluster._Scheduler.run')
def test_event_delay_timing(self, *_):
    """
    Schedule something with a time collision to make sure the heap comparison works

    PYTHON-473
    """
    scheduler = _Scheduler(None)
    # With time.time() patched to a constant, both entries land on the same
    # heap key, forcing the tie-breaking comparison path.
    scheduler.schedule(0, lambda: None)
    scheduler.schedule(0, lambda: None)
|
@mock_session_pools
def test_default_serial_consistency_level(self, *_):
    """
    Make sure default_serial_consistency_level passes through to a query message.
    Also make sure Statement.serial_consistency_level overrides the default.

    PR #510
    """
    session = Session(Cluster(protocol_version=4), [Host('127.0.0.1', SimpleConvictionPolicy)])
    # The session default starts out unset.
    self.assertIsNone(session.default_serial_consistency_level)

    sentinel = 1001  # arbitrary non-CL value to prove pure pass-through
    for default_cl in (None, ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL, sentinel):
        session.default_serial_consistency_level = default_cl
        # Without a statement-level override, the default flows into the message.
        future = session.execute_async(query='')
        self.assertEqual(future.message.serial_consistency_level, default_cl)
        # A statement-level setting wins over the session default,
        # and the default itself remains untouched.
        for override_cl in (ConsistencyLevel.LOCAL_SERIAL, ConsistencyLevel.SERIAL):
            statement = SimpleStatement(query_string='', serial_consistency_level=override_cl)
            future = session.execute_async(statement)
            self.assertEqual(session.default_serial_consistency_level, default_cl)
            self.assertEqual(future.message.serial_consistency_level, override_cl)
|
def test_lookup_casstype_simple(self):
    """Ensure lookup_casstype_simple returns the correct classes"""
    # Each simple Cassandra type name resolves to the cqltypes class of
    # the same name; iterate the full catalog instead of repeating calls.
    type_names = (
        'AsciiType', 'LongType', 'BytesType', 'BooleanType',
        'CounterColumnType', 'DecimalType', 'DoubleType', 'FloatType',
        'InetAddressType', 'Int32Type', 'UTF8Type', 'DateType',
        'SimpleDateType', 'ByteType', 'ShortType', 'TimeUUIDType',
        'TimeType', 'UUIDType', 'IntegerType', 'MapType', 'ListType',
        'SetType', 'CompositeType', 'ColumnToCollectionType',
        'ReversedType', 'DurationType',
    )
    for name in type_names:
        self.assertEqual(lookup_casstype_simple(name), getattr(cassandra.cqltypes, name))
    # Unknown names fall back to a synthesized "unrecognized" type.
    self.assertEqual(str(lookup_casstype_simple('unknown')),
                     str(cassandra.cqltypes.mkUnrecognizedType('unknown')))
|
def test_lookup_casstype(self):
    """Ensure lookup_casstype returns the correct classes"""
    # Every type name resolves to the cqltypes class of the same name.
    type_names = (
        'AsciiType', 'LongType', 'BytesType', 'BooleanType',
        'CounterColumnType', 'DateType', 'DecimalType', 'DoubleType',
        'FloatType', 'InetAddressType', 'Int32Type', 'UTF8Type',
        'TimeType', 'ByteType', 'ShortType', 'TimeUUIDType', 'UUIDType',
        'IntegerType', 'MapType', 'ListType', 'SetType', 'CompositeType',
        'ColumnToCollectionType', 'ReversedType', 'DurationType',
    )
    for name in type_names:
        self.assertEqual(lookup_casstype(name), getattr(cassandra.cqltypes, name))
    # Unknown names fall back to a synthesized "unrecognized" type.
    self.assertEqual(str(lookup_casstype('unknown')),
                     str(cassandra.cqltypes.mkUnrecognizedType('unknown')))
    # Malformed type strings are rejected outright.
    self.assertRaises(ValueError, lookup_casstype, 'AsciiType~')
|
def test_cql_typename(self):
    """Smoke test cql_typename"""
    # Both a bare marshal class name and a fully-qualified parameterized
    # type must translate to their CQL spellings.
    for cass_name, expected_cql in (
            ('DateType', 'timestamp'),
            ('org.apache.cassandra.db.marshal.ListType(IntegerType)', 'list<varint>')):
        self.assertEqual(cql_typename(cass_name), expected_cql)
|
def test_host_order(self):
    """
    Test Host class is ordered consistently

    @since 3.9
    @jira_ticket PYTHON-714
    @expected_result the hosts are ordered correctly

    @test_category data_types
    """
    distinct_hosts = [Host(address, SimpleConvictionPolicy)
                      for address in ('127.0.0.1', '127.0.0.2', '127.0.0.3', '127.0.0.4')]
    self._check_sequence_consistency(distinct_hosts)
    # Hosts with the same address compare equal...
    same_address = [Host('127.0.0.1', SimpleConvictionPolicy) for _ in range(2)]
    self._check_sequence_consistency(same_address, equal=True)
    # ...even when their conviction policies differ.
    different_conviction = [Host('127.0.0.1', SimpleConvictionPolicy),
                            Host('127.0.0.1', ConvictionPolicy)]
    self._check_sequence_consistency(different_conviction, equal=True)
|
def test_date_order(self):
    """
    Test Date class is ordered consistently

    @since 3.9
    @jira_ticket PYTHON-714
    @expected_result the dates are ordered correctly

    @test_category data_types
    """
    # Dates constructed from ISO strings.
    dates_from_string = [Date('2017-01-01'), Date('2017-01-05'), Date('2017-01-09'), Date('2017-01-13')]
    dates_from_string_equal = [Date('2017-01-01'), Date('2017-01-01')]
    self._check_sequence_consistency(dates_from_string)
    self._check_sequence_consistency(dates_from_string_equal, equal=True)

    date_format = '%Y-%m-%d'

    # Dates constructed from days-since-epoch integers.
    dates_from_value = [
        Date((datetime.datetime.strptime(dtstr, date_format) - datetime.datetime(1970, 1, 1)).days)
        for dtstr in ('2017-01-02', '2017-01-06', '2017-01-10', '2017-01-14')
    ]
    dates_from_value_equal = [Date(1), Date(1)]
    self._check_sequence_consistency(dates_from_value)
    self._check_sequence_consistency(dates_from_value_equal, equal=True)

    # Dates constructed from datetime.datetime instances.
    dates_from_datetime = [Date(datetime.datetime.strptime(dtstr, date_format))
                           for dtstr in ('2017-01-03', '2017-01-07', '2017-01-11', '2017-01-15')]
    dates_from_datetime_equal = [Date(datetime.datetime.strptime('2017-01-01', date_format)),
                                 Date(datetime.datetime.strptime('2017-01-01', date_format))]
    self._check_sequence_consistency(dates_from_datetime)
    self._check_sequence_consistency(dates_from_datetime_equal, equal=True)

    # Dates constructed from datetime.date instances.
    dates_from_date = [Date(datetime.datetime.strptime(dtstr, date_format).date())
                       for dtstr in ('2017-01-04', '2017-01-08', '2017-01-12', '2017-01-16')]
    # BUG FIX: the original built this "equal" list from raw datetime objects
    # (never wrapped in Date, so the Date comparison path went untested) and
    # relied on strptime tolerating the '2017-01-9' typo. Build real Date
    # objects from date instances, mirroring dates_from_date above.
    dates_from_date_equal = [Date(datetime.datetime.strptime(dtstr, date_format).date())
                             for dtstr in ('2017-01-09', '2017-01-09')]
    self._check_sequence_consistency(dates_from_date)
    self._check_sequence_consistency(dates_from_date_equal, equal=True)

    # All construction styles must interleave into one consistent ordering.
    self._check_sequence_consistency(
        self._shuffle_lists(dates_from_string, dates_from_value, dates_from_datetime, dates_from_date))
|
def test_timer_order(self):
    """
    Test Time class is ordered consistently

    @since 3.9
    @jira_ticket PYTHON-714
    @expected_result the times are ordered correctly

    @test_category data_types
    """
    # Times constructed from nanosecond integers.
    time_from_int = [Time(1000), Time(4000), Time(7000), Time(10000)]
    time_from_int_equal = [Time(1), Time(1)]
    self._check_sequence_consistency(time_from_int)
    self._check_sequence_consistency(time_from_int_equal, equal=True)

    def midnight_plus(us):
        # Helper: Time from a datetime.time just past midnight.
        return Time(datetime.time(hour=0, minute=0, second=0, microsecond=us))

    # Times constructed from datetime.time instances.
    time_from_datetime = [midnight_plus(us) for us in (2, 5, 8, 11)]
    time_from_datetime_equal = [midnight_plus(1), midnight_plus(1)]
    self._check_sequence_consistency(time_from_datetime)
    self._check_sequence_consistency(time_from_datetime_equal, equal=True)

    # Times constructed from strings.
    time_from_string = [Time('00:00:00.000003000'), Time('00:00:00.000006000'),
                        Time('00:00:00.000009000'), Time('00:00:00.000012000')]
    time_from_string_equal = [Time('00:00:00.000004000'), Time('00:00:00.000004000')]
    self._check_sequence_consistency(time_from_string)
    self._check_sequence_consistency(time_from_string_equal, equal=True)

    # All construction styles must interleave into one consistent ordering.
    self._check_sequence_consistency(
        self._shuffle_lists(time_from_int, time_from_datetime, time_from_string))
|
def test_token_order(self):
    """
    Test Token class is ordered consistently

    @since 3.9
    @jira_ticket PYTHON-714
    @expected_result the tokens are ordered correctly

    @test_category data_types
    """
    self._check_sequence_consistency([Token(value) for value in (1, 2, 3, 4)])
    # Tokens built from the same value must compare equal.
    self._check_sequence_consistency([Token(1), Token(1)], equal=True)
|
def test_multi_timer_validation(self, *args):
    """Verify that timer timeouts are honored appropriately"""
    # The connection is needed only for its side effect of initializing the
    # event loop; the original bound it to an unused local `c`.
    self.make_connection()
    # Ascending, descending, and shuffled submission orders must all
    # complete in timeout order.
    submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100)
    submit_and_wait_for_completion(self, AsyncoreConnection, 100, 0, -1, 100)
    submit_and_wait_for_completion(self, AsyncoreConnection, 0, 100, 1, 100, True)
|
def test_timer_cancellation(self):
    """Verify that timer cancellation is honored"""
    connection = self.make_connection()
    timeout = 0.1
    callback = TimerCallback(timeout)
    timer = connection.create_timer(timeout, callback.invoke)
    timer.cancel()
    # Sleep past the timeout so the timer would have fired if still alive.
    time.sleep(0.2)
    timer_manager = connection._loop._timers
    # No pending or newly-added timers should remain...
    self.assertFalse(timer_manager._queue)
    self.assertFalse(timer_manager._new_timers)
    # ...and the cancelled callback must never have run.
    self.assertFalse(callback.was_invoked())
|
def test_multi_timer_validation(self):
    """Verify that timer timeouts are honored appropriately"""
    # Ascending, descending, and shuffled submission orders must all
    # complete in timeout order.
    submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100)
    submit_and_wait_for_completion(self, self.connection_class, 100, 0, -1, 100)
    # FIX: the original wrapped this call in a pointless one-element tuple
    # `(submit_and_wait_for_completion(...),)`; call it plainly.
    submit_and_wait_for_completion(self, self.connection_class, 0, 100, 1, 100, True)
|
def test_timer_cancellation(self):
    """Verify that timer cancellation is honored"""
    timeout = 0.1
    callback = TimerCallback(timeout)
    timer = self.connection_class.create_timer(timeout, callback.invoke)
    timer.cancel()
    # Sleep past the timeout so the timer would have fired if still alive.
    time.sleep(0.2)
    timer_manager = self.connection_class._timers
    # No pending or newly-added timers should remain...
    self.assertFalse(timer_manager._queue)
    self.assertFalse(timer_manager._new_timers)
    # ...and the cancelled callback must never have run.
    self.assertFalse(callback.was_invoked())
|
def test_watchers_are_finished(self, *args):
    """
    Test for asserting that watchers are closed in LibevConnection

    This test simulates a process termination without calling cluster.shutdown(), which would trigger
    LibevConnection._libevloop._cleanup. It will check the watchers have been closed
    Finally it will restore the LibevConnection reactor so it doesn't affect
    the rest of the tests

    @since 3.10
    @jira_ticket PYTHON-747
    @expected_result the watchers are closed

    @test_category connection
    """
    with patch.object(LibevConnection._libevloop, '_thread'):
        with patch.object(LibevConnection._libevloop, 'notify'):
            self.make_connection()
            # Snapshot the live connections before simulating loop teardown.
            live_connections = set(LibevConnection._libevloop._live_conns)
            libev__cleanup(weakref.ref(LibevConnection._libevloop))
            # Every connection's read and write watcher must have been stopped.
            for conn in live_connections:
                for watcher in (conn._write_watcher, conn._read_watcher):
                    self.assertTrue(watcher.stop.mock_calls)
            # Restore the reactor so subsequent tests can use it.
            LibevConnection._libevloop._shutdown = False
|
def test_multi_timer_validation(self):
    """Verify that the timers are called in the correct order"""
    twistedreactor.TwistedConnection.initialize_reactor()
    connection = twistedreactor.TwistedConnection('1.2.3.4', cql_version='3.0.1')
    # Ascending, descending, and shuffled submission orders must all
    # complete in timeout order.
    submit_and_wait_for_completion(self, connection, 0, 100, 1, 100)
    submit_and_wait_for_completion(self, connection, 100, 0, -1, 100)
    submit_and_wait_for_completion(self, connection, 0, 100, 1, 100, True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.