def test_query_expression_parsing(self):
    """Tests that query expressions are evaluated properly"""
    query1 = self.table.filter(self.table.column('test_id') == 5)
    assert len(query1._where) == 1

    op = query1._where[0]
    assert isinstance(op.operator, operators.EqualsOperator)
    assert op.value == 5

    query2 = query1.filter(self.table.column('expected_result') >= 1)
    assert len(query2._where) == 2

    op = query2._where[1]
    assert isinstance(op.operator, operators.GreaterThanOrEqualOperator)
    assert op.value == 1
def test_filter_method_where_clause_generation(self):
    """Tests the where clause creation"""
    query1 = self.table.objects(test_id=5)
    self.assertEqual(len(query1._where), 1)
    where = query1._where[0]
    self.assertEqual(where.field, 'test_id')
    self.assertEqual(where.value, 5)

    query2 = query1.filter(expected_result__gte=1)
    self.assertEqual(len(query2._where), 2)

    where = query2._where[0]
    self.assertEqual(where.field, 'test_id')
    self.assertIsInstance(where.operator, EqualsOperator)
    self.assertEqual(where.value, 5)

    where = query2._where[1]
    self.assertEqual(where.field, 'expected_result')
    self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
    self.assertEqual(where.value, 1)
def test_query_expression_where_clause_generation(self):
    """Tests the where clause creation"""
    query1 = self.table.objects(self.table.column('test_id') == 5)
    self.assertEqual(len(query1._where), 1)
    where = query1._where[0]
    self.assertEqual(where.field, 'test_id')
    self.assertEqual(where.value, 5)

    query2 = query1.filter(self.table.column('expected_result') >= 1)
    self.assertEqual(len(query2._where), 2)

    where = query2._where[0]
    self.assertEqual(where.field, 'test_id')
    self.assertIsInstance(where.operator, EqualsOperator)
    self.assertEqual(where.value, 5)

    where = query2._where[1]
    self.assertEqual(where.field, 'expected_result')
    self.assertIsInstance(where.operator, GreaterThanOrEqualOperator)
    self.assertEqual(where.value, 1)
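# The two filtering styles exercised above are interchangeable: Django-style
# kwargs (test_id=5, expected_result__gte=1) and explicit column expressions
# built from table.column(...). A minimal sketch, assuming a NamedTable bound
# to an existing keyspace and table (names here are hypothetical):
from cassandra.cqlengine.connection import setup
from cassandra.cqlengine.named import NamedKeyspace

setup(['127.0.0.1'], default_keyspace='examples')
table = NamedKeyspace('examples').table('test_results')

q_kwargs = table.objects(test_id=5).filter(expected_result__gte=1)
q_exprs = table.objects(table.column('test_id') == 5).filter(
    table.column('expected_result') >= 1)
# both build the same WHERE clause: test_id = 5 AND expected_result >= 1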
@execute_count(2)
def test_count(self):
    """Tests that adding filtering statements affects the count query as expected"""
    assert self.table.objects.count() == 12
    q = self.table.objects(test_id=0)
    assert q.count() == 4
@execute_count(2)
def test_query_expression_count(self):
    """Tests that adding query statements affects the count query as expected"""
    assert self.table.objects.count() == 12
    q = self.table.objects(self.table.column('test_id') == 0)
    assert q.count() == 4
@execute_count(3)
def test_iteration(self):
    """Tests that iterating over a query set pulls back all of the expected results"""
    q = self.table.objects(test_id=0)
    compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
    for t in q:
        val = t.attempt_id, t.expected_result
        assert val in compare_set
        compare_set.remove(val)
    assert len(compare_set) == 0

    q = self.table.objects(attempt_id=3).allow_filtering()
    assert len(q) == 3
    compare_set = set([(0, 20), (1, 20), (2, 75)])
    for t in q:
        val = t.test_id, t.expected_result
        assert val in compare_set
        compare_set.remove(val)
    assert len(compare_set) == 0

    q = self.table.objects(self.table.column('attempt_id') == 3).allow_filtering()
    assert len(q) == 3
    compare_set = set([(0, 20), (1, 20), (2, 75)])
    for t in q:
        val = t.test_id, t.expected_result
        assert val in compare_set
        compare_set.remove(val)
    assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterations_work_properly(self):
    """Tests that iterating over a query set more than once works"""
    for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
        # original iteration
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0

        # iterate again
        compare_set = set([(0, 5), (1, 10), (2, 15), (3, 20)])
        for t in q:
            val = t.attempt_id, t.expected_result
            assert val in compare_set
            compare_set.remove(val)
        assert len(compare_set) == 0
@execute_count(2)
def test_multiple_iterators_are_isolated(self):
    """Tests that the use of one iterator does not affect the behavior of another"""
    for q in (self.table.objects(test_id=0), self.table.objects(self.table.column('test_id') == 0)):
        q = q.order_by('attempt_id')
        expected_order = [0, 1, 2, 3]
        iter1 = iter(q)
        iter2 = iter(q)
        for attempt_id in expected_order:
            assert next(iter1).attempt_id == attempt_id
            assert next(iter2).attempt_id == attempt_id
@execute_count(3)
def test_get_success_case(self):
    """Tests that the .get() method works on new and existing querysets"""
    m = self.table.objects.get(test_id=0, attempt_id=0)
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0

    q = self.table.objects(test_id=0, attempt_id=0)
    m = q.get()
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0

    q = self.table.objects(test_id=0)
    m = q.get(attempt_id=0)
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0
@execute_count(3)
def test_query_expression_get_success_case(self):
    """Tests that the .get() method works on new and existing querysets"""
    m = self.table.get(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0

    q = self.table.objects(self.table.column('test_id') == 0, self.table.column('attempt_id') == 0)
    m = q.get()
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0

    q = self.table.objects(self.table.column('test_id') == 0)
    m = q.get(self.table.column('attempt_id') == 0)
    assert isinstance(m, ResultObject)
    assert m.test_id == 0
    assert m.attempt_id == 0
@execute_count(1)
def test_get_doesnotexist_exception(self):
    """Tests that get calls that don't return a result raise a DoesNotExist error"""
    with self.assertRaises(self.table.DoesNotExist):
        self.table.objects.get(test_id=100)
@execute_count(1)
def test_get_multipleobjects_exception(self):
    """Tests that get calls that return multiple results raise a MultipleObjectsReturned error"""
    with self.assertRaises(self.table.MultipleObjectsReturned):
        self.table.objects.get(test_id=1)
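# Typical calling pattern around .get(): treat DoesNotExist and
# MultipleObjectsReturned as the two failure modes (a sketch, assuming a
# NamedTable like the tests' self.table):
def fetch_single_result(table, test_id):
    try:
        return table.objects.get(test_id=test_id)
    except table.DoesNotExist:
        return None  # no row matched the filter
    except table.MultipleObjectsReturned:
        raise  # more than one row matched; the filter needs narrowing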
@greaterthanorequalcass30
@execute_count(5)
def test_named_table_with_mv(self):
    """Test NamedTable access to materialized views

    Creates some materialized views using traditional CQL, then ensures we
    can access those materialized views using the NamedKeyspace and
    NamedTable interfaces. Tests basic filtering as well.

    @since 3.0.0
    @jira_ticket PYTHON-406
    @expected_result Named tables should have access to materialized views
    @test_category materialized_view
    """
    ks = models.DEFAULT_KEYSPACE
    self.session.execute('DROP MATERIALIZED VIEW IF EXISTS {0}.alltimehigh'.format(ks))
    self.session.execute('DROP MATERIALIZED VIEW IF EXISTS {0}.monthlyhigh'.format(ks))
    self.session.execute('DROP TABLE IF EXISTS {0}.scores'.format(ks))

    create_table = """CREATE TABLE {0}.scores(
        user TEXT,
        game TEXT,
        year INT,
        month INT,
        day INT,
        score INT,
        PRIMARY KEY (user, game, year, month, day)
    )""".format(ks)
    self.session.execute(create_table)

    create_mv = """CREATE MATERIALIZED VIEW {0}.monthlyhigh AS
        SELECT game, year, month, score, user, day FROM {0}.scores
        WHERE game IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND day IS NOT NULL
        PRIMARY KEY ((game, year, month), score, user, day)
        WITH CLUSTERING ORDER BY (score DESC, user ASC, day ASC)""".format(ks)
    self.session.execute(create_mv)

    create_mv_alltime = """CREATE MATERIALIZED VIEW {0}.alltimehigh AS
        SELECT * FROM {0}.scores
        WHERE game IS NOT NULL AND score IS NOT NULL AND user IS NOT NULL AND year IS NOT NULL AND month IS NOT NULL AND day IS NOT NULL
        PRIMARY KEY (game, score, user, year, month, day)
        WITH CLUSTERING ORDER BY (score DESC)""".format(ks)
    self.session.execute(create_mv_alltime)

    prepared_insert = self.session.prepare(
        'INSERT INTO {0}.scores (user, game, year, month, day, score) VALUES (?, ?, ?, ?, ?, ?)'.format(ks))
    parameters = (
        ('pcmanus', 'Coup', 2015, 5, 1, 4000),
        ('jbellis', 'Coup', 2015, 5, 3, 1750),
        ('yukim', 'Coup', 2015, 5, 3, 2250),
        ('tjake', 'Coup', 2015, 5, 3, 500),
        ('iamaleksey', 'Coup', 2015, 6, 1, 2500),
        ('tjake', 'Coup', 2015, 6, 2, 1000),
        ('pcmanus', 'Coup', 2015, 6, 2, 2000),
        ('jmckenzie', 'Coup', 2015, 6, 9, 2700),
        ('jbellis', 'Coup', 2015, 6, 20, 3500),
        ('jbellis', 'Checkers', 2015, 6, 20, 1200),
        ('jbellis', 'Chess', 2015, 6, 21, 3500),
        ('pcmanus', 'Chess', 2015, 1, 25, 3200),
    )
    prepared_insert.consistency_level = ConsistencyLevel.ALL
    execute_concurrent_with_args(self.session, prepared_insert, parameters)

    # Attempt to query the data using the named table/keyspace interfaces
    key_space = NamedKeyspace(ks)
    mv_monthly = key_space.table('monthlyhigh')
    mv_all_time = key_space.table('alltimehigh')

    self.assertTrue(self.check_table_size('scores', key_space, len(parameters)))
    self.assertTrue(self.check_table_size('monthlyhigh', key_space, len(parameters)))
    self.assertTrue(self.check_table_size('alltimehigh', key_space, len(parameters)))

    filtered_mv_monthly_objects = mv_monthly.objects.filter(game='Chess', year=2015, month=6)
    self.assertEqual(len(filtered_mv_monthly_objects), 1)
    self.assertEqual(filtered_mv_monthly_objects[0]['score'], 3500)
    self.assertEqual(filtered_mv_monthly_objects[0]['user'], 'jbellis')

    filtered_mv_alltime_objects = mv_all_time.objects.filter(game='Chess')
    self.assertEqual(len(filtered_mv_alltime_objects), 2)
    self.assertEqual(filtered_mv_alltime_objects[0]['score'], 3500)
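# Accessing a materialized view through the object mapper needs no model
# class: NamedKeyspace/NamedTable introspect whatever exists server-side.
# A minimal sketch against the monthlyhigh view created above (the keyspace
# name here is hypothetical):
from cassandra.cqlengine.named import NamedKeyspace

key_space = NamedKeyspace('examples')
monthly = key_space.table('monthlyhigh')
for row in monthly.objects.filter(game='Coup', year=2015, month=6):
    print(row['user'], row['score'])  # rows come back as dict-like objects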
@execute_count(8)
def test_update_values(self):
    """Tests calling update on a queryset"""
    partition = uuid4()
    for i in range(5):
        TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

    # sanity check
    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, i)
        self.assertEqual(row.text, str(i))

    # perform update
    TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6)

    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, 6 if i == 3 else i)
        self.assertEqual(row.text, str(i))
@execute_count(6)
def test_update_values_validation(self):
    """Tests calling update on models with values passed in"""
    partition = uuid4()
    for i in range(5):
        TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

    # sanity check
    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, i)
        self.assertEqual(row.text, str(i))

    # perform update with an invalid value
    with self.assertRaises(ValidationError):
        TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count='asdf')
def test_invalid_update_kwarg(self):
    """Tests that passing a kwarg to the update method that isn't a column will fail"""
    with self.assertRaises(ValidationError):
        TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(bacon=5000)
def test_primary_key_update_failure(self):
    """Tests that attempting to update the value of a primary key will fail"""
    with self.assertRaises(ValidationError):
        TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)
@execute_count(8)
def test_null_update_deletes_column(self):
    """Setting a field to null in the update should issue a delete statement"""
    partition = uuid4()
    for i in range(5):
        TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, i)
        self.assertEqual(row.text, str(i))

    TestQueryUpdateModel.objects(partition=partition, cluster=3).update(text=None)

    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, i)
        self.assertEqual(row.text, None if i == 3 else str(i))
@execute_count(9)
def test_mixed_value_and_null_update(self):
    """Tests that updating one column's value and removing another works properly"""
    partition = uuid4()
    for i in range(5):
        TestQueryUpdateModel.create(partition=partition, cluster=i, count=i, text=str(i))

    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, i)
        self.assertEqual(row.text, str(i))

    TestQueryUpdateModel.objects(partition=partition, cluster=3).update(count=6, text=None)

    for i, row in enumerate(TestQueryUpdateModel.objects(partition=partition)):
        self.assertEqual(row.cluster, i)
        self.assertEqual(row.count, 6 if i == 3 else i)
        self.assertEqual(row.text, None if i == 3 else str(i))
@execute_count(2)
def test_set_add_updates_new_record(self):
    """If the key doesn't exist yet, an update creates the record"""
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(text_set__add=set(('bar',)))
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    self.assertEqual(obj.text_set, set(('bar',)))
@execute_count(3)
def test_set_remove_new_record(self):
    """Removing something not in the set should silently do nothing"""
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster, text_set=set(('foo',)))
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(text_set__remove=set(('afsd',)))
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    self.assertEqual(obj.text_set, set(('foo',)))
@execute_count(3)
def test_list_prepend_updates(self):
    """Prepend two things, since order is reversed by CQL by default"""
    partition = uuid4()
    cluster = 1
    original = ['foo']
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster, text_list=original)
    prepended = ['bar', 'baz']
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(text_list__prepend=prepended)
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    expected = (prepended[::-1] if is_prepend_reversed() else prepended) + original
    self.assertEqual(obj.text_list, expected)
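# Context for the is_prepend_reversed() check above: older Cassandra servers
# applied a multi-element prepend as one UPDATE per element (reversing the
# order), while newer servers prepend the collection atomically. A minimal
# sketch, assuming a hypothetical cqlengine model (connection setup and
# sync_table are omitted):
from uuid import uuid4
from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model

class ListedModel(Model):
    partition = columns.UUID(primary_key=True)
    cluster = columns.Integer(primary_key=True)
    text_list = columns.List(columns.Text)

pk = uuid4()
ListedModel.create(partition=pk, cluster=1, text_list=['foo'])
ListedModel.objects(partition=pk, cluster=1).update(text_list__prepend=['bar', 'baz'])
# older servers yield ['baz', 'bar', 'foo']; newer servers ['bar', 'baz', 'foo']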
@execute_count(3)
def test_map_update_updates(self):
    """Merge a dictionary into the existing value"""
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster,
                                        text_map={'foo': '1', 'bar': '2'})
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(
        text_map__update={'bar': '3', 'baz': '4'})
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    self.assertEqual(obj.text_map, {'foo': '1', 'bar': '3', 'baz': '4'})
@execute_count(3)
def test_map_update_none_deletes_key(self):
    """In CQL, setting a map key to null deletes that key from the map.
    Test that this works with __update.
    """
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster,
                                        text_map={'foo': '1', 'bar': '2'})
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(
        text_map__update={'bar': None})
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    self.assertEqual(obj.text_map, {'foo': '1'})
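# The same delete-by-null semantics hold in plain CQL through the core
# driver; a sketch, assuming an open `session` and a keyspace/table mirroring
# the test model (names here are hypothetical):
session.execute(
    "UPDATE examples.test_query_update SET text_map['bar'] = null "
    "WHERE partition = %s AND cluster = %s",
    (partition, cluster),
)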
@greaterthancass20
@execute_count(5)
def test_map_update_remove(self):
    """Test that map item removal with update(<columnname>__remove=...) works

    @jira_ticket PYTHON-688
    """
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster,
                                        text_map={'foo': '1', 'bar': '2'})
    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(
        text_map__remove={'bar'},
        text_map__update={'foz': '4', 'foo': '2'})
    obj = TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster)
    self.assertEqual(obj.text_map, {'foo': '2', 'foz': '4'})

    TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(
        text_map__remove={'foo', 'foz'})
    self.assertEqual(
        TestQueryUpdateModel.objects.get(partition=partition, cluster=cluster).text_map,
        {})
def test_map_remove_rejects_non_sets(self):
    """Map item removal requires a set to match the CQL API

    @jira_ticket PYTHON-688
    """
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster,
                                        text_map={'foo': '1', 'bar': '2'})
    with self.assertRaises(ValidationError):
        TestQueryUpdateModel.objects(partition=partition, cluster=cluster).update(
            text_map__remove=['bar'])
@execute_count(3)
def test_an_extra_delete_is_not_sent(self):
    """Test to ensure that an extra DELETE is not sent if an object is read
    from the DB with a None value

    @since 3.9
    @jira_ticket PYTHON-719
    @expected_result only three queries are executed: one for inserting the
    object, one for reading it, and one for updating it
    @test_category object_mapper
    """
    partition = uuid4()
    cluster = 1
    TestQueryUpdateModel.objects.create(partition=partition, cluster=cluster)
    obj = TestQueryUpdateModel.objects(partition=partition, cluster=cluster).first()
    self.assertFalse({k: v for k, v in obj._values.items() if v.deleted})
    obj.text = 'foo'
    obj.save()
def test_static_deletion(self):
    """Test to ensure that cluster keys are not included when removing only static columns

    @since 3.6
    @jira_ticket PYTHON-608
    @expected_result Server should not throw an exception, and the static column should be deleted
    @test_category object_mapper
    """
    StaticDeleteModel.create(example_id=5, example_clust=5, example_static2=1)
    sdm = StaticDeleteModel.filter(example_id=5).first()
    self.assertEqual(1, sdm.example_static2)
    sdm.update(example_static2=None)
    self.assertIsNone(sdm.example_static2)
@execute_count(0)
def test_none_success_case(self):
    """Tests that passing None into the batch call clears any batch object"""
    b = BatchQuery()
    q = TestMultiKeyModel.objects.batch(b)
    self.assertEqual(q._batch, b)

    q = q.batch(None)
    self.assertIsNone(q._batch)
@execute_count(0)
def test_dml_none_success_case(self):
    """Tests that passing None into the batch call clears any batch object"""
    b = BatchQuery()
    q = DMLQuery(TestMultiKeyModel, batch=b)
    self.assertEqual(q._batch, b)

    q.batch(None)
    self.assertIsNone(q._batch)
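# BatchQuery is normally used as a context manager, which executes the
# accumulated statements on exit; .batch(None) above is the escape hatch for
# detaching a queryset from a batch. A minimal sketch, assuming a synced
# cqlengine model named ExampleModel (hypothetical):
from cassandra.cqlengine.query import BatchQuery

with BatchQuery() as b:
    ExampleModel.batch(b).create(k=1, v=1)  # queued, not executed yet
    ExampleModel.batch(b).create(k=2, v=2)
# both INSERTs were sent as a single logged batch at the end of the block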
def test_wide_batch_rows(self):
    """Test for inserting wide rows with batching

    test_wide_batch_rows tests inserting a wide row of data using batching.
    It will then attempt to query that data and ensure that all of it has
    been inserted appropriately.

    @expected_result all items should be inserted, and verified.
    @test_category queries:batch
    """
    # Table creation
    table = 'wide_batch_rows'
    session = self.make_session_and_keyspace()
    session.execute('CREATE TABLE %s (k INT, i INT, PRIMARY KEY(k, i))' % table)

    # Build a large batch statement
    statement = 'BEGIN BATCH '
    to_insert = 2000
    for i in range(to_insert):
        statement += 'INSERT INTO %s (k, i) VALUES (%s, %s) ' % (table, 0, i)
    statement += 'APPLY BATCH'
    statement = SimpleStatement(statement, consistency_level=ConsistencyLevel.QUORUM)

    try:
        session.execute(statement, timeout=30.0)
    except OperationTimedOut:
        ex_type, ex, tb = sys.exc_info()
        log.warn('Batch wide row insertion timed out, this may require additional investigation')
        log.warn('{0}: {1} Backtrace: {2}'.format(ex_type.__name__, ex, traceback.extract_tb(tb)))
        del tb

    # Verify that all the data was inserted
    results = session.execute('SELECT i FROM %s WHERE k=%s' % (table, 0))
    last_value = 0
    for j, row in enumerate(results):
        last_value = row['i']
        self.assertEqual(last_value, j)

    index_value = to_insert - 1
    self.assertEqual(last_value, index_value,
                     'Verification failed: only found {0} inserted, expecting {1}'.format(j, index_value))

    session.cluster.shutdown()
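# String concatenation works, but the core driver also offers BatchStatement,
# which avoids manual CQL assembly. A minimal sketch under the same
# assumptions (an open `session` and the wide_batch_rows table):
from cassandra import ConsistencyLevel
from cassandra.query import BatchStatement

batch = BatchStatement(consistency_level=ConsistencyLevel.QUORUM)
insert = session.prepare('INSERT INTO wide_batch_rows (k, i) VALUES (?, ?)')
for i in range(2000):
    batch.add(insert, (0, i))
session.execute(batch, timeout=30.0)
# Note: batches this large can exceed the server's batch size warn/fail
# thresholds; concurrent single inserts may be preferable in practice.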
def test_wide_byte_rows(self):
    """Test for inserting a wide row of bytes

    test_wide_byte_rows tests inserting a wide row of data bytes. It will
    then attempt to query that data and ensure that all of it has been
    inserted appropriately.

    @expected_result all items should be inserted, and verified.
    @test_category queries
    """
    # Table creation
    table = 'wide_byte_rows'
    session = self.make_session_and_keyspace()
    session.execute('CREATE TABLE %s (k INT, i INT, v BLOB, PRIMARY KEY(k, i))' % table)

    # Insert via concurrent futures, tracking timeouts
    to_insert = 100000
    prepared = session.prepare('INSERT INTO %s (k, i, v) VALUES (0, ?, 0xCAFE)' % (table,))
    timeouts = self.batch_futures(session, (prepared.bind((i,)) for i in range(to_insert)))

    # Verify that all the data was inserted
    results = session.execute('SELECT i, v FROM %s WHERE k=0' % (table,))
    expected_results = to_insert - timeouts - 1
    bb = pack('>H', 0xCAFE)
    for i, row in enumerate(results):
        self.assertEqual(row['v'], bb)
    self.assertGreaterEqual(i, expected_results,
                            'Verification failed: only found {0} inserted, expecting {1}'.format(i, expected_results))

    session.cluster.shutdown()
def test_large_text(self):
    """Test for inserting a large text field

    test_large_text tests inserting a large text field into a row.

    @expected_result the large text value should be inserted. When the row is
    queried it should match the original value that was inserted
    @test_category queries
    """
    table = 'large_text'
    session = self.make_session_and_keyspace()
    session.execute('CREATE TABLE %s (k int PRIMARY KEY, txt text)' % table)

    # Create ultra-long text
    text = 'a' * 1000000
    session.execute(SimpleStatement("INSERT INTO %s (k, txt) VALUES (%s, '%s')" % (table, 0, text),
                                    consistency_level=ConsistencyLevel.QUORUM))

    result = session.execute('SELECT * FROM %s WHERE k=%s' % (table, 0))
    found_result = False
    for i, row in enumerate(result):
        self.assertEqual(row['txt'], text)
        found_result = True
    self.assertTrue(found_result, 'No results were found')

    session.cluster.shutdown()
def test_pool_with_host_down(self):
    """Test to ensure that cluster.connect() doesn't return prior to pools being initialized.

    This test will figure out which host our pool logic will connect to
    first. It then shuts that server down. Previously, cluster.connect()
    would return prior to the pools being initialized, and the first queries
    would return a NoHostAvailable exception.

    @since 3.7.0
    @jira_ticket PYTHON-617
    @expected_result query should complete successfully
    @test_category connection
    """
    all_contact_points = ['127.0.0.1', '127.0.0.2', '127.0.0.3']

    # Connect and find out which host will be queried first
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    cluster.connect(wait_for_all_pools=True)
    hosts = cluster.metadata.all_hosts()
    address = hosts[0].address
    node_to_stop = int(address.split('.')[-1])
    cluster.shutdown()

    # Start a cluster whose control connection is NOT on the node we will stop
    contact_point = '127.0.0.{0}'.format(self.get_node_not_x(node_to_stop))
    cluster = Cluster(contact_points=[contact_point], protocol_version=PROTOCOL_VERSION)
    cluster.connect(wait_for_all_pools=True)
    try:
        force_stop(node_to_stop)
        wait_for_down(cluster, node_to_stop)
        # Attempt a query while the node is down; it should complete
        cluster2 = Cluster(contact_points=all_contact_points, protocol_version=PROTOCOL_VERSION)
        session2 = cluster2.connect()
        session2.execute('SELECT * FROM system.local')
    finally:
        cluster2.shutdown()
        start(node_to_stop)
        wait_for_up(cluster, node_to_stop)
        cluster.shutdown()
def test_token_aware_is_used_by_default(self):
    """Test for the default load-balancing policy

    test_token_aware_is_used_by_default tests that the default
    load-balancing policy is policies.TokenAwarePolicy. It creates a simple
    Cluster and verifies that the default load-balancing policy is
    TokenAwarePolicy if the murmur3 C extension is found; otherwise the
    default is DCAwareRoundRobinPolicy.

    @since 2.6.0
    @jira_ticket PYTHON-160
    @expected_result TokenAwarePolicy should be the default load-balancing policy.
    @test_category load_balancing:token_aware
    """
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    if murmur3 is not None:
        self.assertTrue(isinstance(cluster.load_balancing_policy, TokenAwarePolicy))
    else:
        self.assertTrue(isinstance(cluster.load_balancing_policy, DCAwareRoundRobinPolicy))
    cluster.shutdown()
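# Selecting the policy explicitly rather than relying on the default looks
# like this (a sketch; the contact point and data center name are
# hypothetical):
from cassandra.cluster import Cluster
from cassandra.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy

cluster = Cluster(
    ['127.0.0.1'],
    load_balancing_policy=TokenAwarePolicy(DCAwareRoundRobinPolicy(local_dc='dc1')),
)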
def test_token_aware_with_shuffle_rf2(self):
    """Test to validate that the hosts are shuffled when `shuffle_replicas` is truthy

    @since 3.8
    @jira_ticket PYTHON-676
    @expected_result the requests are spread across the replicas; when one of
    them is down, the requests target the available one
    @test_category policy
    """
    keyspace = 'test_token_aware_with_rf_2'
    cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=2)

    self._check_query_order_changes(session=session, keyspace=keyspace)

    # check that when a replica goes down, the requests target the remaining one
    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)

    cluster.shutdown()
def test_token_aware_with_shuffle_rf3(self):
    """Test to validate that the hosts are shuffled when `shuffle_replicas` is truthy

    @since 3.8
    @jira_ticket PYTHON-676
    @expected_result the requests are spread across the replicas; when one of
    them is down, the requests target the other available ones
    @test_category policy
    """
    keyspace = 'test_token_aware_with_rf_3'
    cluster, session = self._set_up_shuffle_test(keyspace, replication_factor=3)

    self._check_query_order_changes(session=session, keyspace=keyspace)

    # check that when one replica goes down, the requests are spread across the other two
    self.coordinator_stats.reset_counts()
    stop(1)
    self._wait_for_nodes_down([1], cluster)

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    query_count_two = self.coordinator_stats.get_query_count(2)
    query_count_three = self.coordinator_stats.get_query_count(3)
    self.assertEqual(query_count_two + query_count_three, 12)

    # check that when a second replica goes down, the last one takes all requests
    self.coordinator_stats.reset_counts()
    stop(2)
    self._wait_for_nodes_down([2], cluster)

    self._query(session, keyspace)
    self.coordinator_stats.assert_query_count_equals(self, 1, 0)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)
    self.coordinator_stats.assert_query_count_equals(self, 3, 12)

    cluster.shutdown()
def test_black_list_with_host_filter_policy(self):
    """Test to validate removing certain hosts from the query plan with HostFilterPolicy

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored
    @test_category policy
    """
    use_singledc()
    keyspace = 'test_black_list_with_hfp'
    ignored_address = IP_FORMAT % 2
    hfp = HostFilterPolicy(
        child_policy=RoundRobinPolicy(),
        predicate=lambda host: host.address != ignored_address)
    cluster = Cluster(
        (IP_FORMAT % 1,),
        load_balancing_policy=hfp,
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0)
    self.addCleanup(cluster.shutdown)
    session = cluster.connect()
    self._wait_for_nodes_up([1, 2, 3])

    self.assertNotIn(ignored_address, [h.address for h in hfp.make_query_plan()])

    create_schema(cluster, session, keyspace)
    self._insert(session, keyspace)
    self._query(session, keyspace)

    first_node_count = self.coordinator_stats.get_query_count(1)
    third_node_count = self.coordinator_stats.get_query_count(3)
    self.assertEqual(first_node_count + third_node_count, 12)
    self.assertTrue(first_node_count == 8 or first_node_count == 4)
    self.coordinator_stats.assert_query_count_equals(self, 2, 0)

    force_stop(2)
    self._wait_for_nodes_down([2])
    self.assertFalse(cluster.metadata._hosts[ignored_address].is_currently_reconnecting())
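# HostFilterPolicy expresses a whitelist just as easily as the blacklist
# above; only the predicate changes (a sketch; addresses are hypothetical):
from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

allowed = {'127.0.0.1', '127.0.0.3'}
whitelist_policy = HostFilterPolicy(
    child_policy=RoundRobinPolicy(),
    predicate=lambda host: host.address in allowed,
)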
def test_can_connect_with_ssl_ca(self):
    """Test to validate that we are able to connect to a cluster using ssl.

    test_can_connect_with_ssl_ca performs a simple sanity check to ensure
    that we can connect to a cluster with ssl authentication via a simple
    server-side shared certificate authority. The client is able to validate
    the identity of the server; however, by using this method the server
    can't trust the client unless additional authentication has been provided.

    @since 2.6.0
    @jira_ticket PYTHON-332
    @expected_result The client can connect via SSL and perform some basic operations
    @test_category connection:ssl
    """
    # find absolute path to client CA_CERTS
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    ssl_options = {'ca_certs': abs_path_ca_cert_path, 'ssl_version': ssl.PROTOCOL_TLSv1}
    validate_ssl_options(ssl_options=ssl_options)
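# validate_ssl_options is a test helper; the connection pattern it exercises
# is roughly the following (a sketch: the cert path is hypothetical, and
# PROTOCOL_TLSv1 only mirrors the test -- newer TLS versions are preferable
# in practice):
import ssl
from cassandra.cluster import Cluster

ssl_options = {'ca_certs': '/path/to/rootca.crt', 'ssl_version': ssl.PROTOCOL_TLSv1}
cluster = Cluster(['127.0.0.1'], ssl_options=ssl_options)
session = cluster.connect()
session.execute('SELECT release_version FROM system.local')
cluster.shutdown()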
def test_can_connect_with_ssl_long_running(self):
    """Test to validate that long-running ssl connections continue to function
    past their timeout window

    @since 3.6.0
    @jira_ticket PYTHON-600
    @expected_result The client can connect via SSL and perform some basic
    operations over a period of longer than a minute
    @test_category connection:ssl
    """
    # find absolute path to client CA_CERTS
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    ssl_options = {'ca_certs': abs_path_ca_cert_path, 'ssl_version': ssl.PROTOCOL_TLSv1}
    tries = 0
    while True:
        if tries > 5:
            raise RuntimeError('Failed to connect to SSL cluster after 5 attempts')
        try:
            cluster = Cluster(protocol_version=PROTOCOL_VERSION, ssl_options=ssl_options)
            session = cluster.connect(wait_for_all_pools=True)
            break
        except Exception:
            ex_type, ex, tb = sys.exc_info()
            log.warn('{0}: {1} Backtrace: {2}'.format(ex_type.__name__, ex, traceback.extract_tb(tb)))
            del tb
            tries += 1

    # attempt a few simple commands over a period longer than a minute
    for i in range(8):
        rs = session.execute('SELECT * FROM system.local')
        time.sleep(10)

    cluster.shutdown()
def test_can_connect_with_ssl_ca_host_match(self):
    """Test to validate that we are able to connect to a cluster using ssl,
    with hostname matching

    test_can_connect_with_ssl_ca_host_match performs a simple sanity check to
    ensure that we can connect to a cluster with ssl authentication via a
    simple server-side shared certificate authority. It also validates that
    the host IP matches what is expected.

    @since 3.3
    @jira_ticket PYTHON-296
    @expected_result The client can connect via SSL and perform some basic
    operations, with check_hostname specified
    @test_category connection:ssl
    """
    # find absolute path to client CA_CERTS
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    ssl_options = {'ca_certs': abs_path_ca_cert_path,
                   'ssl_version': ssl.PROTOCOL_TLSv1,
                   'cert_reqs': ssl.CERT_REQUIRED,
                   'check_hostname': True}
    validate_ssl_options(ssl_options=ssl_options)
def test_can_connect_with_ssl_client_auth(self):
    """Test to validate that we can connect to a C* cluster that has client_auth enabled.

    This test will set up and use a C* cluster that has client authentication
    enabled. It will then attempt to connect using valid client keys and
    certs (that are in the server's truststore), and attempt to perform some
    basic operations.

    @since 2.7.0
    @expected_result The client can connect via SSL and perform some basic operations
    @test_category connection:ssl
    """
    # Need absolute paths for the certs/key
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE)
    abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE)
    ssl_options = {'ca_certs': abs_path_ca_cert_path,
                   'ssl_version': ssl.PROTOCOL_TLSv1,
                   'keyfile': abs_driver_keyfile,
                   'certfile': abs_driver_certfile}
    validate_ssl_options(ssl_options)
def test_can_connect_with_ssl_client_auth_host_name(self):
    """Test to validate that we can connect to a C* cluster that has
    client_auth enabled, with hostname matching

    This test will set up and use a C* cluster that has client authentication
    enabled. It will then attempt to connect using valid client keys and
    certs (that are in the server's truststore), and attempt to perform some
    basic operations, with check_hostname specified.

    @since 3.3
    @jira_ticket PYTHON-296
    @expected_result The client can connect via SSL and perform some basic operations
    @test_category connection:ssl
    """
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE)
    abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE)
    ssl_options = {'ca_certs': abs_path_ca_cert_path,
                   'ssl_version': ssl.PROTOCOL_TLSv1,
                   'keyfile': abs_driver_keyfile,
                   'certfile': abs_driver_certfile,
                   'cert_reqs': ssl.CERT_REQUIRED,
                   'check_hostname': True}
    validate_ssl_options(ssl_options)
def test_cannot_connect_without_client_auth(self):
    """Test to validate that we cannot connect without client auth.

    This test will omit the keys/certs needed to perform client
    authentication. It will then attempt to connect to a server that has
    client authentication enabled.

    @since 2.7.0
    @expected_result The client will throw an exception on connect
    @test_category connection:ssl
    """
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                      ssl_options={'ca_certs': abs_path_ca_cert_path,
                                   'ssl_version': ssl.PROTOCOL_TLSv1})
    with self.assertRaises(NoHostAvailable):
        cluster.connect()
    cluster.shutdown()
def test_cannot_connect_with_bad_client_auth(self):
    """Test to validate that we cannot connect with invalid client auth.

    This test will use bad keys/certs to perform client authentication. It
    will then attempt to connect to a server that has client authentication
    enabled.

    @since 2.7.0
    @expected_result The client will throw an exception on connect
    @test_category connection:ssl
    """
    abs_path_ca_cert_path = os.path.abspath(CLIENT_CA_CERTS)
    abs_driver_keyfile = os.path.abspath(DRIVER_KEYFILE)
    abs_driver_certfile = os.path.abspath(DRIVER_CERTFILE_BAD)
    cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                      ssl_options={'ca_certs': abs_path_ca_cert_path,
                                   'ssl_version': ssl.PROTOCOL_TLSv1,
                                   'keyfile': abs_driver_keyfile,
                                   'certfile': abs_driver_certfile})
    with self.assertRaises(NoHostAvailable):
        cluster.connect()
    cluster.shutdown()
def test_recreates(self):
    """Basic test for repeated schema creation and use, using many different keyspaces"""
    session = self.session

    for i in range(2):
        for keyspace_number in range(5):
            keyspace = 'ks_{0}'.format(keyspace_number)

            if keyspace in self.cluster.metadata.keyspaces.keys():
                drop = 'DROP KEYSPACE {0}'.format(keyspace)
                log.debug(drop)
                execute_until_pass(session, drop)

            create = "CREATE KEYSPACE {0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 3}}".format(keyspace)
            log.debug(create)
            execute_until_pass(session, create)

            create = 'CREATE TABLE {0}.cf (k int PRIMARY KEY, i int)'.format(keyspace)
            log.debug(create)
            execute_until_pass(session, create)

            use = 'USE {0}'.format(keyspace)
            log.debug(use)
            execute_until_pass(session, use)

            insert = 'INSERT INTO cf (k, i) VALUES (0, 0)'
            log.debug(insert)
            ss = SimpleStatement(insert, consistency_level=ConsistencyLevel.QUORUM)
            execute_until_pass(session, ss)
def test_for_schema_disagreements_different_keyspaces(self):
    """Tests for any schema disagreements using many different keyspaces"""
    session = self.session

    for i in range(30):
        execute_until_pass(session, "CREATE KEYSPACE test_{0} WITH replication = {{'class': 'SimpleStrategy', 'replication_factor': 1}}".format(i))
        execute_until_pass(session, 'CREATE TABLE test_{0}.cf (key int PRIMARY KEY, value int)'.format(i))

        for j in range(100):
            execute_until_pass(session, 'INSERT INTO test_{0}.cf (key, value) VALUES ({1}, {1})'.format(i, j))

        execute_until_pass(session, 'DROP KEYSPACE test_{0}'.format(i))
def test_for_schema_disagreements_same_keyspace(self):
    """Tests for any schema disagreements using the same keyspace multiple times"""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(wait_for_all_pools=True)

    for i in range(30):
        try:
            execute_until_pass(session, "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")
        except AlreadyExists:
            execute_until_pass(session, 'DROP KEYSPACE test')
            execute_until_pass(session, "CREATE KEYSPACE test WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1}")

        execute_until_pass(session, 'CREATE TABLE test.cf (key int PRIMARY KEY, value int)')

        for j in range(100):
            execute_until_pass(session, 'INSERT INTO test.cf (key, value) VALUES ({0}, {0})'.format(j))

        execute_until_pass(session, 'DROP KEYSPACE test')
    cluster.shutdown()
def test_for_schema_disagreement_attribute(self):
    """Tests to ensure that schema disagreement is properly surfaced on the response future.

    Creates and destroys keyspaces/tables with various schema agreement
    timeouts set. The first part runs CQL create/drop commands with the
    schema agreement wait set so low that agreement cannot occur within the
    timeout, then validates that the correct value is set on the result.
    The second part ensures that when schema agreement does occur, the
    result set reflects that appropriately.

    @since 3.1.0
    @jira_ticket PYTHON-458
    @expected_result is_schema_agreed is set appropriately on the response future
    @test_category schema
    """
    # These should yield schema disagreement
    cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=0.001)
    session = cluster.connect(wait_for_all_pools=True)

    rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}")
    self.check_and_wait_for_agreement(session, rs, False)
    rs = session.execute(SimpleStatement('CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)',
                                         consistency_level=ConsistencyLevel.ALL))
    self.check_and_wait_for_agreement(session, rs, False)
    rs = session.execute('DROP KEYSPACE test_schema_disagreement')
    self.check_and_wait_for_agreement(session, rs, False)
    cluster.shutdown()

    # These should yield schema agreement
    cluster = Cluster(protocol_version=PROTOCOL_VERSION, max_schema_agreement_wait=100)
    session = cluster.connect()

    rs = session.execute("CREATE KEYSPACE test_schema_disagreement WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 3}")
    self.check_and_wait_for_agreement(session, rs, True)
    rs = session.execute(SimpleStatement('CREATE TABLE test_schema_disagreement.cf (key int PRIMARY KEY, value int)',
                                         consistency_level=ConsistencyLevel.ALL))
    self.check_and_wait_for_agreement(session, rs, True)
    rs = session.execute('DROP KEYSPACE test_schema_disagreement')
    self.check_and_wait_for_agreement(session, rs, True)
    cluster.shutdown()
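# The attribute under test can be read directly off any result's response
# future (a sketch, assuming an open `session` and an 'examples' keyspace):
rs = session.execute('CREATE TABLE IF NOT EXISTS examples.t (k int PRIMARY KEY)')
if not rs.response_future.is_schema_agreed:
    # the cluster had not converged within max_schema_agreement_wait;
    # refresh metadata (or retry) before relying on the new schema
    session.cluster.refresh_schema_metadata()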
def setUp(self):
    """Test is skipped if run with native protocol version < 4"""
    self.support_v5 = True
    if PROTOCOL_VERSION < 4:
        raise unittest.SkipTest(
            'Native protocol 4.0+ is required for custom payloads, currently using %r' % (PROTOCOL_VERSION,))
    try:
        self.cluster = Cluster(protocol_version=ProtocolVersion.MAX_SUPPORTED,
                               allow_beta_protocol_version=True)
        self.session = self.cluster.connect()
    except NoHostAvailable:
        log.info('Protocol Version 5 not supported')
        self.cluster = Cluster(protocol_version=PROTOCOL_VERSION)
        self.session = self.cluster.connect()
        self.support_v5 = False

    self.nodes_currently_failing = []
    self.node1, self.node2, self.node3 = get_cluster().nodes.values()
def setFailingNodes(self, failing_nodes, keyspace):
    """Take a set of failing nodes and toggle all of the nodes in the
    provided list to fail writes.

    @param failing_nodes A definitive list of nodes that should fail writes
    @param keyspace The keyspace to enable failures on
    """
    # Ensure all nodes on the list have failure enabled
    for node in failing_nodes:
        if node not in self.nodes_currently_failing:
            node.stop(wait_other_notice=True, gently=False)
            node.start(jvm_args=[' -Dcassandra.test.fail_writes_ks=' + keyspace],
                       wait_for_binary_proto=True, wait_other_notice=True)
            self.nodes_currently_failing.append(node)

    # Ensure all nodes not on the list have failure disabled
    # (iterate over a copy, since we mutate the list while looping)
    for node in list(self.nodes_currently_failing):
        if node not in failing_nodes:
            node.stop(wait_other_notice=True, gently=True)
            node.start(wait_for_binary_proto=True, wait_other_notice=True)
            self.nodes_currently_failing.remove(node)
def _perform_cql_statement(self, text, consistency_level, expected_exception, session=None):
    """Simple helper method to perform CQL statements and check for the expected exception

    @param text CQL statement to execute
    @param consistency_level Consistency level at which it is to be executed
    @param expected_exception Exception expected to be thrown, or None
    """
    if session is None:
        session = self.session
    statement = SimpleStatement(text)
    statement.consistency_level = consistency_level

    if expected_exception is None:
        self.execute_helper(session, statement)
    else:
        with self.assertRaises(expected_exception) as cm:
            self.execute_helper(session, statement)
        if self.support_v5 and (isinstance(cm.exception, WriteFailure) or isinstance(cm.exception, ReadFailure)):
            if isinstance(cm.exception, ReadFailure):
                self.assertEqual(list(cm.exception.error_code_map.values())[0], 1)
            else:
                self.assertEqual(list(cm.exception.error_code_map.values())[0], 0)
def test_write_failures_from_coordinator(self):
    """Test to validate that write failures from the coordinator are surfaced appropriately.

    test_write_failures_from_coordinator enables write failures on various
    nodes using a custom jvm flag, cassandra.test.fail_writes_ks. This will
    cause writes to fail on those specific nodes. Depending on the
    replication factor of the keyspace and the consistency level, we expect
    the coordinator to send a WriteFailure, or not.

    @since 2.6.0, 3.7.0
    @jira_ticket PYTHON-238, PYTHON-619
    @expected_result Appropriate write failures from the coordinator
    @test_category queries:basic
    """
    # Setup temporary keyspace and table
    self._perform_cql_statement(
        """
        CREATE KEYSPACE testksfail
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '3'}
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)
    self._perform_cql_statement(
        """
        CREATE TABLE testksfail.test (
            k int PRIMARY KEY,
            v int )
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)

    # Disable one node's writes; with CL.ALL the write should fail
    failing_nodes = [self.node1]
    self.setFailingNodes(failing_nodes, 'testksfail')
    self._perform_cql_statement(
        """
        INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=WriteFailure)

    # With CL.QUORUM the write should still succeed
    self._perform_cql_statement(
        """
        INSERT INTO testksfail.test (k, v) VALUES (1, 0 )
        """,
        consistency_level=ConsistencyLevel.QUORUM, expected_exception=None)

    # Restore the nodes and drop the keyspace
    failing_nodes = []
    self.setFailingNodes(failing_nodes, 'testksfail')
    self._perform_cql_statement(
        """
        DROP KEYSPACE testksfail
        """,
        consistency_level=ConsistencyLevel.ANY, expected_exception=None)
def test_tombstone_overflow_read_failure(self):
    """Test to validate that a ReadFailure is returned from the node when a
    specified threshold of tombstones is reached.

    test_tombstone_overflow_read_failure first lowers the tombstone failure
    threshold to a level that allows it to be more easily encountered. We
    then create some wide rows and delete from them appropriately, producing
    the requisite number of tombstones. Upon issuing a simple query we expect
    a read failure back from the coordinator.

    @since 2.6.0, 3.7.0
    @jira_ticket PYTHON-238, PYTHON-619
    @expected_result Appropriate read failures from the coordinator
    @test_category queries:basic
    """
    # Setup table for the wide row
    self._perform_cql_statement(
        """
        CREATE TABLE test3rf.test2 (
            k int,
            v0 int,
            v1 int, PRIMARY KEY (k,v0))
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)

    statement = self.session.prepare('INSERT INTO test3rf.test2 (k, v0,v1) VALUES (1,?,1)')
    parameters = [(x,) for x in range(3000)]
    self.execute_concurrent_args_helper(self.session, statement, parameters)

    # Produce tombstones by deleting most of the row
    statement = self.session.prepare('DELETE v1 FROM test3rf.test2 WHERE k = 1 AND v0 =?')
    parameters = [(x,) for x in range(2001)]
    self.execute_concurrent_args_helper(self.session, statement, parameters)

    self._perform_cql_statement(
        """
        SELECT * FROM test3rf.test2 WHERE k = 1
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=ReadFailure)

    self._perform_cql_statement(
        """
        DROP TABLE test3rf.test2;
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)
def test_user_function_failure(self):
    """Test to validate that exceptions in user-defined functions are
    correctly surfaced by the driver.

    test_user_function_failure first creates a table to use for testing, then
    creates a function that will throw an exception when invoked. It invokes
    the function and expects a FunctionFailure, then performs cleanup
    operations.

    @since 2.6.0
    @jira_ticket PYTHON-238
    @expected_result Function failures when UDF throws exception
    @test_category queries:basic
    """
    # create UDF that throws an exception
    self._perform_cql_statement(
        """
        CREATE FUNCTION test3rf.test_failure(d double)
        RETURNS NULL ON NULL INPUT
        RETURNS double
        LANGUAGE java AS 'throw new RuntimeException("failure");';
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)

    # create test table and insert some values
    self._perform_cql_statement(
        """
        CREATE TABLE test3rf.d (k int PRIMARY KEY , d double);
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)
    self._perform_cql_statement(
        """
        INSERT INTO test3rf.d (k,d) VALUES (0, 5.12);
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)

    # run the function, expecting a FunctionFailure
    self._perform_cql_statement(
        """
        SELECT test_failure(d) FROM test3rf.d WHERE k = 0;
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=FunctionFailure)

    # cleanup
    self._perform_cql_statement(
        """
        DROP FUNCTION test3rf.test_failure;
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)
    self._perform_cql_statement(
        """
        DROP TABLE test3rf.d;
        """,
        consistency_level=ConsistencyLevel.ALL, expected_exception=None)
def setUp(self):
    """Setup sessions and pause node1"""
    node1 = ExecutionProfile(
        load_balancing_policy=HostFilterPolicy(
            RoundRobinPolicy(), lambda host: host.address == '127.0.0.1'))
    self.cluster = Cluster(protocol_version=PROTOCOL_VERSION,
                           execution_profiles={EXEC_PROFILE_DEFAULT: node1})
    self.session = self.cluster.connect(wait_for_all_pools=True)

    self.control_connection_host_number = 1
    self.node_to_stop = get_node(self.control_connection_host_number)

    ddl = """
        CREATE TABLE test3rf.timeout (
            k int PRIMARY KEY,
            v int )"""
    self.session.execute(ddl)
    self.node_to_stop.pause()
def tearDown(self):
    """Shutdown cluster and resume node1"""
    self.node_to_stop.resume()
    self.session.execute('DROP TABLE test3rf.timeout')
    self.cluster.shutdown()
def test_async_timeouts(self):
    """Test to validate that timeouts are honored

    Exercise the underlying timeouts by attempting a query that will time
    out. Ensure the default timeout is still honored. Make sure that user
    timeouts are also honored.

    @since 2.7.0
    @jira_ticket PYTHON-108
    @expected_result timeouts should be honored
    @test_category
    """
    # Because node1 is paused, this query should time out
    ss = SimpleStatement('SELECT * FROM test3rf.test', consistency_level=ConsistencyLevel.ALL)

    # Run a query against the paused node; ensure it times out at the default timeout
    start_time = time.time()
    future = self.session.execute_async(ss)
    with self.assertRaises(OperationTimedOut):
        future.result()
    end_time = time.time()
    total_time = end_time - start_time
    expected_time = self.session.default_timeout
    self.assertAlmostEqual(expected_time, total_time, delta=0.05)

    # Repeat with a user-provided timeout; only the errback should fire
    start_time = time.time()
    future = self.session.execute_async(ss, timeout=1)
    mock_callback = Mock(return_value=None)
    mock_errorback = Mock(return_value=None)
    future.add_callback(mock_callback)
    future.add_errback(mock_errorback)
    with self.assertRaises(OperationTimedOut):
        future.result()
    end_time = time.time()
    total_time = end_time - start_time
    expected_time = 1
    self.assertAlmostEqual(expected_time, total_time, delta=0.05)
    self.assertTrue(mock_errorback.called)
    self.assertFalse(mock_callback.called)
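# The async API pattern the test exercises, in isolation (a sketch, assuming
# an open `session`):
future = session.execute_async('SELECT * FROM system.local', timeout=1)

def on_success(rows):
    print('got rows: {0}'.format(rows))

def on_error(exc):
    print('query failed: {0!r}'.format(exc))

future.add_callbacks(on_success, on_error)
# ...or block on the result, letting OperationTimedOut propagate:
# rows = future.result()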
def test_predicate_changes(self):
    """Test to validate that HostFilterPolicy reacts correctly when the
    predicate returns a different subset of the hosts

    @since 3.8
    @jira_ticket PYTHON-961
    @expected_result the excluded hosts are ignored
    @test_category policy
    """
    external_event = True
    contact_point = '127.0.0.1'

    single_host = {Host(contact_point, SimpleConvictionPolicy)}
    all_hosts = {Host('127.0.0.{}'.format(i), SimpleConvictionPolicy) for i in (1, 2, 3)}

    predicate = lambda host: host.address == contact_point if external_event else True
    cluster = Cluster(
        (contact_point,),
        load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=predicate),
        protocol_version=PROTOCOL_VERSION,
        topology_event_refresh_window=0,
        status_event_refresh_window=0)
    session = cluster.connect(wait_for_all_pools=True)

    queried_hosts = set()
    for _ in range(10):
        response = session.execute('SELECT * from system.local')
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, single_host)

    # Flip the predicate and rebuild the pools; all hosts should now be used
    external_event = False
    futures = session.update_created_pools()
    wait_futures(futures, timeout=cluster.connect_timeout)

    queried_hosts = set()
    for _ in range(10):
        response = session.execute('SELECT * from system.local')
        queried_hosts.update(response.response_future.attempted_hosts)
    self.assertEqual(queried_hosts, all_hosts)
@cythontest
def test_cython_parser(self):
    """Test Cython-based parser that returns a list of tuples"""
    verify_iterator_data(self.assertEqual, get_data(ProtocolHandler))
@cythontest
def test_cython_lazy_parser(self):
    """Test Cython-based parser that returns an iterator of tuples"""
    verify_iterator_data(self.assertEqual, get_data(LazyProtocolHandler))
@notprotocolv1
@numpytest
def test_cython_lazy_results_paged(self):
    """Test Cython-based parser that returns an iterator, over multiple pages"""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(keyspace='testspace')
    session.row_factory = tuple_factory
    session.client_protocol_handler = LazyProtocolHandler
    session.default_fetch_size = 2

    self.assertLess(session.default_fetch_size, self.N_ITEMS)

    results = session.execute('SELECT * FROM test_table')
    self.assertTrue(results.has_more_pages)
    self.assertEqual(verify_iterator_data(self.assertEqual, results), self.N_ITEMS)

    cluster.shutdown()
@notprotocolv1
@numpytest
def test_numpy_parser(self):
    """Test Numpy-based parser that returns a NumPy array"""
    result = get_data(NumpyProtocolHandler)
    self.assertFalse(result.has_more_pages)
    self._verify_numpy_page(result[0])
@notprotocolv1
@numpytest
def test_numpy_results_paged(self):
    """Test Numpy-based parser that returns NumPy arrays, over multiple pages"""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect(keyspace='testspace')
    session.row_factory = tuple_factory
    session.client_protocol_handler = NumpyProtocolHandler
    session.default_fetch_size = 2

    expected_pages = (self.N_ITEMS + session.default_fetch_size - 1) // session.default_fetch_size

    self.assertLess(session.default_fetch_size, self.N_ITEMS)

    results = session.execute('SELECT * FROM test_table')
    self.assertTrue(results.has_more_pages)
    for count, page in enumerate(results, 1):
        self.assertIsInstance(page, dict)
        for colname, arr in page.items():
            if count <= expected_pages:
                self.assertGreater(len(arr), 0, 'page count: %d' % (count,))
                self.assertLessEqual(len(arr), session.default_fetch_size)
            else:
                # we get one extra 0-length page at the end
                self.assertEqual(len(arr), 0)
        self.assertEqual(self._verify_numpy_page(page), len(arr))
    self.assertEqual(count, expected_pages + 1)

    cluster.shutdown()
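# Opting a session into the NumPy handler outside the test harness looks
# like this (a sketch; requires the driver built with its Cython/NumPy
# extensions, and 'testspace'/'test_table' are the test fixtures):
from cassandra.cluster import Cluster
from cassandra.protocol import NumpyProtocolHandler
from cassandra.query import tuple_factory

cluster = Cluster(['127.0.0.1'])
session = cluster.connect('testspace')
session.row_factory = tuple_factory  # required by the NumPy handler
session.client_protocol_handler = NumpyProtocolHandler
page = session.execute('SELECT * FROM test_table')[0]  # dict of column name -> ndarray
cluster.shutdown()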
@numpytest
def test_cython_numpy_are_installed_valid(self):
    """Test to validate that cython and numpy are installed correctly

    @since 3.3.0
    @jira_ticket PYTHON-543
    @expected_result Cython and Numpy should be present
    @test_category configuration
    """
    if VERIFY_CYTHON:
        self.assertTrue(HAVE_CYTHON)
        self.assertTrue(HAVE_NUMPY)
def match_dtype(self, datatype, dtype):
    """Match a string cqltype (e.g. 'int' or 'blob') with a numpy dtype"""
    if datatype == 'smallint':
        self.match_dtype_props(dtype, 'i', 2)
    elif datatype == 'int':
        self.match_dtype_props(dtype, 'i', 4)
    elif datatype in ('bigint', 'counter'):
        self.match_dtype_props(dtype, 'i', 8)
    elif datatype == 'float':
        self.match_dtype_props(dtype, 'f', 4)
    elif datatype == 'double':
        self.match_dtype_props(dtype, 'f', 8)
    else:
        self.assertEqual(dtype.kind, 'O', msg=(dtype, datatype))
@numpytest
@greaterthancass21
def test_null_types(self):
    """Test to validate that the numpy protocol handler can deal with null values.

    @since 3.3.0 - updated 3.6.0: numeric types now use masked arrays
    @jira_ticket PYTHON-550
    @expected_result Numpy can handle non-mapped types' null values.
    @test_category data_types:serialization
    """
    s = self.session
    s.row_factory = tuple_factory
    s.client_protocol_handler = NumpyProtocolHandler

    table = '%s.%s' % (self.keyspace_name, self.function_table_name)
    create_table_with_all_types(table, s, 10)

    begin_unset = max(s.execute('select primkey from %s' % (table,))[0]['primkey']) + 1
    keys_null = range(begin_unset, begin_unset + 10)

    # scatter some unset rows in here
    insert = 'insert into %s (primkey) values (%%s)' % (table,)
    execute_concurrent_with_args(s, insert, ((k,) for k in keys_null))

    result = s.execute('select * from %s' % (table,))[0]

    from numpy.ma import masked, MaskedArray
    result_keys = result.pop('primkey')
    mapped_index = [v[1] for v in sorted(zip(result_keys, count()))]

    had_masked = had_none = False
    for col_array in result.values():
        if isinstance(col_array, MaskedArray):
            had_masked = True
            [self.assertIsNot(col_array[i], masked) for i in mapped_index[:begin_unset]]
            [self.assertIs(col_array[i], masked) for i in mapped_index[begin_unset:]]
        else:
            had_none = True
            [self.assertIsNotNone(col_array[i]) for i in mapped_index[:begin_unset]]
            [self.assertIsNone(col_array[i]) for i in mapped_index[begin_unset:]]
    self.assertTrue(had_masked)
    self.assertTrue(had_none)
@local
def test_ignored_host_up(self):
    """Test to ensure that is_up is not set by default on ignored hosts

    @since 3.6
    @jira_ticket PYTHON-551
    @expected_result ignored hosts should have None set for is_up
    @test_category connection
    """
    ignored_host_policy = IgnoredHostPolicy(['127.0.0.2', '127.0.0.3'])
    cluster = Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=ignored_host_policy)
    session = cluster.connect()
    for host in cluster.metadata.all_hosts():
        if str(host) == '127.0.0.1':
            self.assertTrue(host.is_up)
        else:
            self.assertIsNone(host.is_up)
    cluster.shutdown()
@local
def test_host_resolution(self):
    """Test to ensure A records are resolved appropriately.

    @since 3.3
    @jira_ticket PYTHON-415
    @expected_result hostname will be transformed into IP
    @test_category connection
    """
    cluster = Cluster(contact_points=['localhost'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)
    self.assertTrue('127.0.0.1' in cluster.contact_points_resolved)
@local
def test_host_duplication(self):
    """Ensure that duplicate hosts in the contact points are not surfaced in the cluster metadata

    @since 3.3
    @jira_ticket PYTHON-103
    @expected_result duplicate hosts aren't surfaced in cluster.metadata
    @test_category connection
    """
    cluster = Cluster(contact_points=['localhost', '127.0.0.1', 'localhost', 'localhost', 'localhost'],
                      protocol_version=PROTOCOL_VERSION, connect_timeout=1)
    cluster.connect(wait_for_all_pools=True)
    self.assertEqual(len(cluster.metadata.all_hosts()), 3)
    cluster.shutdown()

    cluster = Cluster(contact_points=['127.0.0.1', 'localhost'],
                      protocol_version=PROTOCOL_VERSION, connect_timeout=1)
    cluster.connect(wait_for_all_pools=True)
    self.assertEqual(len(cluster.metadata.all_hosts()), 3)
    cluster.shutdown()
@local
def test_raise_error_on_control_connection_timeout(self):
    """Test for initial control connection timeout

    test_raise_error_on_control_connection_timeout tests that the driver
    times out after the set initial connection timeout. It first pauses
    node1, essentially making it unreachable. It then attempts to create a
    Cluster object by connecting to node1 with a timeout of 1 second, and
    ensures that a NoHostAvailable is raised, along with an OperationTimedOut
    for 1 second.

    @expected_errors NoHostAvailable when node1 is paused and a connection attempt is made.
    @since 2.6.0
    @jira_ticket PYTHON-206
    @expected_result NoHostAvailable exception should be raised after 1 second.
    @test_category connection
    """
    get_node(1).pause()
    cluster = Cluster(contact_points=['127.0.0.1'], protocol_version=PROTOCOL_VERSION, connect_timeout=1)

    with self.assertRaisesRegexp(NoHostAvailable, "OperationTimedOut\\('errors=Timed out creating connection \\(1 seconds\\)"):
        cluster.connect()
    cluster.shutdown()

    get_node(1).resume()
def test_basic(self):
    """Test basic connection and usage"""
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    session = cluster.connect()
    result = execute_until_pass(
        session,
        """
        CREATE KEYSPACE clustertests
        WITH replication = {'class': 'SimpleStrategy', 'replication_factor': '1'}
        """)
    self.assertFalse(result)

    result = execute_with_long_wait_retry(
        session,
        """
        CREATE TABLE clustertests.cf0 (
            a text,
            b text,
            c text,
            PRIMARY KEY (a, b)
        )
        """)
    self.assertFalse(result)

    result = session.execute(
        """
        INSERT INTO clustertests.cf0 (a, b, c) VALUES ('a', 'b', 'c')
        """)
    self.assertFalse(result)

    result = session.execute('SELECT * FROM clustertests.cf0')
    self.assertEqual([('a', 'b', 'c')], result)

    execute_with_long_wait_retry(session, 'DROP KEYSPACE clustertests')
    cluster.shutdown()
def test_session_host_parameter(self):
    """Test for the Session host parameter

    Verify that NoHostAvailable is raised in Session.__init__ when there are
    no valid connections, and that no error is raised otherwise, even if some
    of the hosts are invalid.

    @since 3.9
    @jira_ticket PYTHON-665
    @expected_result NoHostAvailable when the driver is unable to connect to
    a valid host, no exception otherwise
    @test_category connection
    """
    # Test with an empty host list
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    with self.assertRaises(NoHostAvailable):
        Session(cluster, [])
    cluster.shutdown()

    # Test with only invalid hosts
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    with self.assertRaises(NoHostAvailable):
        Session(cluster, [Host('1.2.3.4', SimpleConvictionPolicy)])
    cluster.shutdown()

    # Test with a mix of valid and invalid hosts
    cluster = Cluster(protocol_version=PROTOCOL_VERSION)
    Session(cluster, [Host(x, SimpleConvictionPolicy) for x in ('127.0.0.1', '127.0.0.2', '1.2.3.4')])
    cluster.shutdown()
def test_protocol_negotiation(self):
    """Test for protocol negotiation

    test_protocol_negotiation tests that the driver will select the correct
    protocol version to match the connected Cassandra version. Please note
    that 2.1.5 has a bug, https://issues.apache.org/jira/browse/CASSANDRA-9451,
    that will cause this test to fail; it was rectified in 2.1.6.

    @since 2.6.0
    @jira_ticket PYTHON-240
    @expected_result the correct protocol version should be selected
    @test_category connection
    """
    cluster = Cluster()
    self.assertLessEqual(cluster.protocol_version, cassandra.ProtocolVersion.MAX_SUPPORTED)
    session = cluster.connect()
    updated_protocol_version = session._protocol_version
    updated_cluster_version = cluster.protocol_version
    # Check the protocol version corresponding to the server version
    if CASSANDRA_VERSION >= '2.2':
        self.assertEqual(updated_protocol_version, 4)
        self.assertEqual(updated_cluster_version, 4)
    elif CASSANDRA_VERSION >= '2.1':
        self.assertEqual(updated_protocol_version, 3)
        self.assertEqual(updated_cluster_version, 3)
    elif CASSANDRA_VERSION >= '2.0':
        self.assertEqual(updated_protocol_version, 2)
        self.assertEqual(updated_cluster_version, 2)
    else:
        self.assertEqual(updated_protocol_version, 1)
        self.assertEqual(updated_cluster_version, 1)

    cluster.shutdown()
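# When the negotiated version matters, pin protocol_version explicitly so
# the driver fails fast instead of downgrading (a sketch):
from cassandra.cluster import Cluster

cluster = Cluster(['127.0.0.1'], protocol_version=4)
session = cluster.connect()  # raises NoHostAvailable if v4 is unsupported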
'Test for protocol negotiation when explicit versions are set If an explicit protocol version that is not compatible with the server version is set, an exception should be thrown. It should not attempt to negotiate. For reference, the supported protocol versions per server version are as follows: 1.2 -> 1; 2.0 -> 2, 1; 2.1 -> 3, 2, 1; 2.2 -> 4, 3, 2, 1; 3.X -> 4, 3. @since 3.6.0 @jira_ticket PYTHON-537 @expected_result downgrading should not be allowed when explicit protocol versions are set. @test_category connection'
def test_invalid_protocol_negotation(self):
upper_bound = get_unsupported_upper_protocol() if (upper_bound is not None): cluster = Cluster(protocol_version=upper_bound) with self.assertRaises(NoHostAvailable): cluster.connect() cluster.shutdown() lower_bound = get_unsupported_lower_protocol() if (lower_bound is not None): cluster = Cluster(protocol_version=lower_bound) with self.assertRaises(NoHostAvailable): cluster.connect() cluster.shutdown()
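A hedged sketch of the no-downgrade behavior: pinning protocol_version=1 is only an example and assumes a server (e.g. Cassandra 3.x, per the table above) that no longer speaks v1:

from cassandra.cluster import Cluster, NoHostAvailable

# Pinning a version the server does not speak fails outright rather than
# silently downgrading (protocol_version=1 vs. a 3.x server is assumed here).
cluster = Cluster(protocol_version=1)
try:
    cluster.connect()
except NoHostAvailable:
    pass  # expected: no negotiation happens for explicit versions
finally:
    cluster.shutdown()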
'Ensure clusters that connect on a keyspace do so correctly'
def test_connect_on_keyspace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() result = session.execute('\n INSERT INTO test1rf.test (k, v) VALUES (8889, 8889)\n ') self.assertFalse(result) result = session.execute('SELECT * FROM test1rf.test') self.assertEqual([(8889, 8889)], result, 'Rows in ResultSet are {0}'.format(result.current_rows)) session2 = cluster.connect('test1rf') result2 = session2.execute('SELECT * FROM test') self.assertEqual(result, result2) cluster.shutdown()
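For illustration, the two equivalent ways to bind a session to a keyspace; test1rf is the fixture keyspace these tests rely on:

from cassandra.cluster import Cluster

cluster = Cluster()
# Either pass the keyspace at connect time...
session = cluster.connect('test1rf')
session.execute('SELECT * FROM test')  # unqualified table names now resolve
# ...or set it afterwards on an existing session.
session2 = cluster.connect()
session2.set_keyspace('test1rf')
cluster.shutdown()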
'Ensure errors are not thrown when using non-default policies'
def test_default_connections(self):
Cluster(load_balancing_policy=RoundRobinPolicy(), reconnection_policy=ExponentialReconnectionPolicy(1.0, 600.0), default_retry_policy=RetryPolicy(), conviction_policy_factory=SimpleConvictionPolicy, protocol_version=PROTOCOL_VERSION)
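The same constructor spelled out with keyword arguments, as a sketch; the reconnection delays are in seconds and mirror the values used above:

from cassandra.cluster import Cluster
from cassandra.policies import (ExponentialReconnectionPolicy, RetryPolicy,
                                RoundRobinPolicy, SimpleConvictionPolicy)

# Explicitly constructed policies; conviction_policy_factory takes the class
# itself, while the other policies are passed as instances.
cluster = Cluster(
    load_balancing_policy=RoundRobinPolicy(),
    reconnection_policy=ExponentialReconnectionPolicy(base_delay=1.0, max_delay=600.0),
    default_retry_policy=RetryPolicy(),
    conviction_policy_factory=SimpleConvictionPolicy,
)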
'Ensure you cannot connect to a cluster that\'s been shut down'
def test_connect_to_already_shutdown_cluster(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION) cluster.shutdown() self.assertRaises(Exception, cluster.connect)
'Ensure that auth_provider is always callable'
def test_auth_provider_is_callable(self):
self.assertRaises(TypeError, Cluster, auth_provider=1, protocol_version=1) c = Cluster(protocol_version=1) self.assertRaises(TypeError, setattr, c, 'auth_provider', 1)
'Check for v2 auth_provider compliance'
def test_v2_auth_provider(self):
bad_auth_provider = (lambda x: {'username': 'foo', 'password': 'bar'}) self.assertRaises(TypeError, Cluster, auth_provider=bad_auth_provider, protocol_version=2) c = Cluster(protocol_version=2) self.assertRaises(TypeError, setattr, c, 'auth_provider', bad_auth_provider)
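By contrast, a sketch of a compliant v2 auth provider using the driver's PlainTextAuthProvider; the credentials are placeholders:

from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider

# With protocol v2+ the auth_provider must be an AuthProvider instance,
# not a bare callable returning a credentials dict.
auth = PlainTextAuthProvider(username='foo', password='bar')  # placeholder credentials
cluster = Cluster(auth_provider=auth, protocol_version=2)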
'Ensure that conviction_policy_factory is always callable'
def test_conviction_policy_factory_is_callable(self):
self.assertRaises(ValueError, Cluster, conviction_policy_factory=1)
'Ensure that a NoHostAvailable Exception is thrown when a cluster cannot connect to given hosts'
def test_connect_to_bad_hosts(self):
cluster = Cluster(['127.1.2.9', '127.1.2.10'], protocol_version=PROTOCOL_VERSION) self.assertRaises(NoHostAvailable, cluster.connect)
'Test connection setting getters and setters'
def test_cluster_settings(self):
if (PROTOCOL_VERSION >= 3): raise unittest.SkipTest("min/max requests and core/max conns aren't used with v3 protocol") cluster = Cluster(protocol_version=PROTOCOL_VERSION) min_requests_per_connection = cluster.get_min_requests_per_connection(HostDistance.LOCAL) self.assertEqual(cassandra.cluster.DEFAULT_MIN_REQUESTS, min_requests_per_connection) cluster.set_min_requests_per_connection(HostDistance.LOCAL, (min_requests_per_connection + 1)) self.assertEqual(cluster.get_min_requests_per_connection(HostDistance.LOCAL), (min_requests_per_connection + 1)) max_requests_per_connection = cluster.get_max_requests_per_connection(HostDistance.LOCAL) self.assertEqual(cassandra.cluster.DEFAULT_MAX_REQUESTS, max_requests_per_connection) cluster.set_max_requests_per_connection(HostDistance.LOCAL, (max_requests_per_connection + 1)) self.assertEqual(cluster.get_max_requests_per_connection(HostDistance.LOCAL), (max_requests_per_connection + 1)) core_connections_per_host = cluster.get_core_connections_per_host(HostDistance.LOCAL) self.assertEqual(cassandra.cluster.DEFAULT_MIN_CONNECTIONS_PER_LOCAL_HOST, core_connections_per_host) cluster.set_core_connections_per_host(HostDistance.LOCAL, (core_connections_per_host + 1)) self.assertEqual(cluster.get_core_connections_per_host(HostDistance.LOCAL), (core_connections_per_host + 1)) max_connections_per_host = cluster.get_max_connections_per_host(HostDistance.LOCAL) self.assertEqual(cassandra.cluster.DEFAULT_MAX_CONNECTIONS_PER_LOCAL_HOST, max_connections_per_host) cluster.set_max_connections_per_host(HostDistance.LOCAL, (max_connections_per_host + 1)) self.assertEqual(cluster.get_max_connections_per_host(HostDistance.LOCAL), (max_connections_per_host + 1))
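A condensed sketch of the getter/setter round trip the test performs, shown for core connections only; protocol_version=2 is an assumption here, since pool sizing is ignored under v3+:

from cassandra.cluster import Cluster
from cassandra.policies import HostDistance

cluster = Cluster(protocol_version=2)  # pool sizing only applies to v1/v2
# Read the default, bump it, and read it back; no connection is required.
core = cluster.get_core_connections_per_host(HostDistance.LOCAL)
cluster.set_core_connections_per_host(HostDistance.LOCAL, core + 1)
assert cluster.get_core_connections_per_host(HostDistance.LOCAL) == core + 1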
'Ensure trace can be requested for async and non-async queries'
def test_trace(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() def check_trace(trace): self.assertIsNotNone(trace.request_type) self.assertIsNotNone(trace.duration) self.assertIsNotNone(trace.started_at) self.assertIsNotNone(trace.coordinator) self.assertIsNotNone(trace.events) result = session.execute('SELECT * FROM system.local', trace=True) check_trace(result.get_query_trace()) query = 'SELECT * FROM system.local' statement = SimpleStatement(query) result = session.execute(statement, trace=True) check_trace(result.get_query_trace()) query = 'SELECT * FROM system.local' statement = SimpleStatement(query) result = session.execute(statement) self.assertIsNone(result.get_query_trace()) statement2 = SimpleStatement(query) future = session.execute_async(statement2, trace=True) future.result() check_trace(future.get_query_trace()) statement2 = SimpleStatement(query) future = session.execute_async(statement2) future.result() self.assertIsNone(future.get_query_trace()) prepared = session.prepare('SELECT * FROM system.local') future = session.execute_async(prepared, parameters=(), trace=True) future.result() check_trace(future.get_query_trace()) cluster.shutdown()
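A minimal sketch of requesting and reading a trace, touching the same fields check_trace asserts on:

from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement

cluster = Cluster()
session = cluster.connect()
result = session.execute(SimpleStatement('SELECT * FROM system.local'), trace=True)
trace = result.get_query_trace()
# The populated trace carries the request type, duration, start time,
# coordinator, and a list of per-step events.
print(trace.request_type, trace.duration, trace.started_at, trace.coordinator)
for event in trace.events:
    print(event.source_elapsed, event.description)
cluster.shutdown()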
'First checks that TraceUnavailable is raised if the max_wait parameter is negative. Then checks that TraceUnavailable is raised if the result hasn\'t been set yet @since 3.10 @jira_ticket PYTHON-196 @expected_result TraceUnavailable is raised in both cases @test_category query'
def test_trace_unavailable(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION) self.addCleanup(cluster.shutdown) session = cluster.connect() query = 'SELECT * FROM system.local' statement = SimpleStatement(query) max_retry_count = 10 for i in range(max_retry_count): future = session.execute_async(statement, trace=True) future.result() try: result = future.get_query_trace((-1.0)) self.check_trace(result) except TraceUnavailable: break else: raise Exception("get_query_trace didn't raise TraceUnavailable after {} tries".format(max_retry_count)) for i in range(max_retry_count): future = session.execute_async(statement, trace=True) try: result = future.get_query_trace(max_wait=120) self.check_trace(result) except TraceUnavailable: break else: raise Exception("get_query_trace didn't raise TraceUnavailable after {} tries".format(max_retry_count))
'Ensure str(future) returns without error'
def test_string_coverage(self):
cluster = Cluster(protocol_version=PROTOCOL_VERSION) session = cluster.connect() query = 'SELECT * FROM system.local' statement = SimpleStatement(query) future = session.execute_async(statement) self.assertIn(query, str(future)) future.result() self.assertIn(query, str(future)) self.assertIn('result', str(future)) cluster.shutdown()
'Tests that profile load balancing policies are honored. @since 3.5 @jira_ticket PYTHON-569 @expected_result execution profile policies should be used when applicable. @test_category config_profiles'
@local def test_profile_load_balancing(self):
query = 'select release_version from system.local' node1 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address == CASSANDRA_IP)))) with Cluster(execution_profiles={'node1': node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) expected_hosts = set(cluster.metadata.all_hosts()) queried_hosts = set() for _ in expected_hosts: rs = session.execute(query) queried_hosts.add(rs.response_future._current_host) self.assertEqual(queried_hosts, expected_hosts) expected_hosts = set((h for h in cluster.metadata.all_hosts() if (h.address == CASSANDRA_IP))) queried_hosts = set() for _ in cluster.metadata.all_hosts(): rs = session.execute(query, execution_profile='node1') queried_hosts.add(rs.response_future._current_host) self.assertEqual(queried_hosts, expected_hosts) named_tuple_row = rs[0] self.assertIsInstance(named_tuple_row, tuple) self.assertTrue(named_tuple_row.release_version) tmp_profile = copy(node1) tmp_profile.row_factory = tuple_factory queried_hosts = set() for _ in cluster.metadata.all_hosts(): rs = session.execute(query, execution_profile=tmp_profile) queried_hosts.add(rs.response_future._current_host) self.assertEqual(queried_hosts, expected_hosts) tuple_row = rs[0] self.assertIsInstance(tuple_row, tuple) with self.assertRaises(AttributeError): tuple_row.release_version self.assertTrue(session.execute(query, execution_profile='node1')[0].release_version)
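A trimmed-down sketch of routing through a named profile; the filtered address is illustrative:

from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import HostFilterPolicy, RoundRobinPolicy

# Restrict a profile to a single host ('127.0.0.1' is an illustrative address).
node1 = ExecutionProfile(
    load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(),
                                           lambda host: host.address == '127.0.0.1'))
cluster = Cluster(execution_profiles={'node1': node1})
session = cluster.connect()
# Selecting the profile by name routes this query through its LBP.
session.execute('select release_version from system.local', execution_profile='node1')
cluster.shutdown()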
'Tests that profile load balancing policies are not shared Creates two LBPs, runs a few queries, and validates that each LBP is exercised separately between EPs @since 3.5 @jira_ticket PYTHON-569 @expected_result LBP should not be shared. @test_category config_profiles'
def test_profile_lb_swap(self):
query = 'select release_version from system.local' rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1, 'rr2': rr2} with Cluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect(wait_for_all_pools=True) expected_hosts = set(cluster.metadata.all_hosts()) rr1_queried_hosts = set() rr2_queried_hosts = set() rs = session.execute(query, execution_profile='rr1') rr1_queried_hosts.add(rs.response_future._current_host) rs = session.execute(query, execution_profile='rr2') rr2_queried_hosts.add(rs.response_future._current_host) self.assertEqual(rr2_queried_hosts, rr1_queried_hosts)
'Test that execution profiles containing token-aware LBPs can be added @since 3.5 @jira_ticket PYTHON-569 @expected_result Queries can run @test_category config_profiles'
def test_ta_lbp(self):
query = 'select release_version from system.local' ta1 = ExecutionProfile() with Cluster() as cluster: session = cluster.connect() cluster.add_execution_profile('ta1', ta1) rs = session.execute(query, execution_profile='ta1')
'Tests that profile load balancing policies are shared on clone Creates one LBP, clones it, and ensures that the LBP is shared between the two EPs @since 3.5 @jira_ticket PYTHON-569 @expected_result LBP is shared @test_category config_profiles'
def test_clone_shared_lbp(self):
query = 'select release_version from system.local' rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1} with Cluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect(wait_for_all_pools=True) self.assertGreater(len(cluster.metadata.all_hosts()), 1, 'We only have one host connected at this point') rr1_clone = session.execution_profile_clone_update('rr1', row_factory=tuple_factory) cluster.add_execution_profile('rr1_clone', rr1_clone) rr1_queried_hosts = set() rr1_clone_queried_hosts = set() rs = session.execute(query, execution_profile='rr1') rr1_queried_hosts.add(rs.response_future._current_host) rs = session.execute(query, execution_profile='rr1_clone') rr1_clone_queried_hosts.add(rs.response_future._current_host) self.assertNotEqual(rr1_clone_queried_hosts, rr1_queried_hosts)
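A sketch of the clone-update semantics: only the overridden attribute (row_factory here) differs, while mutable objects such as the LBP instance are shared between the original and the clone:

from cassandra.cluster import Cluster, ExecutionProfile
from cassandra.policies import RoundRobinPolicy
from cassandra.query import tuple_factory

rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy())
cluster = Cluster(execution_profiles={'rr1': rr1})
session = cluster.connect()
# The clone overrides row_factory but shares rr1's LBP instance, so the
# round-robin position advances across queries on either profile.
clone = session.execution_profile_clone_update('rr1', row_factory=tuple_factory)
cluster.add_execution_profile('rr1_clone', clone)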
'Tests to verify that using an unknown profile raises a ValueError @since 3.5 @jira_ticket PYTHON-569 @expected_result ValueError @test_category config_profiles'
def test_missing_exec_prof(self):
query = 'select release_version from system.local' rr1 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) rr2 = ExecutionProfile(load_balancing_policy=RoundRobinPolicy()) exec_profiles = {'rr1': rr1, 'rr2': rr2} with Cluster(execution_profiles=exec_profiles) as cluster: session = cluster.connect() with self.assertRaises(ValueError): session.execute(query, execution_profile='rr3')
'Tests that changes to execution profiles correctly impact our cluster\'s pooling @since 3.5 @jira_ticket PYTHON-569 @expected_result pools should be correctly updated as EPs are added and removed @test_category config_profiles'
@local def test_profile_pool_management(self):
node1 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address == '127.0.0.1')))) node2 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address == '127.0.0.2')))) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1, 'node2': node2}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() self.assertGreater(len(cluster.metadata.all_hosts()), 2) self.assertEqual(set((h.address for h in pools)), set(('127.0.0.1', '127.0.0.2'))) node3 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address == '127.0.0.3')))) cluster.add_execution_profile('node3', node3) pools = session.get_pool_state() self.assertEqual(set((h.address for h in pools)), set(('127.0.0.1', '127.0.0.2', '127.0.0.3')))
'Tests that EP timeouts are honored. @since 3.5 @jira_ticket PYTHON-569 @expected_result EP timeouts should override defaults @test_category config_profiles'
@local def test_add_profile_timeout(self):
max_retry_count = 10 for i in range(max_retry_count): node1 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address == '127.0.0.1')))) with Cluster(execution_profiles={EXEC_PROFILE_DEFAULT: node1}) as cluster: session = cluster.connect(wait_for_all_pools=True) pools = session.get_pool_state() self.assertGreater(len(cluster.metadata.all_hosts()), 2) self.assertEqual(set((h.address for h in pools)), set(('127.0.0.1',))) node2 = ExecutionProfile(load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), (lambda host: (host.address in ['127.0.0.2', '127.0.0.3'])))) start = time.time() try: self.assertRaises(cassandra.OperationTimedOut, cluster.add_execution_profile, 'profile_{0}'.format(i), node2, pool_wait_timeout=sys.float_info.min) break except AssertionError: end = time.time() self.assertAlmostEqual(start, end, 1) else: raise Exception("add_execution_profile didn't timeout after {0} retries".format(max_retry_count))
'Test that replicas are queried first for TokenAwarePolicy. A table with RF 1 is used, so all queries should go to the single replica when TokenAwarePolicy is in effect. Then, using HostFilterPolicy, the replica is excluded from the considered hosts, and by checking the trace we verify that the replica is no longer queried. @since 3.5 @jira_ticket PYTHON-653 @expected_result the replica is queried under TokenAwarePolicy and skipped once excluded by HostFilterPolicy @test_category metadata'
def test_replicas_are_queried(self):
queried_hosts = set() with Cluster(protocol_version=PROTOCOL_VERSION, load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy())) as cluster: session = cluster.connect(wait_for_all_pools=True) session.execute('\n CREATE TABLE test1rf.table_with_big_key (\n k1 int,\n k2 int,\n k3 int,\n k4 int,\n PRIMARY KEY((k1, k2, k3), k4))') prepared = session.prepare('SELECT * from test1rf.table_with_big_key\n WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?') for i in range(10): result = session.execute(prepared, (i, i, i, i), trace=True) queried_hosts = self._assert_replica_queried(result.get_query_trace(), only_replicas=True) last_i = i only_replica = queried_hosts.pop() available_hosts = [host for host in ['127.0.0.1', '127.0.0.2', '127.0.0.3'] if (host != only_replica)] with Cluster(contact_points=available_hosts, protocol_version=PROTOCOL_VERSION, load_balancing_policy=HostFilterPolicy(RoundRobinPolicy(), predicate=(lambda host: (host.address != only_replica)))) as cluster: session = cluster.connect(wait_for_all_pools=True) prepared = session.prepare('SELECT * from test1rf.table_with_big_key\n WHERE k1 = ? AND k2 = ? AND k3 = ? AND k4 = ?') for _ in range(10): result = session.execute(prepared, (last_i, last_i, last_i, last_i), trace=True) self._assert_replica_queried(result.get_query_trace(), only_replicas=False) session.execute('DROP TABLE test1rf.table_with_big_key')
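A minimal sketch of the token-aware wrapping used above; it assumes the test1rf.test table from the earlier tests so the prepared statement carries a routing key:

from cassandra.cluster import Cluster
from cassandra.policies import RoundRobinPolicy, TokenAwarePolicy

# TokenAwarePolicy tries replicas of the bound routing key first, falling
# back to the child policy's ordering for the remaining hosts.
cluster = Cluster(load_balancing_policy=TokenAwarePolicy(RoundRobinPolicy()))
session = cluster.connect()
prepared = session.prepare('SELECT * FROM test1rf.test WHERE k = ?')
session.execute(prepared, (1,))  # routed to a replica of k=1 first
cluster.shutdown()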
'Test host address translation Uses a custom AddressTranslator to map all IPs back to one. Validates AddressTranslator invocation by ensuring that only metadata associated with a single host is populated @since 3.3 @jira_ticket PYTHON-69 @expected_result only one host\'s metadata will be populated @test_category metadata'
def test_address_translator_basic(self):
lh_ad = LocalHostAdressTranslator({'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.1', '127.0.0.3': '127.0.0.1'}) c = Cluster(address_translator=lh_ad) c.connect() self.assertEqual(len(c.metadata.all_hosts()), 1) c.shutdown()
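LocalHostAdressTranslator is a test helper; a hypothetical equivalent built on the driver's AddressTranslator base class might look like this:

from cassandra.cluster import Cluster
from cassandra.policies import AddressTranslator

class MappingTranslator(AddressTranslator):
    # Hypothetical stand-in for the tests' LocalHostAdressTranslator helper.
    def __init__(self, addr_map):
        self.addr_map = addr_map

    def translate(self, addr):
        # Return the mapped address, or the original when unmapped.
        return self.addr_map.get(addr, addr)

# Collapse every discovered address onto one node, as in the test above.
cluster = Cluster(address_translator=MappingTranslator({'127.0.0.2': '127.0.0.1',
                                                        '127.0.0.3': '127.0.0.1'}))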
'Test host address translation Uses a custom AddressTranslator to map the IPs of non-control-connection nodes to each other Validates AddressTranslator invocation by ensuring that metadata for the mapped hosts is also mapped @since 3.3 @jira_ticket PYTHON-69 @expected_result metadata for crossed hosts will also be crossed @test_category metadata'
def test_address_translator_with_mixed_nodes(self):
adder_map = {'127.0.0.1': '127.0.0.1', '127.0.0.2': '127.0.0.3', '127.0.0.3': '127.0.0.2'} lh_ad = LocalHostAdressTranslator(adder_map) c = Cluster(address_translator=lh_ad) c.connect() for host in c.metadata.all_hosts(): self.assertEqual(adder_map.get(str(host)), host.broadcast_address) c.shutdown()
'Test cluster context without connecting. @since 3.4 @jira_ticket PYTHON-521 @expected_result context should still be valid @test_category configuration'
def test_no_connect(self):
with Cluster() as cluster: self.assertFalse(cluster.is_shutdown) self.assertTrue(cluster.is_shutdown)
'Test cluster and session contexts nested in one another. @since 3.4 @jira_ticket PYTHON-521 @expected_result cluster/session should be created and shut down appropriately. @test_category configuration'
def test_simple_nested(self):
with Cluster(**self.cluster_kwargs) as cluster: with cluster.connect() as session: self.assertFalse(cluster.is_shutdown) self.assertFalse(session.is_shutdown) self.assertTrue(session.execute('select release_version from system.local')[0]) self.assertTrue(session.is_shutdown) self.assertTrue(cluster.is_shutdown)
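A minimal sketch of the nesting contract: each __exit__ calls shutdown(), innermost first:

from cassandra.cluster import Cluster

# Both Cluster and Session implement the context-manager protocol;
# leaving each block shuts down that object, innermost first.
with Cluster() as cluster:
    with cluster.connect() as session:
        session.execute('select release_version from system.local')
    assert session.is_shutdown
assert cluster.is_shutdown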
'Test cluster context without session context. @since 3.4 @jira_ticket PYTHON-521 @expected_result Session should be created correctly. Cluster should shut down outside of the context @test_category configuration'
def test_cluster_no_session(self):
with Cluster(**self.cluster_kwargs) as cluster: session = cluster.connect() self.assertFalse(cluster.is_shutdown) self.assertFalse(session.is_shutdown) self.assertTrue(session.execute('select release_version from system.local')[0]) self.assertTrue(session.is_shutdown) self.assertTrue(cluster.is_shutdown)
'Test session context without cluster context. @since 3.4 @jira_ticket PYTHON-521 @expected_result session should be created correctly. Session should shut down correctly outside of the context @test_category configuration'
def test_session_no_cluster(self):
cluster = Cluster(**self.cluster_kwargs) unmanaged_session = cluster.connect() with cluster.connect() as session: self.assertFalse(cluster.is_shutdown) self.assertFalse(session.is_shutdown) self.assertFalse(unmanaged_session.is_shutdown) self.assertTrue(session.execute('select release_version from system.local')[0]) self.assertTrue(session.is_shutdown) self.assertFalse(cluster.is_shutdown) self.assertFalse(unmanaged_session.is_shutdown) unmanaged_session.shutdown() self.assertTrue(unmanaged_session.is_shutdown) self.assertFalse(cluster.is_shutdown) cluster.shutdown() self.assertTrue(cluster.is_shutdown)