Dataset columns:
  code        string (lengths 26–870k)
  docstring   string (lengths 1–65.6k)
  func_name   string (lengths 1–194)
  language    string (1 class)
  repo        string (lengths 8–68)
  path        string (lengths 5–194)
  url         string (lengths 46–254)
  license     string (4 classes)
def test_asserts_for_worker(self):
    """
    Test that Worker() asserts that it's sanely configured
    """
    Worker(wait_interval=1)  # This shouldn't raise
    self.assertRaises(AssertionError, Worker, wait_interval=0)
Test that Worker() asserts that it's sanely configured
test_asserts_for_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_wait_jitter(self, mock_sleep, mock_random):
    """ verify configured jitter amount """
    mock_random.return_value = 1.0

    w = Worker()
    x = w._sleeper()
    next(x)
    mock_random.assert_called_with(0, 10.0)
    mock_sleep.assert_called_with(2.0)

    mock_random.return_value = 2.0
    next(x)
    mock_random.assert_called_with(0, 10.0)
    mock_sleep.assert_called_with(3.0)
verify configured jitter amount
test_wait_jitter
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_wait_jitter_default(self, mock_sleep, mock_random):
    """ verify default jitter is as expected """
    mock_random.return_value = 1.0

    w = Worker()
    x = w._sleeper()
    next(x)
    mock_random.assert_called_with(0, 5.0)
    mock_sleep.assert_called_with(2.0)

    mock_random.return_value = 3.3
    next(x)
    mock_random.assert_called_with(0, 5.0)
    mock_sleep.assert_called_with(4.3)
verify default jitter is as expected
test_wait_jitter_default
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
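Together, the two jitter tests pin down the _sleeper contract: each step sleeps for wait_interval plus random.uniform(0, wait_jitter), with a default jitter of 5.0 and, in the first test, a configured jitter of 10.0. Below is a minimal generator satisfying those assertions; this is a sketch, not necessarily luigi's actual implementation, and the attribute names are assumed.

import random
import time

def _sleeper(self):
    # Each iteration sleeps wait_interval + uniform(0, wait_jitter), then
    # yields control back to the polling loop. With random.uniform mocked
    # to return 1.0 and a wait_interval of 1.0, sleep(2.0) is expected,
    # matching mock_sleep.assert_called_with(2.0) above.
    while True:
        jitter = random.uniform(0, self._wait_jitter)    # assumed attribute
        time.sleep(self._wait_interval + jitter)         # assumed attribute
        yield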
def test_propagation_when_executing(self):
    """
    Ensure that keyboard interrupts cause luigi to quit when you are executing tasks.

    TODO: Add a test that tests the multiprocessing (--workers > 1) case
    """
    class KeyboardInterruptTask(luigi.Task):
        def run(self):
            raise KeyboardInterrupt()

    cmd = 'KeyboardInterruptTask --local-scheduler --no-lock'.split(' ')
    self.assertRaises(KeyboardInterrupt, luigi_run, cmd)
Ensure that keyboard interrupts cause luigi to quit when you are executing tasks. TODO: Add a test that tests the multiprocessing (--workers > 1) case
test_propagation_when_executing
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_propagation_when_scheduling(self):
    """
    Test that KeyboardInterrupt causes luigi to quit while scheduling.
    """
    class KeyboardInterruptTask(luigi.Task):
        def complete(self):
            raise KeyboardInterrupt()

    class ExternalKeyboardInterruptTask(luigi.ExternalTask):
        def complete(self):
            raise KeyboardInterrupt()

    self.assertRaises(KeyboardInterrupt, luigi_run,
                      ['KeyboardInterruptTask', '--local-scheduler', '--no-lock'])
    self.assertRaises(KeyboardInterrupt, luigi_run,
                      ['ExternalKeyboardInterruptTask', '--local-scheduler', '--no-lock'])
Test that KeyboardInterrupt causes luigi to quit while scheduling.
test_propagation_when_scheduling
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_with_all_disabled_with_single_worker(self):
    """
    Test a case where a task (TestWrapperTask) requires two other tasks
    (TestErrorTask1, TestErrorTask2) which both fail.

    TestErrorTask1 has the default retry_count of 1, while TestErrorTask2
    sets retry_count at the task level to 2. This test runs on a single worker.
    """
    class TestErrorTask1(DummyErrorTask):
        pass

    e1 = TestErrorTask1()

    class TestErrorTask2(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e2 = TestErrorTask2()

    class TestWrapperTask(luigi.WrapperTask):
        def requires(self):
            return [e2, e1]

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        self.assertTrue(w1.add(wt))
        self.assertFalse(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).num_failures())
    self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
Test a case where a task (TestWrapperTask) requires two other tasks (TestErrorTask1, TestErrorTask2) which both fail. TestErrorTask1 has the default retry_count of 1, while TestErrorTask2 sets retry_count at the task level to 2. This test runs on a single worker.
test_with_all_disabled_with_single_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
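These retry-policy tests rely on fixtures defined elsewhere in worker_test.py: DummyTask, DummyErrorTask, and the attributes self.per_task_retry_count (2) and self.default_retry_count (1), with the scheduler presumably created with retry_count equal to the default. A minimal sketch of what those helpers plausibly look like; this is an assumption for orientation, not the verbatim fixtures:

import luigi

class DummyTask(luigi.Task):
    """Sketch: completes once run() has executed."""
    def __init__(self, *args, **kwargs):
        super(DummyTask, self).__init__(*args, **kwargs)
        self.has_run = False

    def complete(self):
        return self.has_run

    def run(self):
        self.has_run = True

class DummyErrorTask(luigi.Task):
    """Sketch: always fails, driving the retry/DISABLED accounting."""
    def run(self):
        raise Exception("dummy error")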
def test_with_all_disabled_with_multiple_worker(self):
    """
    Test a case where a task (TestWrapperTask) requires two other tasks
    (TestErrorTask1, TestErrorTask2) which both fail.

    TestErrorTask1 has the default retry_count of 1, while TestErrorTask2
    sets retry_count at the task level to 2. This test runs on multiple workers.
    """
    class TestErrorTask1(DummyErrorTask):
        pass

    e1 = TestErrorTask1()

    class TestErrorTask2(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e2 = TestErrorTask2()

    class TestWrapperTask(luigi.WrapperTask):
        def requires(self):
            return [e2, e1]

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
            with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
                self.assertTrue(w1.add(wt))
                self.assertTrue(w2.add(e2))
                self.assertTrue(w3.add(e1))

                self.assertFalse(w3.run())
                self.assertFalse(w2.run())
                self.assertTrue(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).num_failures())
    self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
Test a case where a task (TestWrapperTask) requires two other tasks (TestErrorTask1, TestErrorTask2) which both fail. TestErrorTask1 has the default retry_count of 1, while TestErrorTask2 sets retry_count at the task level to 2. This test runs on multiple workers.
test_with_all_disabled_with_multiple_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_with_includes_success_with_single_worker(self):
    """
    Test a case where a task (TestWrapperTask) requires one task that fails
    (TestErrorTask1) and one that succeeds (TestSuccessTask1).

    TestSuccessTask1 finishes DONE successfully, while TestErrorTask1 fails
    and has a task-level retry_count of 2. This test runs on a single worker.
    """
    class TestSuccessTask1(DummyTask):
        pass

    s1 = TestSuccessTask1()

    class TestErrorTask1(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e1 = TestErrorTask1()

    class TestWrapperTask(luigi.WrapperTask):
        def requires(self):
            return [e1, s1]

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        self.assertTrue(w1.add(wt))
        self.assertFalse(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
    self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
    self.assertEqual(0, self.sch._state.get_task(s1.task_id).num_failures())
Test a case where a task (TestWrapperTask) requires one task that fails (TestErrorTask1) and one that succeeds (TestSuccessTask1). TestSuccessTask1 finishes DONE successfully, while TestErrorTask1 fails and has a task-level retry_count of 2. This test runs on a single worker.
test_with_includes_success_with_single_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_with_includes_success_with_multiple_worker(self):
    """
    Test a case where a task (TestWrapperTask) requires one task that fails
    (TestErrorTask1) and one that succeeds (TestSuccessTask1).

    TestSuccessTask1 finishes DONE successfully, while TestErrorTask1 fails
    and has a task-level retry_count of 2. This test runs on multiple workers.
    """
    class TestSuccessTask1(DummyTask):
        pass

    s1 = TestSuccessTask1()

    class TestErrorTask1(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e1 = TestErrorTask1()

    class TestWrapperTask(luigi.WrapperTask):
        def requires(self):
            return [e1, s1]

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
            with Worker(scheduler=self.sch, worker_id='Z', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w3:
                self.assertTrue(w1.add(wt))
                self.assertTrue(w2.add(e1))
                self.assertTrue(w3.add(s1))

                self.assertTrue(w3.run())
                self.assertFalse(w2.run())
                self.assertTrue(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual([e1.task_id], list(self.sch.task_list('DISABLED', '').keys()))
    self.assertEqual([s1.task_id], list(self.sch.task_list('DONE', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
    self.assertEqual(0, self.sch._state.get_task(s1.task_id).num_failures())
Test a case where a task (TestWrapperTask) requires one task that fails (TestErrorTask1) and one that succeeds (TestSuccessTask1). TestSuccessTask1 finishes DONE successfully, while TestErrorTask1 fails and has a task-level retry_count of 2. This test runs on multiple workers.
test_with_includes_success_with_multiple_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_with_dynamic_dependencies_with_single_worker(self):
    """
    Test a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2)
    which both fail.

    TestErrorTask1 has the default retry_count of 1, while TestErrorTask2
    sets retry_count at the task level to 2. This test runs on a single worker.
    """
    class TestErrorTask1(DummyErrorTask):
        pass

    e1 = TestErrorTask1()

    class TestErrorTask2(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e2 = TestErrorTask2()

    class TestSuccessTask1(DummyTask):
        pass

    s1 = TestSuccessTask1()

    class TestWrapperTask(DummyTask):
        def requires(self):
            return [s1]

        def run(self):
            super(TestWrapperTask, self).run()
            yield e2, e1

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        self.assertTrue(w1.add(wt))
        self.assertFalse(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(0, self.sch._state.get_task(s1.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).num_failures())
    self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
Test a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2) which both fail. TestErrorTask1 has the default retry_count of 1, while TestErrorTask2 sets retry_count at the task level to 2. This test runs on a single worker.
test_with_dynamic_dependencies_with_single_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_with_dynamic_dependencies_with_multiple_workers(self):
    """
    Test a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2)
    which both fail.

    TestErrorTask1 has the default retry_count of 1, while TestErrorTask2
    sets retry_count at the task level to 2. This test runs on multiple workers.
    """
    class TestErrorTask1(DummyErrorTask):
        pass

    e1 = TestErrorTask1()

    class TestErrorTask2(DummyErrorTask):
        retry_count = self.per_task_retry_count

    e2 = TestErrorTask2()

    class TestSuccessTask1(DummyTask):
        pass

    s1 = TestSuccessTask1()

    class TestWrapperTask(DummyTask):
        def requires(self):
            return [s1]

        def run(self):
            super(TestWrapperTask, self).run()
            yield e2, e1

    wt = TestWrapperTask()

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w1:
        with Worker(scheduler=self.sch, worker_id='Y', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w2:
            self.assertTrue(w1.add(wt))
            self.assertTrue(w2.add(s1))

            self.assertTrue(w2.run())
            self.assertFalse(w1.run())

    self.assertEqual([wt.task_id], list(self.sch.task_list('PENDING', 'UPSTREAM_DISABLED').keys()))
    self.assertEqual(sorted([e1.task_id, e2.task_id]), sorted(self.sch.task_list('DISABLED', '').keys()))

    self.assertEqual(0, self.sch._state.get_task(wt.task_id).num_failures())
    self.assertEqual(0, self.sch._state.get_task(s1.task_id).num_failures())
    self.assertEqual(self.per_task_retry_count, self.sch._state.get_task(e2.task_id).num_failures())
    self.assertEqual(self.default_retry_count, self.sch._state.get_task(e1.task_id).num_failures())
Test a case with dynamic dependency tasks (TestErrorTask1, TestErrorTask2) which both fail. TestErrorTask1 has the default retry_count of 1, while TestErrorTask2 sets retry_count at the task level to 2. This test runs on multiple workers.
test_with_dynamic_dependencies_with_multiple_workers
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_per_task_disable_persist_with_single_worker(self):
    """
    Ensure that `Task.disable_window` impacts the task retrying policy:

    - with the scheduler retry policy (disable_window=2), the task fails twice and gets disabled
    - with the task retry policy (disable_window=0.5), the task never gets into the DISABLED state
    """
    class TwoErrorsThenSuccessTask(Task):
        """
        The task fails two times and then succeeds, waiting 1s before each try
        """
        retry_index = 0
        disable_window = None

        def run(self):
            time.sleep(1)
            self.retry_index += 1
            if self.retry_index < 3:
                raise Exception("Retry index is %s for %s" % (self.retry_index, self.task_family))

    t = TwoErrorsThenSuccessTask()
    sch = Scheduler(retry_delay=0.1, retry_count=2, prune_on_get_work=True, disable_window=2)
    with Worker(scheduler=sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w:
        self.assertTrue(w.add(t))
        self.assertFalse(w.run())

    self.assertEqual(2, t.retry_index)
    self.assertEqual([t.task_id], list(sch.task_list('DISABLED').keys()))
    self.assertEqual(2, sch._state.get_task(t.task_id).num_failures())

    t = TwoErrorsThenSuccessTask()
    t.retry_index = 0
    t.disable_window = 0.5
    sch = Scheduler(retry_delay=0.1, retry_count=2, prune_on_get_work=True, disable_window=2)
    with Worker(scheduler=sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0.05) as w:
        self.assertTrue(w.add(t))
        # Worker.run() returns False even if a task failed first but eventually succeeded.
        self.assertFalse(w.run())

    self.assertEqual(3, t.retry_index)
    self.assertEqual([t.task_id], list(sch.task_list('DONE').keys()))
    self.assertEqual(1, len(sch._state.get_task(t.task_id).failures))
Ensure that `Task.disable_window` impacts the task retrying policy: with the scheduler retry policy (disable_window=2), the task fails twice and gets disabled; with the task retry policy (disable_window=0.5), the task never gets into the DISABLED state.
test_per_task_disable_persist_with_single_worker
python
spotify/luigi
test/worker_test.py
https://github.com/spotify/luigi/blob/master/test/worker_test.py
Apache-2.0
def test_sends_smtp_email(self):
    """
    Call notifications.send_email_smtp with fixture parameters with
    smtp_without_tls set to False and check that sendmail is properly called.
    """
    smtp_kws = {"host": "my.smtp.local",
                "port": 999,
                "local_hostname": "ptms",
                "timeout": 1200}

    with mock.patch('smtplib.SMTP') as SMTP:
        with mock.patch('luigi.notifications.generate_email') as generate_email:
            generate_email.return_value.as_string.return_value = self.mocked_email_msg

            notifications.send_email_smtp(*self.notification_args)

            SMTP.assert_called_once_with(**smtp_kws)
            SMTP.return_value.login.assert_called_once_with("Robin", "dooH")
            SMTP.return_value.starttls.assert_called_once_with()
            SMTP.return_value.sendmail.assert_called_once_with(
                self.sender, self.recipients, self.mocked_email_msg)
Call notifications.send_email_smtp with fixture parameters with smtp_without_tls set to False and check that sendmail is properly called.
test_sends_smtp_email
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
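The notification tests here share fixtures (notification_args, sender, recipients, mocked_email_msg, and so on) set up elsewhere in notifications_test.py, and the "Robin"/"dooH" SMTP credentials presumably come from patched luigi configuration. A minimal sketch of that setUp with assumed values; the argument order (sender, subject, message, recipients, image_png) is inferred from test_sns_subject_is_shortened further down:

def setUp(self):
    # All concrete values here are illustrative assumptions,
    # not the real fixtures from notifications_test.py.
    self.sender = 'sender@example.com'
    self.subject = 'subject'
    self.message = 'message'
    self.recipients = ['recipient@example.com']
    self.image_png = None
    self.mocked_email_msg = 'mocked email message'
    # Positional argument list unpacked into the send_email_* backends.
    self.notification_args = [self.sender, self.subject, self.message,
                              self.recipients, self.image_png]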
def test_sends_smtp_email_without_tls(self):
    """
    Call notifications.send_email_smtp with fixture parameters with
    no_tls set to True and check that sendmail is properly called without
    also calling starttls.
    """
    smtp_kws = {"host": "my.smtp.local",
                "port": 999,
                "local_hostname": "ptms",
                "timeout": 1200}

    with mock.patch('smtplib.SMTP') as SMTP:
        with mock.patch('luigi.notifications.generate_email') as generate_email:
            generate_email.return_value.as_string.return_value = self.mocked_email_msg

            notifications.send_email_smtp(*self.notification_args)

            SMTP.assert_called_once_with(**smtp_kws)
            self.assertEqual(SMTP.return_value.starttls.called, False)
            SMTP.return_value.login.assert_called_once_with("Robin", "dooH")
            SMTP.return_value.sendmail.assert_called_once_with(
                self.sender, self.recipients, self.mocked_email_msg)
Call notifications.send_email_smtp with fixture parameters with no_tls set to True and check that sendmail is properly called without also calling starttls.
test_sends_smtp_email_without_tls
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def test_sends_smtp_email_exceptions(self):
    """
    Call notifications.send_email_smtp when it cannot connect to the smtp
    server (socket.error) and check that no exception propagates.
    """
    smtp_kws = {"host": "my.smtp.local",
                "port": 999,
                "local_hostname": "ptms",
                "timeout": 1200}

    with mock.patch('smtplib.SMTP') as SMTP:
        with mock.patch('luigi.notifications.generate_email') as generate_email:
            SMTP.side_effect = socket.error()
            generate_email.return_value.as_string.return_value = self.mocked_email_msg

            try:
                notifications.send_email_smtp(*self.notification_args)
            except socket.error:
                self.fail("send_email_smtp() raised exception unexpectedly")

            SMTP.assert_called_once_with(**smtp_kws)
            self.assertEqual(notifications.generate_email.called, False)
            self.assertEqual(SMTP.return_value.sendmail.called, False)
Call notifications.send_email_smtp when it cannot connect to the smtp server (socket.error) and check that no exception propagates.
test_sends_smtp_email_exceptions
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def test_sends_sendgrid_email(self):
    """
    Call notifications.send_email_sendgrid with fixture parameters and
    check that SendGridAPIClient is properly called.
    """
    with mock.patch('sendgrid.SendGridAPIClient') as SendGridAPIClient:
        notifications.send_email_sendgrid(*self.notification_args)

        SendGridAPIClient.assert_called_once_with("456abcdef123")
        self.assertTrue(SendGridAPIClient.return_value.send.called)
Call notifications.send_email_sendgrid with fixture parameters and check that SendGridAPIClient is properly called.
test_sends_sendgrid_email
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def test_sends_ses_email(self):
    """
    Call notifications.send_email_ses with fixture parameters and
    check that boto is properly called.
    """
    with mock.patch('boto3.client') as boto_client:
        with mock.patch('luigi.notifications.generate_email') as generate_email:
            generate_email.return_value.as_string.return_value = self.mocked_email_msg

            notifications.send_email_ses(*self.notification_args)

            SES = boto_client.return_value
            SES.send_raw_email.assert_called_once_with(
                Source=self.sender,
                Destinations=self.recipients,
                RawMessage={'Data': self.mocked_email_msg})
Call notifications.send_email_ses with fixture parameters and check that boto is properly called.
test_sends_ses_email
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def test_sends_sns_email(self):
    """
    Call notifications.send_email_sns with fixture parameters and
    check that boto3 is properly called.
    """
    with mock.patch('boto3.resource') as res:
        notifications.send_email_sns(*self.notification_args)

        SNS = res.return_value
        SNS.Topic.assert_called_once_with(self.recipients[0])
        SNS.Topic.return_value.publish.assert_called_once_with(
            Subject=self.subject, Message=self.message)
Call notifications.send_email_sns with fixture parameters and check that boto3 is properly called.
test_sends_sns_email
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def test_sns_subject_is_shortened(self):
    """
    Call notifications.send_email_sns with a too-long subject (more than
    100 chars) and check that it is truncated to 100 chars.
    """
    long_subject = ('Luigi: SanityCheck(regexPattern=aligned-source\\|data-not-older\\|source-chunks-compl,'
                    'mailFailure=False, mongodb=mongodb://localhost/stats) FAILED')

    with mock.patch('boto3.resource') as res:
        notifications.send_email_sns(self.sender, long_subject, self.message,
                                     self.recipients, self.image_png)

        SNS = res.return_value
        SNS.Topic.assert_called_once_with(self.recipients[0])
        called_subj = SNS.Topic.return_value.publish.call_args[1]['Subject']

        self.assertTrue(len(called_subj) <= 100,
                        "Subject can be max 100 chars long! Found {}.".format(len(called_subj)))
Call notifications.send_email_sns with a too-long subject (more than 100 chars) and check that it is truncated to 100 chars.
test_sns_subject_is_shortened
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
def check_dispatcher(self, target):
    """
    Call notifications.send_email and test that the proper function was called.
    """
    expected_args = self.notification_args

    with mock.patch('luigi.notifications.{}'.format(target)) as sender:
        notifications.send_email(self.subject, self.message, self.sender,
                                 self.recipients, image_png=self.image_png)

        self.assertTrue(sender.called)
        call_args = sender.call_args[0]
        self.assertEqual(tuple(expected_args), call_args)
Call notifications.send_email and test that the proper function was called.
check_dispatcher
python
spotify/luigi
test/notifications_test.py
https://github.com/spotify/luigi/blob/master/test/notifications_test.py
Apache-2.0
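check_dispatcher is a helper rather than a test in itself; the dispatch tests presumably call it once per backend with the target function's name, along these lines (hypothetical test method names; the targets are the functions exercised above):

def test_dispatch_smtp(self):
    self.check_dispatcher('send_email_smtp')

def test_dispatch_ses(self):
    self.check_dispatcher('send_email_ses')

def test_dispatch_sns(self):
    self.check_dispatcher('send_email_sns')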
def test_complete_behavior(self):
    """
    Verify that RunOnceTask works as expected.

    This test would fail if MyTask were a normal ``luigi.Task``, because
    RequiringTask would not run (missing dependency at runtime).
    """
    class MyTask(RunOnceTask):
        pass

    class RequiringTask(luigi.Task):
        counter = 0

        def requires(self):
            yield MyTask()

        def run(self):
            RequiringTask.counter += 1

    self.run_locally(['RequiringTask'])
    self.assertEqual(1, RequiringTask.counter)
Verify that RunOnceTask works as expected. This test would fail if MyTask were a normal ``luigi.Task``, because RequiringTask would not run (missing dependency at runtime).
test_complete_behavior
python
spotify/luigi
test/helpers_test.py
https://github.com/spotify/luigi/blob/master/test/helpers_test.py
Apache-2.0
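RunOnceTask comes from luigi's test helpers; the point of the test above is that its completion state flips once run() has executed. A minimal sketch of such a helper, assumed for illustration rather than quoted from test/helpers.py:

import luigi

class RunOnceTask(luigi.Task):
    """Sketch: complete() is False until run() has been called once."""
    def __init__(self, *args, **kwargs):
        super(RunOnceTask, self).__init__(*args, **kwargs)
        self.comp = False

    def complete(self):
        return self.comp

    def run(self):
        self.comp = True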
def test_old_month_instantiation(self):
    """
    Verify that you can still programmatically set the ``of`` param.
    """
    class MyTask(luigi.Task):
        month_param = luigi.MonthParameter()

        def complete(self):
            return False

    range_task = RangeMonthly(now=datetime_to_epoch(datetime.datetime(2016, 1, 1)),
                              of=MyTask,
                              start=datetime.date(2015, 12, 1),
                              stop=datetime.date(2016, 1, 1))
    expected_task = MyTask(month_param=datetime.date(2015, 12, 1))
    self.assertEqual(expected_task, list(range_task._requires())[0])
Verify that you can still programmatically set the ``of`` param
test_old_month_instantiation
python
spotify/luigi
test/range_test.py
https://github.com/spotify/luigi/blob/master/test/range_test.py
Apache-2.0
def test_month_cli_instantiation(self):
    """
    Verify that you can still use Range through CLI
    """
    class MyTask(luigi.Task):
        task_namespace = "wohoo"
        month_param = luigi.MonthParameter()
        secret = 'some-value-to-sooth-python-linters'
        comp = False

        def complete(self):
            return self.comp

        def run(self):
            self.comp = True
            MyTask.secret = 'yay'

    now = str(int(datetime_to_epoch(datetime.datetime(2016, 1, 1))))
    self.run_locally_split('RangeMonthly --of wohoo.MyTask --now {now} --start 2015-12 --stop 2016-01'.format(now=now))
    self.assertEqual(MyTask(month_param=datetime.date(1934, 12, 1)).secret, 'yay')
Verify that you can still use Range through CLI
test_month_cli_instantiation
python
spotify/luigi
test/range_test.py
https://github.com/spotify/luigi/blob/master/test/range_test.py
Apache-2.0
def test_old_instantiation(self):
    """
    Verify that you can still programmatically set the ``of`` param.
    """
    class MyTask(luigi.Task):
        date_param = luigi.DateParameter()

        def complete(self):
            return False

    range_task = RangeDailyBase(now=datetime_to_epoch(datetime.datetime(2015, 12, 2)),
                                of=MyTask,
                                start=datetime.date(2015, 12, 1),
                                stop=datetime.date(2015, 12, 2))
    expected_task = MyTask(date_param=datetime.date(2015, 12, 1))
    self.assertEqual(expected_task, list(range_task._requires())[0])
Verify that you can still programmatically set the ``of`` param
test_old_instantiation
python
spotify/luigi
test/range_test.py
https://github.com/spotify/luigi/blob/master/test/range_test.py
Apache-2.0
def test_cli_instantiation(self):
    """
    Verify that you can still use Range through CLI
    """
    class MyTask(luigi.Task):
        task_namespace = "wohoo"
        date_param = luigi.DateParameter()
        secret = 'some-value-to-sooth-python-linters'
        comp = False

        def complete(self):
            return self.comp

        def run(self):
            self.comp = True
            MyTask.secret = 'yay'

    now = str(int(datetime_to_epoch(datetime.datetime(2015, 12, 2))))
    self.run_locally_split('RangeDailyBase --of wohoo.MyTask --now {now} --start 2015-12-01 --stop 2015-12-02'.format(now=now))
    self.assertEqual(MyTask(date_param=datetime.date(1934, 12, 1)).secret, 'yay')
Verify that you can still use Range through CLI
test_cli_instantiation
python
spotify/luigi
test/range_test.py
https://github.com/spotify/luigi/blob/master/test/range_test.py
Apache-2.0
def gather_forwarded_attributes(self):
    """
    Returns a set of names of attributes that are forwarded by the TaskProcess
    and that are not *None*. The tests in this file check if and which
    attributes are present at different times, e.g. while running, or before
    and after a dynamic dependency was yielded.
    """
    attrs = set()
    for attr in FORWARDED_ATTRIBUTES:
        if getattr(self, attr, None) is not None:
            attrs.add(attr)
    return attrs
Returns a set of names of attributes that are forwarded by the TaskProcess and that are not *None*. The tests in this file check if and which attributes are present at different times, e.g. while running, or before and after a dynamic dependency was yielded.
gather_forwarded_attributes
python
spotify/luigi
test/task_forwarded_attributes_test.py
https://github.com/spotify/luigi/blob/master/test/task_forwarded_attributes_test.py
Apache-2.0
def test_cleanup_children_on_terminate(self):
    """
    Subprocesses spawned by tasks should be terminated on terminate
    """
    class HangingSubprocessTask(luigi.Task):
        def run(self):
            python = sys.executable
            check_call([python, '-c', 'while True: pass'])

    task = HangingSubprocessTask()
    queue = mock.Mock()
    worker_id = 1

    task_process = TaskProcess(task, worker_id, queue, mock.Mock())
    task_process.start()

    parent = Process(task_process.pid)
    while not parent.children():
        # wait for child process to startup
        sleep(0.01)

    [child] = parent.children()
    task_process.terminate()
    child.wait(timeout=1.0)  # wait for terminate to complete

    self.assertFalse(parent.is_running())
    self.assertFalse(child.is_running())
Subprocesses spawned by tasks should be terminated on terminate
test_cleanup_children_on_terminate
python
spotify/luigi
test/worker_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_task_test.py
Apache-2.0
def test_disable_worker_timeout(self):
    """
    When a task sets worker_timeout explicitly to 0, it should disable the
    timeout, even if it is configured globally.
    """
    class Task(luigi.Task):
        worker_timeout = 0

    task_process = TaskProcess(
        task=Task(),
        worker_id=1,
        result_queue=mock.Mock(),
        status_reporter=mock.Mock(),
        worker_timeout=10,
    )
    self.assertEqual(task_process.worker_timeout, 0)
When a task sets worker_timeout explicitly to 0, it should disable the timeout, even if it is configured globally.
test_disable_worker_timeout
python
spotify/luigi
test/worker_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_task_test.py
Apache-2.0
def test_assistant_request_runnable_task(self):
    """
    Test that an assistant gets a task even though it hasn't registered for it
    """
    self.setTime(0)
    self.sch.add_task(worker='X', task_id='A', runnable=True)
    self.setTime(600)
    self.sch.prune()
    self.assertEqual('A', self.sch.get_work(worker='Y', assistant=True)['task_id'])
Test that an assistant gets a task even though it hasn't registered for it
test_assistant_request_runnable_task
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_do_not_lock_resources_when_not_ready(self):
    """
    Test to make sure that resources won't go unused waiting on workers
    """
    self.sch.add_task(worker='X', task_id='A', priority=10)
    self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
    self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
    self.sch.update_resources(R=1)

    self.sch.add_worker('X', [('workers', 1)])
    self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
Test to make sure that resources won't go unused waiting on workers
test_do_not_lock_resources_when_not_ready
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_do_not_lock_resources_while_running_higher_priority(self):
    """
    Test to make sure that resources won't go unused waiting on workers
    """
    self.sch.add_task(worker='X', task_id='A', priority=10)
    self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
    self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
    self.sch.update_resources(R=1)

    self.sch.add_worker('X', [('workers', 1)])
    self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])
    self.assertEqual('C', self.sch.get_work(worker='Y')['task_id'])
Test to make sure that resources won't go unused waiting on workers
test_do_not_lock_resources_while_running_higher_priority
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_lock_resources_while_running_lower_priority(self):
    """
    Make sure resources will be made available while working on lower priority tasks
    """
    self.sch.add_task(worker='X', task_id='A', priority=4)
    self.assertEqual('A', self.sch.get_work(worker='X')['task_id'])

    self.sch.add_task(worker='X', task_id='B', resources={'R': 1}, priority=5)
    self.sch.add_task(worker='Y', task_id='C', resources={'R': 1}, priority=1)
    self.sch.update_resources(R=1)

    self.sch.add_worker('X', [('workers', 1)])
    self.assertFalse(self.sch.get_work(worker='Y')['task_id'])
Make sure resources will be made available while working on lower priority tasks
test_lock_resources_while_running_lower_priority
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_disable_worker_cannot_add_tasks(self):
    """
    Verify that a disabled worker cannot add tasks
    """
    self.sch.disable_worker(worker=WORKER)
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertIsNone(self.sch.get_work(worker='assistant', assistant=True)['task_id'])

    self.sch.add_task(worker='third_enabled_worker', task_id='A')
    self.assertIsNotNone(self.sch.get_work(worker='assistant', assistant=True)['task_id'])
Verify that a disabled worker cannot add tasks
test_disable_worker_cannot_add_tasks
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_quadratic_behavior(self):
    """
    Test that get_work does not take a linear amount of time.

    This is of course impossible to test exactly, but under reasonable
    assumptions about hardware, this test should finish in a timely manner.
    """
    # For 10000 it takes almost 1 second on my laptop. Prior to these
    # changes it was being slow already at NUM_TASKS=300
    NUM_TASKS = 10000

    for i in range(NUM_TASKS):
        self.sch.add_task(worker=str(i), task_id=str(i), resources={})

    for i in range(NUM_TASKS):
        self.assertEqual(self.sch.get_work(worker=str(i))['task_id'], str(i))
        self.sch.add_task(worker=str(i), task_id=str(i), status=DONE)
Test that get_work does not take a linear amount of time. This is of course impossible to test exactly, but under reasonable assumptions about hardware, this test should finish in a timely manner.
test_quadratic_behavior
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_get_work_speed(self):
    """
    Test that get_work is fast for few workers and many DONEs.

    In #986, @daveFNbuck reported that he got a slowdown.
    """
    # This took almost 4 minutes without optimization.
    # Now it takes 10 seconds on my machine.
    NUM_PENDING = 1000
    NUM_DONE = 200000
    assert NUM_DONE >= NUM_PENDING

    for i in range(NUM_PENDING):
        self.sch.add_task(worker=WORKER, task_id=str(i), resources={})

    for i in range(NUM_PENDING, NUM_DONE):
        self.sch.add_task(worker=WORKER, task_id=str(i), status=DONE)

    for i in range(NUM_PENDING):
        res = int(self.sch.get_work(worker=WORKER)['task_id'])
        self.assertTrue(0 <= res < NUM_PENDING)
        self.sch.add_task(worker=WORKER, task_id=str(res), status=DONE)
Test that get_work is fast for few workers and many DONEs. In #986, @daveFNbuck reported that he got a slowdown.
test_get_work_speed
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_assistants_dont_nurture_finished_statuses(self):
    """
    Test how assistants affect longevity of tasks

    Assistants should not affect the longevity of tasks, except for the one
    an assistant is actually running.
    """
    self.sch = Scheduler(retry_delay=100000000000)  # Never pendify failed tasks
    self.setTime(1)
    self.sch.add_worker('assistant', [('assistant', True)])
    self.sch.ping(worker='assistant')
    self.sch.add_task(worker='uploader', task_id='running', status=PENDING)
    self.assertEqual(self.sch.get_work(worker='assistant', assistant=True)['task_id'], 'running')

    self.setTime(2)
    self.sch.add_task(worker='uploader', task_id='done', status=DONE)
    self.sch.add_task(worker='uploader', task_id='disabled', status=DISABLED)
    self.sch.add_task(worker='uploader', task_id='pending', status=PENDING)
    self.sch.add_task(worker='uploader', task_id='failed', status=FAILED)
    self.sch.add_task(worker='uploader', task_id='unknown', status=UNKNOWN)

    self.setTime(100000)
    self.sch.ping(worker='assistant')
    self.sch.prune()

    self.setTime(200000)
    self.sch.ping(worker='assistant')
    self.sch.prune()

    nurtured_statuses = [RUNNING]
    not_nurtured_statuses = [DONE, UNKNOWN, DISABLED, PENDING, FAILED]

    for status in nurtured_statuses:
        self.assertEqual(set([status.lower()]), set(self.sch.task_list(status, '')))

    for status in not_nurtured_statuses:
        self.assertEqual(set([]), set(self.sch.task_list(status, '')))

    self.assertEqual(1, len(self.sch.task_list(None, '')))  # None == All statuses
Test how assistants affect longevity of tasks. Assistants should not affect the longevity of tasks, except for the one an assistant is actually running.
test_assistants_dont_nurture_finished_statuses
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_no_crash_on_only_disable_hard_timeout(self):
    """
    Scheduler shouldn't crash with only disable_hard_timeout

    There was some failure happening when disable_hard_timeout was set
    but disable_failures was not.
    """
    self.sch = Scheduler(retry_delay=5, disable_hard_timeout=100)

    self.setTime(1)
    self.sch.add_worker(WORKER, [])
    self.sch.ping(worker=WORKER)

    self.setTime(2)
    self.sch.add_task(worker=WORKER, task_id='A')
    self.sch.add_task(worker=WORKER, task_id='B', deps=['A'])
    self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')

    self.sch.add_task(worker=WORKER, task_id='A', status=FAILED)

    self.setTime(10)
    self.sch.prune()
    self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
Scheduler shouldn't crash with only disable_hard_timeout There was some failure happening when disable_hard_timeout was set but disable_failures was not.
test_no_crash_on_only_disable_hard_timeout
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_assistant_running_task_dont_disappear(self):
    """
    Tasks run by an assistant shouldn't be pruned
    """
    self.setTime(1)
    self.sch.add_worker(WORKER, [])
    self.sch.ping(worker=WORKER)

    self.setTime(2)
    self.sch.add_task(worker=WORKER, task_id='A')
    self.assertEqual(self.sch.get_work(worker=WORKER)['task_id'], 'A')
    self.sch.add_task(worker=WORKER, task_id='B')

    self.sch.add_worker('assistant', [('assistant', True)])
    self.sch.ping(worker='assistant')
    self.assertEqual(self.sch.get_work(worker='assistant', assistant=True)['task_id'], 'B')

    self.setTime(100000)
    # Here, let's say WORKER disconnects (doesn't ping)
    self.sch.ping(worker='assistant')
    self.sch.prune()

    self.setTime(200000)
    self.sch.ping(worker='assistant')
    self.sch.prune()

    self.assertEqual({'B'}, set(self.sch.task_list(RUNNING, '')))
    self.assertEqual({'B'}, set(self.sch.task_list('', '')))
Tasks run by an assistant shouldn't be pruned
test_assistant_running_task_dont_disappear
python
spotify/luigi
test/scheduler_api_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_api_test.py
Apache-2.0
def test_worker_prune_after_init(self):
    """
    See https://github.com/spotify/luigi/pull/1019
    """
    worker = luigi.scheduler.Worker(123)

    class TmpCfg:
        def __init__(self):
            self.worker_disconnect_delay = 10

    worker.prune(TmpCfg())
See https://github.com/spotify/luigi/pull/1019
test_worker_prune_after_init
python
spotify/luigi
test/scheduler_test.py
https://github.com/spotify/luigi/blob/master/test/scheduler_test.py
Apache-2.0
def test_bin_luigi_help_not_spammy(self):
    """
    Test that `luigi --help` fits on one screen
    """
    returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help'])
    self.assertLessEqual(len(stdout.splitlines()), 15)
Test that `luigi --help` fits on one screen
test_bin_luigi_help_not_spammy
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_bin_luigi_all_help_spammy(self):
    """
    Test that `luigi --help-all` doesn't fit on a screen

    Naturally, I don't mind this test breaking, but it convinces me that
    the "not spammy" test is actually testing what it claims to.
    """
    returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--help-all'])
    self.assertGreater(len(stdout.splitlines()), 15)
Test that `luigi --help-all` doesn't fit on a screen. Naturally, I don't mind this test breaking, but it convinces me that the "not spammy" test is actually testing what it claims to.
test_bin_luigi_all_help_spammy
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_deps_py_script(self):
    """
    Test the deps.py script.
    """
    args = ('python luigi/tools/deps.py --module examples.top_artists '
            'ArtistToplistToDatabase --date-interval 2015-W10').split()
    returncode, stdout, stderr = self._run_cmdline(args)
    self.assertEqual(0, returncode)
    self.assertTrue(stdout.find(b'[FileSystem] data/streams_2015_03_04_faked.tsv') != -1)
    self.assertTrue(stdout.find(b'[DB] localhost') != -1)
Test the deps.py script.
test_deps_py_script
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_deps_tree_py_script(self):
    """
    Test the deps_tree.py script.
    """
    args = ('python luigi/tools/deps_tree.py --module examples.top_artists '
            'AggregateArtists --date-interval 2012-06').split()
    returncode, stdout, stderr = self._run_cmdline(args)
    self.assertEqual(0, returncode)
    for i in range(1, 30):
        self.assertTrue(stdout.find(("-[Streams-{{'date': '2012-06-{0}'}}".format(str(i).zfill(2))).encode('utf-8')) != -1)
Test the deps_tree.py script.
test_deps_tree_py_script
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_bin_mentions_misspelled_task(self):
    """
    Test that the error message is informative when a task is misspelled.

    In particular it should say that the task is misspelled and not that
    the local parameters do not exist.
    """
    returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'HooBaseClass', '--x 5'])
    self.assertTrue(stderr.find(b'FooBaseClass') != -1)
    self.assertTrue(stderr.find(b'--x') != -1)
Test that the error message is informative when a task is misspelled. In particular it should say that the task is misspelled and not that the local parameters do not exist.
test_bin_mentions_misspelled_task
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_stack_trace_has_no_inner(self):
    """
    Test that the stack trace for failing tasks is short

    The stack trace shouldn't contain unreasonably many implementation
    details of luigi.
    """
    returncode, stdout, stderr = self._run_cmdline(['./bin/luigi', '--module', 'cmdline_test', 'ATaskThatFails', '--local-scheduler', '--no-lock'])
    print(stdout)

    self.assertFalse(stdout.find(b"run() got an unexpected keyword argument 'tracking_url_callback'") != -1)
    self.assertFalse(stdout.find(b'During handling of the above exception, another exception occurred') != -1)
Test that the stack trace for failing tasks is short. The stack trace shouldn't contain unreasonably many implementation details of luigi.
test_stack_trace_has_no_inner
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def test_cmd_line_params_are_available_for_execution_summary(self):
    """
    Test that config parameters specified on the command line are available
    while generating the execution summary.
    """
    returncode, stdout, stderr = self._run_cmdline([
        './bin/luigi',
        '--module', 'cmdline_test', 'TaskThatRequiresConfig',
        '--local-scheduler',
        '--no-lock',
        '--RequiredConfig-required-test-param', 'A',
    ])
    print(stdout)
    print(stderr)

    self.assertNotEqual(returncode, 1)
    self.assertFalse(b'required_test_param' in stderr)
Test that config parameters specified on the command line are available while generating the execution summary.
test_cmd_line_params_are_available_for_execution_summary
python
spotify/luigi
test/cmdline_test.py
https://github.com/spotify/luigi/blob/master/test/cmdline_test.py
Apache-2.0
def setUp(self):
    """Set up a temporary directory for test files."""
    self.temp_dir = tempfile.mkdtemp()
    self.test_file_template = 'test_file_{}.txt'
    self.tar_file_name = 'test.tar'
    self.tar_file_name_with_traversal = f'traversal_{self.tar_file_name}'
Set up a temporary directory for test files.
setUp
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
def tearDown(self):
    """Clean up the temporary directory after each test."""
    shutil.rmtree(self.temp_dir)
Clean up the temporary directory after each test.
tearDown
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
def create_test_tar(self, tar_path, file_count=1, with_traversal=False):
    """
    Create a tar file containing test files.

    Args:
        tar_path (str): Path where the tar file will be created.
        file_count (int): Number of test files to include.
        with_traversal (bool): If True, creates a tar file with path traversal vulnerability.
    """
    # Default content for the test files
    file_contents = [f'This is {self.test_file_template.format(i)}' for i in range(file_count)]

    with tarfile.open(tar_path, 'w') as tar:
        for i in range(file_count):
            file_name = self.test_file_template.format(i)
            file_path = os.path.join(self.temp_dir, file_name)

            # Write content to each test file
            with open(file_path, 'w') as f:
                f.write(file_contents[i])

            # If path traversal is enabled, create malicious paths
            archive_name = f'../../{file_name}' if with_traversal else file_name

            # Add the file to the tar archive
            tar.add(file_path, arcname=archive_name)
Create a tar file containing test files. Args: tar_path (str): Path where the tar file will be created. file_count (int): Number of test files to include. with_traversal (bool): If True, creates a tar file with path traversal vulnerability.
create_test_tar
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
def verify_extracted_files(self, file_count):
    """
    Verify that the correct files were extracted and their contents match expectations.

    Args:
        file_count (int): Number of files to verify.
    """
    for i in range(file_count):
        file_name = self.test_file_template.format(i)
        file_path = os.path.join(self.temp_dir, file_name)

        # Check if the file exists
        self.assertTrue(os.path.exists(file_path), f"File {file_name} does not exist.")

        # Check if the file content is correct
        with open(file_path, 'r') as f:
            content = f.read()
            expected_content = f'This is {file_name}'
            self.assertEqual(content, expected_content, f"Content mismatch in {file_name}.")
Verify that the correct files were extracted and their contents match expectations. Args: file_count (int): Number of files to verify.
verify_extracted_files
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
def test_safe_extract(self):
    """Test normal safe extraction of tar files."""
    tar_path = os.path.join(self.temp_dir, self.tar_file_name)

    # Create a tar file with 3 files
    self.create_test_tar(tar_path, file_count=3)

    # Initialize SafeExtractor and perform extraction
    extractor = SafeExtractor(self.temp_dir)
    extractor.safe_extract(tar_path)

    # Verify that all 3 files were extracted correctly
    self.verify_extracted_files(3)
Test normal safe extraction of tar files.
test_safe_extract
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
def test_safe_extract_with_traversal(self):
    """Test safe extraction for tar files with path traversal (should raise an error)."""
    tar_path = os.path.join(self.temp_dir, self.tar_file_name_with_traversal)

    # Create a tar file with a path traversal file
    self.create_test_tar(tar_path, file_count=1, with_traversal=True)

    # Initialize SafeExtractor and expect RuntimeError due to path traversal
    extractor = SafeExtractor(self.temp_dir)
    with self.assertRaises(RuntimeError):
        extractor.safe_extract(tar_path)
Test safe extraction for tar files with path traversal (should raise an error).
test_safe_extract_with_traversal
python
spotify/luigi
test/safe_extractor_test.py
https://github.com/spotify/luigi/blob/master/test/safe_extractor_test.py
Apache-2.0
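The two tests above fully pin down the SafeExtractor contract: extract archive members into a base directory, and raise RuntimeError when any member would land outside it. A minimal sketch of an implementation meeting that contract; this is an assumption for illustration, not the actual class shipped with luigi:

import os
import tarfile

class SafeExtractor(object):
    """Sketch: refuses tar members that would resolve outside the target dir."""

    def __init__(self, path='.'):
        self.path = path

    def safe_extract(self, tar_path):
        base = os.path.realpath(self.path)
        with tarfile.open(tar_path, 'r') as tar:
            for member in tar.getmembers():
                # Resolve the destination and verify it stays under base.
                dest = os.path.realpath(os.path.join(self.path, member.name))
                if not dest.startswith(base + os.sep):
                    raise RuntimeError('Attempted path traversal in tar file: %s' % member.name)
            tar.extractall(self.path)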
def complete(self):
    """
    Create the file we need after a number of preconfigured attempts
    """
    self.times_called += 1

    if self.times_called >= self.times_to_call:
        open(self.path, 'a').close()

    return os.path.exists(self.path)
Create the file we need after a number of preconfigured attempts
complete
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
def test_external_dependency_already_complete(self):
    """
    Test that the test task completes when its dependency exists at the
    start of the execution.
    """
    test_task = TestTask(tempdir=self.tempdir, complete_after=1)
    luigi.build([test_task], local_scheduler=True)

    assert os.path.exists(test_task.dep_path)
    assert os.path.exists(test_task.output_path)

    # complete() is called once per failure, twice per success
    assert test_task.dependency.times_called == 2
Test that the test task completes when its dependency exists at the start of the execution.
test_external_dependency_already_complete
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
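The TestTask used throughout these tests is a fixture defined elsewhere in worker_external_task_test.py; the complete() method shown two records above belongs to its external dependency. A plausible sketch of that wiring follows. Names beyond those visible in the tests (TestExternalFileTask, the parameter names, the LocalTarget output) are assumptions:

import os
import luigi

class TestExternalFileTask(luigi.ExternalTask):
    """Sketch: external dependency whose file appears after N completeness checks."""
    path = luigi.Parameter()
    times_to_call = luigi.IntParameter()

    def __init__(self, *args, **kwargs):
        super(TestExternalFileTask, self).__init__(*args, **kwargs)
        self.times_called = 0

    def complete(self):
        # Create the file we need after a number of preconfigured attempts
        self.times_called += 1
        if self.times_called >= self.times_to_call:
            open(self.path, 'a').close()
        return os.path.exists(self.path)

class TestTask(luigi.Task):
    """Sketch: produces output_path once its external dependency exists."""
    tempdir = luigi.Parameter()
    complete_after = luigi.IntParameter()

    def __init__(self, *args, **kwargs):
        super(TestTask, self).__init__(*args, **kwargs)
        self.dep_path = os.path.join(self.tempdir, 'dep.txt')
        self.output_path = os.path.join(self.tempdir, 'out.txt')
        self.dependency = TestExternalFileTask(path=self.dep_path,
                                               times_to_call=self.complete_after)

    def requires(self):
        yield self.dependency

    def output(self):
        return luigi.LocalTarget(self.output_path)

    def run(self):
        open(self.output_path, 'a').close()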
def test_external_dependency_gets_rechecked(self):
    """
    Test that retry_external_tasks re-checks external tasks
    """
    assert luigi.worker.worker().retry_external_tasks is True

    test_task = TestTask(tempdir=self.tempdir, complete_after=10)
    self._build([test_task])

    assert os.path.exists(test_task.dep_path)
    assert os.path.exists(test_task.output_path)
    self.assertGreaterEqual(test_task.dependency.times_called, 10)
Test that retry_external_tasks re-checks external tasks
test_external_dependency_gets_rechecked
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
def test_external_dependency_worker_is_patient(self):
    """
    Test that worker doesn't "give up" with keep_alive option

    Instead, it should sleep for random.uniform() seconds, then ask
    scheduler for work.
    """
    assert luigi.worker.worker().retry_external_tasks is True

    with patch('random.uniform', return_value=0.001):
        test_task = TestTask(tempdir=self.tempdir, complete_after=5)
        self._build([test_task])

    assert os.path.exists(test_task.dep_path)
    assert os.path.exists(test_task.output_path)
    self.assertGreaterEqual(test_task.dependency.times_called, 5)
Test that worker doesn't "give up" with keep_alive option Instead, it should sleep for random.uniform() seconds, then ask scheduler for work.
test_external_dependency_worker_is_patient
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
def test_external_dependency_bare(self):
    """
    Test ExternalTask without altering global settings.
    """
    assert luigi.worker.worker().retry_external_tasks is False

    test_task = TestTask(tempdir=self.tempdir, complete_after=5)

    scheduler = luigi.scheduler.Scheduler(retry_delay=0.01, prune_on_get_work=True)
    with luigi.worker.Worker(
            retry_external_tasks=True, scheduler=scheduler, keep_alive=True,
            wait_interval=0.00001, wait_jitter=0) as w:
        w.add(test_task)
        w.run()

    assert os.path.exists(test_task.dep_path)
    assert os.path.exists(test_task.output_path)
    self.assertGreaterEqual(test_task.dependency.times_called, 5)
Test ExternalTask without altering global settings.
test_external_dependency_bare
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
def test_external_task_complete_but_missing_dep_at_runtime(self):
    """
    Test external task complete but has missing upstream dependency at
    runtime.

    Should not get "unfulfilled dependencies" error.
    """
    test_task = TestTask(tempdir=self.tempdir, complete_after=3)
    test_task.run = NotImplemented

    assert len(test_task.deps()) > 0

    # split up scheduling task and running to simulate runtime scenario
    with self._make_worker() as w:
        w.add(test_task)
        # touch output so test_task should be considered complete at runtime
        open(test_task.output_path, 'a').close()
        success = w.run()

    self.assertTrue(success)

    # upstream dependency output didn't exist at runtime
    self.assertFalse(os.path.exists(test_task.dep_path))
Test external task complete but has missing upstream dependency at runtime. Should not get "unfulfilled dependencies" error.
test_external_task_complete_but_missing_dep_at_runtime
python
spotify/luigi
test/worker_external_task_test.py
https://github.com/spotify/luigi/blob/master/test/worker_external_task_test.py
Apache-2.0
def test_disable_window_seconds(self):
    """
    Deprecated disable_window_seconds param uses disable_window value
    """
    class ATask(luigi.Task):
        disable_window = 17

    task = ATask()
    self.assertEqual(task.disable_window_seconds, 17)
Deprecated disable_window_seconds param uses disable_window value
test_disable_window_seconds
python
spotify/luigi
test/task_test.py
https://github.com/spotify/luigi/blob/master/test/task_test.py
Apache-2.0
def run(self, result=None):
    """
    Common setup code. Because of the context manager, we can't use the
    normal setUp.
    """
    self.sch = Scheduler(retry_delay=0.00000001, retry_count=2)

    with Worker(scheduler=self.sch, worker_id='X', keep_alive=True, wait_interval=0.1, wait_jitter=0) as w:
        self.w = w
        super(WorkerKeepAliveUpstreamTest, self).run(result)
Common setup code. Because of the context manager, we can't use the normal setUp.
run
python
spotify/luigi
test/worker_keep_alive_test.py
https://github.com/spotify/luigi/blob/master/test/worker_keep_alive_test.py
Apache-2.0
def test_alive_while_has_failure(self):
    """
    One dependency disables and one fails
    """
    class Disabler(luigi.Task):
        pass

    class Failer(luigi.Task):
        did_run = False

        def run(self):
            self.did_run = True

    class Wrapper(luigi.WrapperTask):
        def requires(self):
            return (Disabler(), Failer())

    self.w.add(Wrapper())
    disabler = Disabler().task_id
    failer = Failer().task_id
    self.sch.add_task(disabler, 'FAILED', worker='X')
    self.sch.prune()  # Make scheduler unfail the disabled task
    self.sch.add_task(disabler, 'FAILED', worker='X')  # Disable it
    self.sch.add_task(failer, 'FAILED', worker='X')  # Fail it

    try:
        t = threading.Thread(target=self.w.run)
        t.start()
        t.join(timeout=1)  # Wait 1 second
        self.assertTrue(t.is_alive())  # It shouldn't stop trying, the failed task should be retried!
        self.assertFalse(Failer.did_run)  # It should never have run, the cooldown is longer than a second.
    finally:
        self.sch.prune()  # Make it, like die. Couldn't find a more forceful way to do this.
        t.join(timeout=1)  # Wait 1 second
        assert not t.is_alive()
One dependency disables and one fails
test_alive_while_has_failure
python
spotify/luigi
test/worker_keep_alive_test.py
https://github.com/spotify/luigi/blob/master/test/worker_keep_alive_test.py
Apache-2.0
def test_alive_while_has_success(self):
    """
    One dependency disables and one succeeds
    """
    # TODO: Fix copy paste mess
    class Disabler(luigi.Task):
        pass

    class Succeeder(luigi.Task):
        did_run = False

        def run(self):
            self.did_run = True

    class Wrapper(luigi.WrapperTask):
        def requires(self):
            return (Disabler(), Succeeder())

    self.w.add(Wrapper())
    disabler = Disabler().task_id
    succeeder = Succeeder().task_id
    self.sch.add_task(disabler, 'FAILED', worker='X')
    self.sch.prune()  # Make scheduler unfail the disabled task
    self.sch.add_task(disabler, 'FAILED', worker='X')  # Disable it
    self.sch.add_task(succeeder, 'DONE', worker='X')  # Succeed it

    try:
        t = threading.Thread(target=self.w.run)
        t.start()
        t.join(timeout=1)  # Wait 1 second
        self.assertFalse(t.is_alive())  # The worker should think that it should stop ...
        # ... because in this case the only work remaining depends on DISABLED tasks,
        # hence it's not worth considering the wrapper task as a PENDING task to
        # keep the worker alive anymore.
        self.assertFalse(Succeeder.did_run)  # It should never have run, it succeeded already
    finally:
        self.sch.prune()  # This shouldn't be necessary in this version, but whatevs
        t.join(timeout=1)  # Wait 1 second
        assert not t.is_alive()
One dependency disables and one succeeds
test_alive_while_has_success
python
spotify/luigi
test/worker_keep_alive_test.py
https://github.com/spotify/luigi/blob/master/test/worker_keep_alive_test.py
Apache-2.0
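The three keep-alive records above drive a Worker constructed by hand. A hedged sketch of that setup outside the test harness, mirroring the constructor arguments the tests use; the scheduler values are illustrative, and outside of tests keep-alive is normally switched on via the keep_alive option in the [worker] config section:

from luigi.scheduler import Scheduler
from luigi.worker import Worker

sch = Scheduler(retry_delay=0.1, retry_count=2)  # illustrative retry policy
with Worker(scheduler=sch, worker_id='X', keep_alive=True,
            wait_interval=0.1, wait_jitter=0.05) as w:
    # w.add(SomeTask()) and w.run() as in the tests above; keep_alive makes
    # the worker wait for retryable work instead of exiting immediately.
    pass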
def test_retry_rpc_method(self):
    """
    Tests that a call to an RPC method is retried 3 times.
    """
    fetch_results = [socket.timeout, socket.timeout, '{"response":{}}']
    self.assertEqual({}, self.get_work(fetch_results))
Tests that a call to an RPC method is retried 3 times.
test_retry_rpc_method
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_retry_rpc_limited(self):
    """
    Tests that a call to an RPC method fails after the third attempt
    """
    fetch_results = [socket.timeout, socket.timeout, socket.timeout]
    self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)
Tests that a call to an RPC method fails after the third attempt
test_retry_rpc_limited
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_log_rpc_retries_enabled(self, mock_logger):
    """
    Tests that each retry of an RPC method is logged
    """
    fetch_results = [socket.timeout, socket.timeout, '{"response":{}}']
    self.get_work(fetch_results)
    self.assertEqual([
        mock.call.warning('Failed connecting to remote scheduler %r', 'http://zorg.com', exc_info=True),
        mock.call.info('Retrying attempt 2 of 3 (max)'),
        mock.call.info('Wait for 1 seconds'),
        mock.call.warning('Failed connecting to remote scheduler %r', 'http://zorg.com', exc_info=True),
        mock.call.info('Retrying attempt 3 of 3 (max)'),
        mock.call.info('Wait for 1 seconds'),
    ], mock_logger.mock_calls)
Tests that each retry of an RPC method is logged
test_log_rpc_retries_enabled
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_log_rpc_retries_disabled(self, mock_logger):
    """
    Tests that retries of an RPC method are not logged
    """
    fetch_results = [socket.timeout, socket.timeout, socket.gaierror]
    try:
        self.get_work(fetch_results)
        self.fail("get_work should have thrown RPCError")
    except luigi.rpc.RPCError as e:
        self.assertTrue(isinstance(e.sub_exception, socket.gaierror))
    self.assertEqual([], mock_logger.mock_calls)
Tests that retries of an RPC method are not logged
test_log_rpc_retries_disabled
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_get_work_retries_on_null(self):
    """
    Tests that get_work will retry if the response is null
    """
    fetch_results = ['{"response": null}', '{"response": {"pass": true}}']
    self.assertEqual({'pass': True}, self.get_work(fetch_results))
Tests that get_work will retry if the response is null
test_get_work_retries_on_null
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_get_work_retries_on_null_limited(self):
    """
    Tests that get_work will give up after the third null response
    """
    fetch_results = ['{"response": null}'] * 3 + ['{"response": {}}']
    self.assertRaises(luigi.rpc.RPCError, self.get_work, fetch_results)
Tests that get_work will give up after the third null response
test_get_work_retries_on_null_limited
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
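These RPC records assert that the scheduler client retries timeouts and null responses up to three attempts before raising RPCError. A hedged sketch of the client side; the URL is hypothetical, and the retry count and wait come from Luigi's core configuration (the option names rpc-retry-attempts and rpc-retry-wait are an assumption here, not verified against the source):

from luigi.rpc import RemoteScheduler

# Hypothetical scheduler URL; RPC calls made through this object are
# retried on connection errors before RPCError is raised, as asserted above.
scheduler = RemoteScheduler('http://localhost:8082')
work = scheduler.get_work(worker='my-worker')  # retried transparently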
def test_quadratic_behavior(self):
    """ This would be too slow to run over the network """
    pass
This would be too slow to run over the network
test_quadratic_behavior
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_get_work_speed(self):
    """ This would be too slow to run over the network """
    pass
This would be too slow to run over the network
test_get_work_speed
python
spotify/luigi
test/rpc_test.py
https://github.com/spotify/luigi/blob/master/test/rpc_test.py
Apache-2.0
def test_with_dates(self):
    """ Just test that it doesn't crash with date params """
    start = datetime.date(1998, 3, 23)

    class Bar(RunOnceTask):
        date = luigi.DateParameter()

    class Foo(luigi.Task):
        def requires(self):
            for i in range(10):
                new_date = start + datetime.timedelta(days=i)
                yield Bar(date=new_date)

    self.run_task(Foo())
    d = self.summary_dict()
    exp_set = {Bar(start + datetime.timedelta(days=i)) for i in range(10)}
    exp_set.add(Foo())
    self.assertEqual(exp_set, d['completed'])
    s = self.summary()
    self.assertIn('date=1998-0', s)
    self.assertIn('Scheduled 11 tasks', s)
    self.assertIn('Luigi Execution Summary', s)
    self.assertNotIn('00:00:00', s)
    self.assertNotIn('\n\n\n', s)
Just test that it doesn't crash with date params
test_with_dates
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
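The summary tests above call helper methods on the test case; when driving Luigi programmatically, the same human-readable summary can be obtained from build(). A hedged sketch, assuming the detailed_summary flag and summary_text attribute of recent Luigi versions; the task is a hypothetical stand-in:

import luigi

class SomeTask(luigi.Task):  # hypothetical stand-in task
    def complete(self):
        return True

result = luigi.build([SomeTask()], local_scheduler=True, detailed_summary=True)
print(result.summary_text)  # contains 'Luigi Execution Summary', as asserted above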
def test_with_datehours(self):
    """ Just test that it doesn't crash with datehour params """
    start = datetime.datetime(1998, 3, 23, 5)

    class Bar(RunOnceTask):
        datehour = luigi.DateHourParameter()

    class Foo(luigi.Task):
        def requires(self):
            for i in range(10):
                new_date = start + datetime.timedelta(hours=i)
                yield Bar(datehour=new_date)

    self.run_task(Foo())
    d = self.summary_dict()
    exp_set = {Bar(start + datetime.timedelta(hours=i)) for i in range(10)}
    exp_set.add(Foo())
    self.assertEqual(exp_set, d['completed'])
    s = self.summary()
    self.assertIn('datehour=1998-03-23T0', s)
    self.assertIn('Scheduled 11 tasks', s)
    self.assertIn('Luigi Execution Summary', s)
    self.assertNotIn('00:00:00', s)
    self.assertNotIn('\n\n\n', s)
Just test that it doesn't crash with datehour params
test_with_datehours
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
def test_with_months(self):
    """ Just test that it doesn't crash with month params """
    start = datetime.datetime(1998, 3, 23)

    class Bar(RunOnceTask):
        month = luigi.MonthParameter()

    class Foo(luigi.Task):
        def requires(self):
            for i in range(3):
                new_date = start + datetime.timedelta(days=30 * i)
                yield Bar(month=new_date)

    self.run_task(Foo())
    d = self.summary_dict()
    exp_set = {Bar(start + datetime.timedelta(days=30 * i)) for i in range(3)}
    exp_set.add(Foo())
    self.assertEqual(exp_set, d['completed'])
    s = self.summary()
    self.assertIn('month=1998-0', s)
    self.assertIn('Scheduled 4 tasks', s)
    self.assertIn('Luigi Execution Summary', s)
    self.assertNotIn('00:00:00', s)
    self.assertNotIn('\n\n\n', s)
Just test that it doesn't crash with month params
test_with_months
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
def test_multiple_dash_dash_workers(self):
    """
    Don't print own worker with ``--workers 2`` setting.
    """
    self.worker = luigi.worker.Worker(scheduler=self.scheduler, worker_processes=2)

    class Foo(RunOnceTask):
        pass

    self.run_task(Foo())
    d = self.summary_dict()
    self.assertEqual(set(), d['run_by_other_worker'])
    s = self.summary()
    self.assertNotIn('The other workers were', s)
    self.assertIn('This progress looks :) because there were no failed ', s)
    self.assertNotIn('\n\n\n', s)
Don't print own worker with ``--workers 2`` setting.
test_multiple_dash_dash_workers
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
def test_with_uncomparable_parameters(self):
    """
    Don't rely on parameters being sortable
    """
    class Color(Enum):
        red = 1
        yellow = 2

    class Bar(RunOnceTask):
        eparam = luigi.EnumParameter(enum=Color)

    class Baz(RunOnceTask):
        eparam = luigi.EnumParameter(enum=Color)
        another_param = luigi.IntParameter()

    class Foo(luigi.Task):
        def requires(self):
            yield Bar(Color.red)
            yield Bar(Color.yellow)
            yield Baz(Color.red, 5)
            yield Baz(Color.yellow, 5)

    self.run_task(Foo())
    s = self.summary()
    self.assertIn('yellow', s)
Don't rely on parameters being sortable
test_with_uncomparable_parameters
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
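The uncomparable-parameters record relies on EnumParameter. A short sketch of how such a parameter is declared and passed; the task and enum names are illustrative:

import enum
import luigi

class Color(enum.Enum):
    red = 1
    yellow = 2

class Paint(luigi.Task):  # hypothetical example task
    color = luigi.EnumParameter(enum=Color)

# Values are addressed by enum member name, e.g. Paint(color=Color.yellow)
# in code or --color yellow on the command line.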
def test_with_dict_dependency(self):
    """ Just test that it doesn't crash with dict params in dependencies """
    args = dict(start=datetime.date(1998, 3, 23), num=3)

    class Bar(RunOnceTask):
        args = luigi.DictParameter()

    class Foo(luigi.Task):
        def requires(self):
            for i in range(10):
                new_dict = args.copy()
                new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
                yield Bar(args=new_dict)

    self.run_task(Foo())
    d = self.summary_dict()
    exp_set = set()
    for i in range(10):
        new_dict = args.copy()
        new_dict['start'] = str(new_dict['start'] + datetime.timedelta(days=i))
        exp_set.add(Bar(new_dict))
    exp_set.add(Foo())
    self.assertEqual(exp_set, d['completed'])
    s = self.summary()
    self.assertIn('"num": 3', s)
    self.assertIn('"start": "1998-0', s)
    self.assertIn('Scheduled 11 tasks', s)
    self.assertIn('Luigi Execution Summary', s)
    self.assertNotIn('00:00:00', s)
    self.assertNotIn('\n\n\n', s)
Just test that it doesn't crash with dict params in dependencies
test_with_dict_dependency
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
def test_with_dict_argument(self):
    """ Just test that it doesn't crash with dict params """
    args = dict(start=str(datetime.date(1998, 3, 23)), num=3)

    class Bar(RunOnceTask):
        args = luigi.DictParameter()

    self.run_task(Bar(args=args))
    d = self.summary_dict()
    exp_set = set()
    exp_set.add(Bar(args=args))
    self.assertEqual(exp_set, d['completed'])
    s = self.summary()
    self.assertIn('"num": 3', s)
    self.assertIn('"start": "1998-0', s)
    self.assertIn('Scheduled 1 task', s)
    self.assertIn('Luigi Execution Summary', s)
    self.assertNotIn('00:00:00', s)
    self.assertNotIn('\n\n\n', s)
Just test that it doesn't crash with dict params
test_with_dict_argument
python
spotify/luigi
test/execution_summary_test.py
https://github.com/spotify/luigi/blob/master/test/execution_summary_test.py
Apache-2.0
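Both dict-parameter records above pass plain dicts into DictParameter. A minimal sketch of the declaration, plus the JSON form the parameter accepts on the command line; the task name and values are illustrative:

import luigi

class Report(luigi.Task):  # hypothetical example task
    args = luigi.DictParameter()

    def run(self):
        # DictParameter values behave like (frozen) dicts at runtime.
        print(self.args['start'], self.args['num'])

# Command-line form, assuming standard DictParameter JSON parsing:
#   luigi Report --args '{"start": "1998-03-23", "num": 3}' --local-scheduler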
def test_subprocess_delegation(self):
    """ Test the subprocess call structure by stubbing subprocess.Popen """
    orig_Popen = subprocess.Popen
    self.last_test = None

    def Popen(cmd, **kwargs):
        self.last_test = cmd

    subprocess.Popen = Popen
    context = RemoteContext(
        "some_host",
        username="luigi",
        key_file="/some/key.pub"
    )
    context.Popen(["ls"])
    self.assertTrue("ssh" in self.last_test)
    self.assertTrue("-i" in self.last_test)
    self.assertTrue("/some/key.pub" in self.last_test)
    self.assertTrue("luigi@some_host" in self.last_test)
    self.assertTrue("ls" in self.last_test)
    subprocess.Popen = orig_Popen
Test the subprocess call structure by stubbing subprocess.Popen
test_subprocess_delegation
python
spotify/luigi
test/test_ssh.py
https://github.com/spotify/luigi/blob/master/test/test_ssh.py
Apache-2.0
def test_check_output_fail_connect(self):
    """ Test check_output to a non-existing host """
    context = RemoteContext("__NO_HOST_LIKE_THIS__", connect_timeout=1)
    self.assertRaises(
        subprocess.CalledProcessError,
        context.check_output,
        ["ls"]
    )
Test check_output to a non-existing host
test_check_output_fail_connect
python
spotify/luigi
test/test_ssh.py
https://github.com/spotify/luigi/blob/master/test/test_ssh.py
Apache-2.0
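The two SSH records test RemoteContext from luigi.contrib.ssh. A hedged usage sketch with a hypothetical host and key, mirroring the constructor arguments the tests use:

from luigi.contrib.ssh import RemoteContext

context = RemoteContext('example.com', username='luigi', key_file='/some/key.pub')
listing = context.check_output(['ls', '-l'])  # raises subprocess.CalledProcessError on failure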
def test_inheritance_from_non_parameter(self):
    """
    Cloning can pull non-parameter attributes from the source task into target parameters.
    """
    class SubTask(luigi.Task):
        lo = 1

        @property
        def hi(self):
            return 2

    t1 = SubTask()
    t2 = t1.clone(cls=LinearSum)
    self.assertEqual(t2.lo, 1)
    self.assertEqual(t2.hi, 2)
Cloning can pull non-parameter attributes from the source task into target parameters.
test_inheritance_from_non_parameter
python
spotify/luigi
test/clone_test.py
https://github.com/spotify/luigi/blob/master/test/clone_test.py
Apache-2.0
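The clone record shows that Task.clone can also source plain attributes and properties, not only parameters. A sketch; LinearSum is defined elsewhere in clone_test.py, so the parameter list given here is an assumption:

import luigi

class LinearSum(luigi.Task):  # assumed shape of the target class
    lo = luigi.IntParameter()
    hi = luigi.IntParameter()

class SubTask(luigi.Task):
    lo = 1  # plain class attribute, not a Parameter

    @property
    def hi(self):  # plain property, not a Parameter
        return 2

t2 = SubTask().clone(cls=LinearSum)
assert (t2.lo, t2.hi) == (1, 2)  # clone pulled both values across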
def test_track_job(self):
    """`track_job` returns the state using qstat"""
    self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 1), 'r')
    self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 2), 'qw')
    self.assertEqual(_parse_qstat_state(QSTAT_OUTPUT, 3), 't')
    self.assertEqual(_parse_qstat_state('', 1), 'u')
    self.assertEqual(_parse_qstat_state('', 4), 'u')
`track_job` returns the state using qstat
test_track_job
python
spotify/luigi
test/contrib/sge_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/sge_test.py
Apache-2.0
def test_success(self):
    """
    Here the responses lib is used to mock the PAI REST API calls;
    the following calls specify the mocked responses.
    """
    responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/token',
                  json={"token": "test", "user": "admin", "admin": True}, status=200)
    sk_task = SklearnJob()
    responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/jobs',
                  json={"message": "update job {0} successfully".format(sk_task.name)}, status=202)
    responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(sk_task.name),
                  json={}, status=404)
    responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(sk_task.name),
                  body='{"jobStatus": {"state":"SUCCEED"}}', status=200)
    success = luigi.build([sk_task], local_scheduler=True)
    self.assertTrue(success)
    self.assertTrue(sk_task.complete())
Here the responses lib is used to mock the PAI REST API calls; the following calls specify the mocked responses.
test_success
python
spotify/luigi
test/contrib/pai_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/pai_test.py
Apache-2.0
def test_fail(self):
    """
    Here the responses lib is used to mock the PAI REST API calls;
    the following calls specify the mocked responses.
    """
    responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/token',
                  json={"token": "test", "user": "admin", "admin": True}, status=200)
    fail_task = SklearnJob()
    responses.add(responses.POST, 'http://127.0.0.1:9186/api/v1/jobs',
                  json={"message": "update job {0} successfully".format(fail_task.name)}, status=202)
    responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(fail_task.name),
                  json={}, status=404)
    responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/{0}'.format(fail_task.name),
                  body='{"jobStatus": {"state":"FAILED"}}', status=200)
    success = luigi.build([fail_task], local_scheduler=True)
    self.assertFalse(success)
    self.assertFalse(fail_task.complete())
Here the responses lib is used to mock the PAI REST API calls; the following calls specify the mocked responses.
test_fail
python
spotify/luigi
test/contrib/pai_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/pai_test.py
Apache-2.0
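Both PAI records lean on the responses library to stub HTTP endpoints. The general pattern, reduced to a self-contained example with a hypothetical endpoint:

import requests
import responses

@responses.activate
def fetch_job_state():
    # responses intercepts the HTTP call and returns the registered payload.
    responses.add(responses.GET, 'http://127.0.0.1:9186/api/v1/jobs/demo',
                  json={'jobStatus': {'state': 'SUCCEED'}}, status=200)
    return requests.get('http://127.0.0.1:9186/api/v1/jobs/demo').json()

assert fetch_job_state()['jobStatus']['state'] == 'SUCCEED'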
def test_s3_copy_to_missing_table(self, mock_redshift_target, mock_does_exist):
    """
    Test missing table creation
    """
    # Ensure `S3CopyToTable.create_table` does not throw an error.
    task = DummyS3CopyToTableKey()
    task.run()

    # Make sure the cursor was successfully used to create the table in
    # `create_table` as expected.
    mock_cursor = (mock_redshift_target.return_value
                                       .connect
                                       .return_value
                                       .cursor
                                       .return_value)
    assert mock_cursor.execute.call_args_list[0][0][0].startswith(
        "CREATE TABLE %s" % task.table)
Test missing table creation
test_s3_copy_to_missing_table
python
spotify/luigi
test/contrib/redshift_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/redshift_test.py
Apache-2.0
def test_s3_copy_to_missing_table_with_compression_encodings(self, mock_redshift_target, mock_does_exist):
    """
    Test missing table creation with compression encodings
    """
    # Ensure `S3CopyToTable.create_table` does not throw an error.
    task = DummyS3CopyToTableWithCompressionEncodings()
    task.run()

    # Make sure the cursor was successfully used to create the table in
    # `create_table` as expected.
    mock_cursor = (mock_redshift_target.return_value
                                       .connect
                                       .return_value
                                       .cursor
                                       .return_value)
    encode_string = ','.join(
        '{name} {type} ENCODE {encoding}'.format(
            name=name,
            type=type,
            encoding=encoding)
        for name, type, encoding in task.columns
    )
    assert mock_cursor.execute.call_args_list[0][0][0].startswith(
        "CREATE TABLE %s (%s )" % (task.table, encode_string))
Test missing table creation with compression encodings
test_s3_copy_to_missing_table_with_compression_encodings
python
spotify/luigi
test/contrib/redshift_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/redshift_test.py
Apache-2.0
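Both Redshift records exercise subclasses of luigi.contrib.redshift.S3CopyToTable through dummy tasks. A hedged sketch of such a subclass; all connection values and the S3 path are placeholders, and the 3-tuple columns form corresponds to the compression-encoding DDL asserted above:

from luigi.contrib.redshift import S3CopyToTable

class MyCopy(S3CopyToTable):  # sketch, not the repo's dummy task
    host = 'cluster.example:5439'
    database = 'db'
    user = 'user'
    password = 'secret'
    table = 'my_table'
    # (name, type, encoding) triples produce 'col TYPE ENCODE enc' DDL;
    # plain (name, type) pairs also work when no encoding is wanted.
    columns = [('id', 'INT', 'ZSTD'),
               ('name', 'VARCHAR(255)', 'LZO')]

    def s3_load_path(self):
        return 's3://bucket/path'  # hypothetical source prefix for COPY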
def requires(self):
    """
    Two lines from the Words task will cause two `mapper` calls.
    """
    return Words(self.use_hdfs)
Two lines from the Words task will cause two `mapper` calls.
requires
python
spotify/luigi
test/contrib/hadoop_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/hadoop_test.py
Apache-2.0
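The requires record belongs to a luigi.contrib.hadoop.JobTask word-count test. For context, the canonical shape of such a job; the paths and class names here are illustrative:

import luigi
import luigi.contrib.hadoop
import luigi.contrib.hdfs

class InputText(luigi.ExternalTask):  # hypothetical pre-existing input
    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/words.txt')

class WordCount(luigi.contrib.hadoop.JobTask):
    def requires(self):
        return InputText()

    def output(self):
        return luigi.contrib.hdfs.HdfsTarget('/tmp/wordcount')

    def mapper(self, line):
        # One call per input line, which is why two lines in the input
        # produce exactly two mapper calls in the test above.
        for word in line.strip().split():
            yield word, 1

    def reducer(self, key, values):
        yield key, sum(values)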
def setUp(self):
    """ Fill test database with fake data """
    self.mongo_client = pymongo.MongoClient(HOST, PORT)
    self.collection = self.mongo_client[INDEX][COLLECTION]
    self.collection.delete_many({})
    test_docs = [
        {'_id': 'person_1', 'name': 'Mike', 'infos': {'family': 'single'}},
        {'_id': 'person_2', 'name': 'Laura', 'surname': 'Gilmore'},
        {'_id': 'person_3', 'surname': 'Specter'},
        {'_id': 'person_4', 'surname': '', 'infos': {'family': {'children': ['jack', 'rose']}}}
    ]
    self.collection.insert_many(test_docs)
Fill test database with fake data
setUp
python
spotify/luigi
test/contrib/mongo_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/mongo_test.py
Apache-2.0
def tearDown(self):
    """ Make sure the test database is in a clean state """
    self.collection.drop()
    self.mongo_client.drop_database(INDEX)
Make sure the test database is in a clean state
tearDown
python
spotify/luigi
test/contrib/mongo_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/mongo_test.py
Apache-2.0
def setUp(self):
    """ Fill test database with fake data """
    self.mongo_client = pymongo.MongoClient(HOST, PORT)
    self.collection = self.mongo_client[INDEX][COLLECTION]
    self.collection.delete_many({})
    test_docs = [
        {'_id': 'person_1', 'age': 11, 'experience': 10,
         'content': "Lorem ipsum, dolor sit amet. Consectetur adipiscing elit."},
        {'_id': 'person_2', 'age': 12, 'experience': 22,
         'content': "Sed purus nisl. Faucibus in, erat eu. Rhoncus mattis velit."},
        {'_id': 'person_3', 'age': 13,
         'content': "Nulla malesuada, fringilla lorem at pellentesque."},
        {'_id': 'person_4', 'age': 14,
         'content': "Curabitur condimentum. Venenatis fringilla."}
    ]
    self.collection.insert_many(test_docs)
Fill test database with fake data
setUp
python
spotify/luigi
test/contrib/mongo_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/mongo_test.py
Apache-2.0
def tearDown(self):
    """ Make sure the test database is in a clean state """
    self.collection.drop()
    self.mongo_client.drop_database(INDEX)
Make sure the test database is in a clean state
tearDown
python
spotify/luigi
test/contrib/mongo_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/mongo_test.py
Apache-2.0
def bucket_url(suffix):
    """ Actually it's bucket + test folder name """
    return 'gs://{}/{}/{}'.format(BUCKET_NAME, TEST_FOLDER, suffix)
Actually it's bucket + test folder name
bucket_url
python
spotify/luigi
test/contrib/gcs_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/gcs_test.py
Apache-2.0
def flush():
    """ Flush test DB """
    redis_client = redis.StrictRedis(
        host=HOST, port=PORT, db=DB, socket_timeout=SOCKET_TIMEOUT)
    redis_client.flushdb()
Flush test DB
flush
python
spotify/luigi
test/contrib/redis_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/redis_test.py
Apache-2.0
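The flush helper resets the database that Redis-backed targets in this test module write markers into. A heavily hedged sketch of the target itself; the connection values are placeholders, and the exact constructor signature and the touch/exists method names should be treated as assumptions:

from luigi.contrib.redis_store import RedisTarget

target = RedisTarget(host='localhost', port=6379, db=0, update_id='my_task_run_1')
if not target.exists():  # assumption: standard Target interface
    # ... perform the actual work ...
    target.touch()  # assumption: marker-style completion, as in other contrib targets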
def _create_test_index():
    """ Create the content index if it does not exist. """
    es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection,
                                     host=HOST, port=PORT, http_auth=HTTP_AUTH)
    if not es.indices.exists(INDEX):
        es.indices.create(INDEX)
Create the content index if it does not exist.
_create_test_index
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def test_touch_and_exists(self):
    """ Basic test. """
    target = ElasticsearchTarget(HOST, PORT, INDEX, DOC_TYPE, 'update_id', http_auth=HTTP_AUTH)
    target.marker_index = MARKER_INDEX
    target.marker_doc_type = MARKER_DOC_TYPE
    delete()
    self.assertFalse(target.exists(), 'Target should not exist before touching it')
    target.touch()
    self.assertTrue(target.exists(), 'Target should exist after touching it')
    delete()
Basic test.
test_touch_and_exists
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def delete():
    """ Delete marker_index, if it exists. """
    es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection,
                                     host=HOST, port=PORT, http_auth=HTTP_AUTH)
    if es.indices.exists(MARKER_INDEX):
        es.indices.delete(MARKER_INDEX)
    es.indices.refresh()
Delete marker_index, if it exists.
delete
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def output(self):
    """ Use a test target with its own marker_index. """
    target = ElasticsearchTarget(
        host=self.host,
        port=self.port,
        http_auth=self.http_auth,
        index=self.index,
        doc_type=self.doc_type,
        update_id=self.update_id(),
        marker_index_hist_size=self.marker_index_hist_size
    )
    target.marker_index = MARKER_INDEX
    target.marker_doc_type = MARKER_DOC_TYPE
    return target
Use a test target with its own marker_index.
output
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def docs(self):
    """ Return a list with a single doc. """
    return [{'_id': 123, '_index': self.index, '_type': self.doc_type,
             'name': 'sample', 'date': 'today'}]
Return a list with a single doc.
docs
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def docs(self):
    """ Return a list with a single doc. """
    return [{'_id': 234, '_index': self.index, '_type': self.doc_type,
             'name': 'another', 'date': 'today'}]
Return a list with a single doc.
docs
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
def docs(self):
    """ Return a list with a single doc. """
    return [{'_id': 234, '_index': self.index, '_type': self.doc_type,
             'name': 'yet another', 'date': 'today'}]
Return a list with a single doc.
docs
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0
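The docs records above feed CopyToIndex-style tasks: output() builds the marker target and docs() supplies the payload. A hedged sketch of a complete task in that style, assuming luigi.contrib.esindex.CopyToIndex with the host/port/index/doc_type attributes these tests configure; the values are illustrative:

from luigi.contrib.esindex import CopyToIndex

class IndexDocs(CopyToIndex):  # sketch; values are illustrative
    host = 'localhost'
    port = 9200
    index = 'luigi_test'
    doc_type = 'default'

    def docs(self):
        return [{'_id': 123, '_index': self.index, '_type': self.doc_type,
                 'name': 'sample', 'date': 'today'}]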
def _cleanup():
    """ Delete both the test marker index and the content index. """
    es = elasticsearch.Elasticsearch(connection_class=Urllib3HttpConnection,
                                     host=HOST, port=PORT, http_auth=HTTP_AUTH)
    if es.indices.exists(MARKER_INDEX):
        es.indices.delete(MARKER_INDEX)
    if es.indices.exists(INDEX):
        es.indices.delete(INDEX)
Delete both the test marker index and the content index.
_cleanup
python
spotify/luigi
test/contrib/esindex_test.py
https://github.com/spotify/luigi/blob/master/test/contrib/esindex_test.py
Apache-2.0