body_hash | body | docstring | path | name | repository_name | repository_stars | lang | body_without_docstring | unified
---|---|---|---|---|---|---|---|---|---|
0b42b0e45c78481f0c54577a737b3f96f2c93a9eaa6d829f8d2a0df88add870b | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_not_enrolled_not_passed_not_offered(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'test for get_info_for_course for course with run not passed and nothing offered'
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)), patch('courses.models.Course.first_unexpired_run', return_value=None):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with run not passed and nothing offered | dashboard/api_test.py | test_info_not_enrolled_not_passed_not_offered | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_not_enrolled_not_passed_not_offered(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)), patch('courses.models.Course.first_unexpired_run', return_value=None):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_not_enrolled_not_passed_not_offered(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)), patch('courses.models.Course.first_unexpired_run', return_value=None):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with run not passed and nothing offered<|endoftext|> |
f1278de8995a5a76a747d04e7cb52d8074ba02cc72820d28e605e14aae27e83a | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_grade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'test for get_info_for_course for course with a course current and another not passed'
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with a course current and another not passed | dashboard/api_test.py | test_info_grade | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_grade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_grade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with a course current and another not passed<|endoftext|> |
30baae301d9e9e54577ae0bc89db5416a112a514359df31da23996e783a891d1 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed is required\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course in case a check if the course has been passed is required | dashboard/api_test.py | test_info_check_but_not_passed | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course in case a check if the course has been passed is required<|endoftext|> |
f2fe41ee6c1354e52b39a83108a3d4ba6c4d6355b8d05adc003fa319b772b579 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_missed_deadline(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course with a missed upgrade deadline\n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.MISSED_DEADLINE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.MISSED_DEADLINE, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course with a missed upgrade deadline | dashboard/api_test.py | test_info_missed_deadline | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_missed_deadline(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.MISSED_DEADLINE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.MISSED_DEADLINE, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_missed_deadline(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_ENROLLED, self.course_run, api.CourseRunStatus.MISSED_DEADLINE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.MISSED_DEADLINE, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run, api.CourseStatus.OFFERED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course with a missed upgrade deadline<|endoftext|> |
4050171ccd4a07c7d891dbaed7e8d74883dfa8278a53950cdd0a1057a3dd2f09 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course, the course has not been passed and there is no next run\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_called_once_with(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course in case a check if the course has been passed
is required for the course, the course has not been passed and there is no next run | dashboard/api_test.py | test_info_check_but_not_passed_no_next | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course, the course has not been passed and there is no next run\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_called_once_with(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_but_not_passed_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course, the course has not been passed and there is no next run\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': False})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_called_once_with(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course in case a check if the course has been passed
is required for the course, the course has not been passed and there is no next run<|endoftext|> |
db71ad2ca63a2cf034e6aec8aaf659bc0f2f6616aa1c303aba9f98ea3abe5f8c | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course and the course has been passed\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_ver, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course in case a check if the course has been passed
is required for the course and the course has been passed | dashboard/api_test.py | test_info_check_passed | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course and the course has been passed\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_ver, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_check_passed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case a check if the course has been passed\n is required for the course and the course has been passed\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CHECK_IF_PASSED, self.course_run_ver, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course in case a check if the course has been passed
is required for the course and the course has been passed<|endoftext|> |
b184c01cd26a39345f70477590a871bd9ed14cb0a3191e856286279403e608c4 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_will_attend(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'test for get_info_for_course for course with enrolled run that will happen in the future'
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.WILL_ATTEND, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.WILL_ATTEND, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with enrolled run that will happen in the future | dashboard/api_test.py | test_info_will_attend | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_will_attend(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.WILL_ATTEND, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.WILL_ATTEND, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_will_attend(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.WILL_ATTEND, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.WILL_ATTEND, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with enrolled run that will happen in the future<|endoftext|> |
eb48aae166110f87b924d880a00dc5b5c6883f5985f4e8c7caae1ec4d39c2f46 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'test for get_info_for_course for course with a run that needs to be upgraded'
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CAN_UPGRADE, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with a run that needs to be upgraded | dashboard/api_test.py | test_info_upgrade | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CAN_UPGRADE, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CAN_UPGRADE, self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_called_once_with(self.course_run, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with a run that needs to be upgraded<|endoftext|> |
919e7c78562f9097d4863e992653ea81d4037231486be9963a82341b5fefcff5 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade_in_past(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run\n that needs to be upgraded but before a current enrolled one\n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CAN_UPGRADE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with a run
that needs to be upgraded but before a current enrolled one | dashboard/api_test.py | test_info_upgrade_in_past | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade_in_past(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run\n that needs to be upgraded but before a current enrolled one\n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CAN_UPGRADE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_upgrade_in_past(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run\n that needs to be upgraded but before a current enrolled one\n '
self.mmtrack.configure_mock(**{'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.CURRENTLY_ENROLLED, self.course_run, api.CourseRunStatus.CAN_UPGRADE)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
mock_format.assert_any_call(self.course_run, api.CourseStatus.CURRENTLY_ENROLLED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_ver, api.CourseStatus.CAN_UPGRADE, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with a run
that needs to be upgraded but before a current enrolled one<|endoftext|> |
1ff7c11d169b0f82c38d9ad09f256a4d789a1d886cdc361e177c602c33756e38 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run with an\n unexpected state but that can be offered\n '
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course for course with a run with an
unexpected state but that can be offered | dashboard/api_test.py | test_info_default_should_not_happen | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run with an\n unexpected state but that can be offered\n '
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course for course with a run with an\n unexpected state but that can be offered\n '
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course, api.get_info_for_course(self.course, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course for course with a run with an
unexpected state but that can be offered<|endoftext|> |
19352cae084dfb3c83b1bd2ee4d828866df1a11695abdf0e7db6f8f959c33def | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'test for get_info_for_course with no next and weird status'
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course with no next and weird status | dashboard/api_test.py | test_info_default_should_not_happen_no_next | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_default_should_not_happen_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func('status-that-we-should-never-have', self.course_run_past, api.CourseRunStatus.NOT_ENROLLED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
assert (mock_format.call_count == 0)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course with no next and weird status<|endoftext|> |
b9160d01208b159dff6239ccb8ea0e29ca41545ffa8479e3014f005ed85a4b65 | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_read_cert_for_all_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n test for get_info_for_course in case the less recent course is flagged to be checked if passed\n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_PASSED, self.course_run_past, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_any_call(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_past_ver, api.CourseStatus.PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | test for get_info_for_course in case the less recent course is flagged to be checked if passed | dashboard/api_test.py | test_info_read_cert_for_all_no_next | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_read_cert_for_all_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_PASSED, self.course_run_past, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_any_call(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_past_ver, api.CourseStatus.PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_info_read_cert_for_all_no_next(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
self.mmtrack.configure_mock(**{'has_passed_course.return_value': True, 'is_enrolled_mmtrack.return_value': True})
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=self.get_mock_run_status_func(api.CourseRunStatus.NOT_PASSED, self.course_run_past, api.CourseRunStatus.CHECK_IF_PASSED)):
self.assert_course_equal(self.course_no_next_run, api.get_info_for_course(self.course_no_next_run, self.mmtrack))
mock_format.assert_any_call(self.course_run_past, api.CourseStatus.NOT_PASSED, self.mmtrack, position=1)
mock_format.assert_any_call(self.course_run_past_ver, api.CourseStatus.PASSED, self.mmtrack, position=2)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>test for get_info_for_course in case the less recent course is flagged to be checked if passed<|endoftext|> |
0dba9b99732221bcf27b2a02ac6148decb323da23512d1540bd249df216920dc | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_run_end_date_mixed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n Test with a mix of end_date being None and also a valid date\n '
def mocked_get_status_for_courserun(run, enrollments):
'Mock get_status_for_courserun with different values for each run'
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run)
self.mmtrack.configure_mock(**{'user': self.user, 'is_enrolled_mmtrack.return_value': True})
run1 = CourseRunFactory.create(start_date=now_in_utc(), end_date=None, enrollment_start=None, enrollment_end=None)
CourseRunFactory.create(start_date=now_in_utc(), end_date=now_in_utc(), enrollment_start=None, enrollment_end=None, course=run1.course)
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=mocked_get_status_for_courserun):
self.assert_course_equal(run1.course, api.get_info_for_course(run1.course, self.mmtrack))
mock_format.assert_called_once_with(run1, api.CourseStatus.OFFERED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | Test with a mix of end_date being None and also a valid date | dashboard/api_test.py | test_course_run_end_date_mixed | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_run_end_date_mixed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
def mocked_get_status_for_courserun(run, enrollments):
'Mock get_status_for_courserun with different values for each run'
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run)
self.mmtrack.configure_mock(**{'user': self.user, 'is_enrolled_mmtrack.return_value': True})
run1 = CourseRunFactory.create(start_date=now_in_utc(), end_date=None, enrollment_start=None, enrollment_end=None)
CourseRunFactory.create(start_date=now_in_utc(), end_date=now_in_utc(), enrollment_start=None, enrollment_end=None, course=run1.course)
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=mocked_get_status_for_courserun):
self.assert_course_equal(run1.course, api.get_info_for_course(run1.course, self.mmtrack))
mock_format.assert_called_once_with(run1, api.CourseStatus.OFFERED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_run_end_date_mixed(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
def mocked_get_status_for_courserun(run, enrollments):
'Mock get_status_for_courserun with different values for each run'
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run)
self.mmtrack.configure_mock(**{'user': self.user, 'is_enrolled_mmtrack.return_value': True})
run1 = CourseRunFactory.create(start_date=now_in_utc(), end_date=None, enrollment_start=None, enrollment_end=None)
CourseRunFactory.create(start_date=now_in_utc(), end_date=now_in_utc(), enrollment_start=None, enrollment_end=None, course=run1.course)
with patch('dashboard.api.get_status_for_courserun', autospec=True, side_effect=mocked_get_status_for_courserun):
self.assert_course_equal(run1.course, api.get_info_for_course(run1.course, self.mmtrack))
mock_format.assert_called_once_with(run1, api.CourseStatus.OFFERED, self.mmtrack, position=1)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)<|docstring|>Test with a mix of end_date being None and also a valid date<|endoftext|> |
8dd888722e9eee59c5ef4da85a11e013c73ac791531b00144f7f4bab8d4d7a0a | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_with_proctorate_exam(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n Test with proctorate exam results\n '
for _ in range(3):
ProctoredExamGradeFactory.create(user=self.user, course=self.course_noruns)
proct_exam_qset = ProctoredExamGrade.for_user_course(user=self.user, course=self.course_noruns)
serialized_proct_exams = ProctoredExamGradeSerializer(proct_exam_qset, many=True).data
self.mmtrack.get_course_proctorate_exam_results.return_value = serialized_proct_exams
self.assert_course_equal(self.course_noruns, api.get_info_for_course(self.course_noruns, self.mmtrack), proct_exams=serialized_proct_exams)
assert (mock_format.called is False)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)
self.mmtrack.get_course_proctorate_exam_results.assert_called_once_with(self.course_noruns) | Test with proctorate exam results | dashboard/api_test.py | test_course_with_proctorate_exam | mitodl/micromasters | 32 | python | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_with_proctorate_exam(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
for _ in range(3):
ProctoredExamGradeFactory.create(user=self.user, course=self.course_noruns)
proct_exam_qset = ProctoredExamGrade.for_user_course(user=self.user, course=self.course_noruns)
serialized_proct_exams = ProctoredExamGradeSerializer(proct_exam_qset, many=True).data
self.mmtrack.get_course_proctorate_exam_results.return_value = serialized_proct_exams
self.assert_course_equal(self.course_noruns, api.get_info_for_course(self.course_noruns, self.mmtrack), proct_exams=serialized_proct_exams)
assert (mock_format.called is False)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)
self.mmtrack.get_course_proctorate_exam_results.assert_called_once_with(self.course_noruns) | @patch('dashboard.api.format_courserun_for_dashboard', autospec=True)
@patch('dashboard.api.is_exam_schedulable', return_value=False)
def test_course_with_proctorate_exam(self, mock_schedulable, mock_format, mock_get_cert, mock_future_exams, mock_has_to_pay, mock_exam_url):
'\n \n '
for _ in range(3):
ProctoredExamGradeFactory.create(user=self.user, course=self.course_noruns)
proct_exam_qset = ProctoredExamGrade.for_user_course(user=self.user, course=self.course_noruns)
serialized_proct_exams = ProctoredExamGradeSerializer(proct_exam_qset, many=True).data
self.mmtrack.get_course_proctorate_exam_results.return_value = serialized_proct_exams
self.assert_course_equal(self.course_noruns, api.get_info_for_course(self.course_noruns, self.mmtrack), proct_exams=serialized_proct_exams)
assert (mock_format.called is False)
assert (mock_schedulable.call_count == 1)
assert (mock_has_to_pay.call_count == 1)
assert (mock_future_exams.call_count == 1)
assert (mock_get_cert.call_count == 1)
assert (mock_exam_url.call_count == 1)
self.mmtrack.get_course_proctorate_exam_results.assert_called_once_with(self.course_noruns)<|docstring|>Test with proctorate exam results<|endoftext|> |
acb420db35e9f8dd578efd2e020f9cb46339c932aed4f60b540002f62e1598a8 | @ddt.data([[True], [False]])
@patch('backends.edxorg.EdxOrgOAuth2.refresh_token', return_value=social_extra_data, autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_format(self, update_cache, mock_cache_refresh, mock_refresh_token):
'Test that get_user_program_info fetches edx data and returns a list of Program data'
result = api.get_user_program_info(self.user)
assert (mock_refresh_token.call_count == (1 if update_cache else 0))
assert (mock_cache_refresh.call_count == (len(CachedEdxDataApi.EDX_SUPPORTED_CACHES) if update_cache else 0))
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2)
for i in range(2):
expected = {'id': self.expected_programs[i].id, 'description': self.expected_programs[i].description, 'title': self.expected_programs[i].title, 'financial_aid_availability': self.expected_programs[i].financial_aid_availability}
assert is_subset_dict(expected, result['programs'][i]) | Test that get_user_program_info fetches edx data and returns a list of Program data | dashboard/api_test.py | test_format | mitodl/micromasters | 32 | python | @ddt.data([[True], [False]])
@patch('backends.edxorg.EdxOrgOAuth2.refresh_token', return_value=social_extra_data, autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_format(self, update_cache, mock_cache_refresh, mock_refresh_token):
result = api.get_user_program_info(self.user)
assert (mock_refresh_token.call_count == (1 if update_cache else 0))
assert (mock_cache_refresh.call_count == (len(CachedEdxDataApi.EDX_SUPPORTED_CACHES) if update_cache else 0))
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2)
for i in range(2):
expected = {'id': self.expected_programs[i].id, 'description': self.expected_programs[i].description, 'title': self.expected_programs[i].title, 'financial_aid_availability': self.expected_programs[i].financial_aid_availability}
assert is_subset_dict(expected, result['programs'][i]) | @ddt.data([[True], [False]])
@patch('backends.edxorg.EdxOrgOAuth2.refresh_token', return_value=social_extra_data, autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_format(self, update_cache, mock_cache_refresh, mock_refresh_token):
result = api.get_user_program_info(self.user)
assert (mock_refresh_token.call_count == (1 if update_cache else 0))
assert (mock_cache_refresh.call_count == (len(CachedEdxDataApi.EDX_SUPPORTED_CACHES) if update_cache else 0))
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2)
for i in range(2):
expected = {'id': self.expected_programs[i].id, 'description': self.expected_programs[i].description, 'title': self.expected_programs[i].title, 'financial_aid_availability': self.expected_programs[i].financial_aid_availability}
assert is_subset_dict(expected, result['programs'][i])<|docstring|>Test that get_user_program_info fetches edx data and returns a list of Program data<|endoftext|> |
b31345c80724905adf389f58dbbca66e366ab6580039bacf5d5b7f35f35097a7 | @patch('backends.utils.refresh_user_token', autospec=True)
def test_past_course_runs(self, mock_refresh):
'Test that past course runs are returned in the API results'
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
failed_course_run = course.courserun_set.first()
failed_course_run.end_date = (now - timedelta(days=1))
failed_course_run.upgrade_deadline = (now - timedelta(days=1))
failed_course_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=failed_course_run, status=FinalGradeStatus.COMPLETE)
previous_failed_course_run = CourseRunFactory.create(course=course, end_date=(failed_course_run.end_date - timedelta(days=30)), upgrade_deadline=(failed_course_run.upgrade_deadline - timedelta(days=30)))
CachedEnrollmentFactory.create(user=self.user, course_run=previous_failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=previous_failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=previous_failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=previous_failed_course_run, status=FinalGradeStatus.COMPLETE)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert all([(run['status'] == api.CourseStatus.NOT_PASSED) for run in result['programs'][0]['courses'][0]['runs']]) | Test that past course runs are returned in the API results | dashboard/api_test.py | test_past_course_runs | mitodl/micromasters | 32 | python | @patch('backends.utils.refresh_user_token', autospec=True)
def test_past_course_runs(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
failed_course_run = course.courserun_set.first()
failed_course_run.end_date = (now - timedelta(days=1))
failed_course_run.upgrade_deadline = (now - timedelta(days=1))
failed_course_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=failed_course_run, status=FinalGradeStatus.COMPLETE)
previous_failed_course_run = CourseRunFactory.create(course=course, end_date=(failed_course_run.end_date - timedelta(days=30)), upgrade_deadline=(failed_course_run.upgrade_deadline - timedelta(days=30)))
CachedEnrollmentFactory.create(user=self.user, course_run=previous_failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=previous_failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=previous_failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=previous_failed_course_run, status=FinalGradeStatus.COMPLETE)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert all([(run['status'] == api.CourseStatus.NOT_PASSED) for run in result['programs'][0]['courses'][0]['runs']]) | @patch('backends.utils.refresh_user_token', autospec=True)
def test_past_course_runs(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
failed_course_run = course.courserun_set.first()
failed_course_run.end_date = (now - timedelta(days=1))
failed_course_run.upgrade_deadline = (now - timedelta(days=1))
failed_course_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=failed_course_run, status=FinalGradeStatus.COMPLETE)
previous_failed_course_run = CourseRunFactory.create(course=course, end_date=(failed_course_run.end_date - timedelta(days=30)), upgrade_deadline=(failed_course_run.upgrade_deadline - timedelta(days=30)))
CachedEnrollmentFactory.create(user=self.user, course_run=previous_failed_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=previous_failed_course_run)
FinalGrade.objects.create(user=self.user, course_run=previous_failed_course_run, grade=0.1, passed=False, status=FinalGradeStatus.COMPLETE, course_run_paid_on_edx=True)
CourseRunGradingStatus.objects.create(course_run=previous_failed_course_run, status=FinalGradeStatus.COMPLETE)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert all([(run['status'] == api.CourseStatus.NOT_PASSED) for run in result['programs'][0]['courses'][0]['runs']])<|docstring|>Test that past course runs are returned in the API results<|endoftext|> |
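The for/if/break scan that picks program_result out of result['programs'] recurs in the next few tests. The same lookup is commonly written with next() over a generator expression; a self-contained sketch with made-up data:

programs = [{'id': 1, 'title': 'Analytics'}, {'id': 7, 'title': 'Supply Chain'}]

# Equivalent of the loop used in the tests: first match by id, or None when absent.
program_result = next((p for p in programs if p['id'] == 7), None)
assert program_result is not None
assert program_result['title'] == 'Supply Chain'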
ae46c053528a07b111834baf1c753c58b5172a18881188b7d28b9277f9b877b2 | @patch('backends.utils.refresh_user_token', autospec=True)
def test_current_run_first(self, mock_refresh):
'Test that the current course run is first in the returned API results'
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=current_run.edx_course_key)
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.CURRENTLY_ENROLLED) | Test that the current course run is first in the returned API results | dashboard/api_test.py | test_current_run_first | mitodl/micromasters | 32 | python | @patch('backends.utils.refresh_user_token', autospec=True)
def test_current_run_first(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=current_run.edx_course_key)
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.CURRENTLY_ENROLLED) | @patch('backends.utils.refresh_user_token', autospec=True)
def test_current_run_first(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=current_run.edx_course_key)
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 2)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.CURRENTLY_ENROLLED)<|docstring|>Test that the current course run is first in the returned API results<|endoftext|>
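The final assertion expects the currently enrolled run to sort ahead of the future one, which implies an ordering over a course's runs. On plain data, 'already-started runs first, then by start date' is a tuple sort key; this sketches the idea under that assumption and is not the dashboard's actual sorting code:

from datetime import datetime, timedelta, timezone

now = datetime.now(timezone.utc)
runs = [
    {'key': 'future', 'start': now + timedelta(weeks=2)},
    {'key': 'current', 'start': now - timedelta(weeks=2)},
]

# False sorts before True, so runs that have already started come first.
runs.sort(key=lambda r: (r['start'] > now, r['start']))
assert [r['key'] for r in runs] == ['current', 'future']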
d145f07ee27929e0bc6da51fae2cf9e84daac5d50674a40d87a828344ba70526 | @patch('backends.utils.refresh_user_token', autospec=True)
def test_when_enroll_in_only_future_run(self, mock_refresh):
'Test that the user is enrolled in a future run but not in the current course run'
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 1)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.WILL_ATTEND) | Test that the user is enrolled in a future run but not in the current course run | dashboard/api_test.py | test_when_enroll_in_only_future_run | mitodl/micromasters | 32 | python | @patch('backends.utils.refresh_user_token', autospec=True)
def test_when_enroll_in_only_future_run(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 1)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.WILL_ATTEND) | @patch('backends.utils.refresh_user_token', autospec=True)
def test_when_enroll_in_only_future_run(self, mock_refresh):
now = now_in_utc()
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
current_run.end_date = (now + timedelta(weeks=1))
current_run.upgrade_deadline = (now + timedelta(days=1))
current_run.save()
future_course_run = CourseRunFactory.create(course=course, start_date=(now + timedelta(weeks=2)), end_date=(now + timedelta(weeks=20)), upgrade_deadline=(current_run.upgrade_deadline + timedelta(weeks=6)))
CachedEnrollmentFactory.create(user=self.user, course_run=future_course_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=future_course_run)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=future_course_run.edx_course_key)
UserCacheRefreshTimeFactory.create(user=self.user, unexpired=True)
result = api.get_user_program_info(self.user)
program_result = None
for res in result['programs']:
if (res['id'] == program.pk):
program_result = res
break
assert (program_result is not None)
assert (len(result['programs']) > 0)
assert (len(result['programs'][0]['courses']) > 0)
assert (len(result['programs'][0]['courses'][0]['runs']) == 1)
assert (result['programs'][0]['courses'][0]['runs'][0]['status'] == api.CourseRunStatus.WILL_ATTEND)<|docstring|>Test that the user is enrolled in a future run but not in the current course run<|endoftext|>
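With an enrollment only in a run that starts later, the status resolves to WILL_ATTEND rather than CURRENTLY_ENROLLED. Reduced to date logic, the distinction is a single comparison against now; the status strings below are illustrative placeholders, not the real CourseRunStatus constants:

from datetime import datetime, timedelta, timezone

now = datetime.now(timezone.utc)

def run_status(start, enrolled):
    # Illustrative mapping mirroring the statuses asserted in these tests.
    if not enrolled:
        return 'offered'
    return 'will-attend' if start > now else 'currently-enrolled'

assert run_status(now + timedelta(weeks=2), enrolled=True) == 'will-attend'
assert run_status(now - timedelta(weeks=2), enrolled=True) == 'currently-enrolled'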
8b674e23b4b4fa1891b47136f68af0a7fa341f23e00349b449cbc05683cb30f9 | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_exception_in_refresh_cache_2(self, mock_cache_refresh, mock_token_refresh):
'Test the case where the backend cache refresh raises any other exception'
mock_cache_refresh.side_effect = ZeroDivisionError
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 1)
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2) | Test the case where the backend cache refresh raises any other exception | dashboard/api_test.py | test_exception_in_refresh_cache_2 | mitodl/micromasters | 32 | python | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_exception_in_refresh_cache_2(self, mock_cache_refresh, mock_token_refresh):
mock_cache_refresh.side_effect = ZeroDivisionError
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 1)
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2) | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_exception_in_refresh_cache_2(self, mock_cache_refresh, mock_token_refresh):
mock_cache_refresh.side_effect = ZeroDivisionError
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 1)
assert isinstance(result, dict)
assert ('is_edx_data_fresh' in result)
assert (result['is_edx_data_fresh'] is False)
assert ('programs' in result)
assert (len(result['programs']) == 2)<|docstring|>Test the case where the backend cache refresh raises any other exception<|endoftext|>
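The test pins down a deliberate design choice: any failure inside the cache refresh is absorbed and the dashboard is still served from stale cached data. A hedged sketch of that defensive shape; the function names here are assumptions, not the repository's code:

import logging

log = logging.getLogger(__name__)

def refresh_cache(user):
    raise ZeroDivisionError  # stand-in for an arbitrary backend failure

def get_program_info(user):
    try:
        refresh_cache(user)
    except Exception:  # broad on purpose: the dashboard must render regardless
        log.exception('impossible to refresh the edX cache')
    return {'is_edx_data_fresh': False, 'programs': []}

assert get_program_info('alice')['is_edx_data_fresh'] is False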
1ee26cbd5d1c13b7d0a0085ef8a4d0df653193d30ae6e9031d64bdefc10e31a7 | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_returns_courseruns_for_different_backends(self, mock_cache_refresh, mock_token_refresh):
'Test that course runs are returned for each edX backend linked to the user'
UserSocialAuthFactory.create(user=self.user, provider=BACKEND_MITX_ONLINE)
course_run_mitxonline = CourseRunFactory.create(course__program=self.program_fin_aid)
set_course_run_current(course_run_mitxonline)
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
set_course_run_current(current_run)
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
add_paid_order_for_course(self.user, current_run)
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 2)
assert (mock_cache_refresh.call_count == len(CachedEdxDataApi.ALL_CACHE_TYPES))
assert (len(result['programs']) == 2)
for program_result in result['programs']:
assert (program_result is not None)
assert (len(program_result['courses'][0]['runs']) == 1) | Test that course runs are returned for each edX backend linked to the user | dashboard/api_test.py | test_returns_courseruns_for_different_backends | mitodl/micromasters | 32 | python | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_returns_courseruns_for_different_backends(self, mock_cache_refresh, mock_token_refresh):
UserSocialAuthFactory.create(user=self.user, provider=BACKEND_MITX_ONLINE)
course_run_mitxonline = CourseRunFactory.create(course__program=self.program_fin_aid)
set_course_run_current(course_run_mitxonline)
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
set_course_run_current(current_run)
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
add_paid_order_for_course(self.user, current_run)
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 2)
assert (mock_cache_refresh.call_count == len(CachedEdxDataApi.ALL_CACHE_TYPES))
assert (len(result['programs']) == 2)
for program_result in result['programs']:
assert (program_result is not None)
assert (len(program_result['courses'][0]['runs']) == 1) | @patch('backends.utils.refresh_user_token', autospec=True)
@patch('dashboard.api_edx_cache.CachedEdxDataApi.update_cache_if_expired', new_callable=MagicMock)
def test_returns_courseruns_for_different_backends(self, mock_cache_refresh, mock_token_refresh):
UserSocialAuthFactory.create(user=self.user, provider=BACKEND_MITX_ONLINE)
course_run_mitxonline = CourseRunFactory.create(course__program=self.program_fin_aid)
set_course_run_current(course_run_mitxonline)
program = self.program_non_fin_aid
course = program.course_set.first()
current_run = course.courserun_set.first()
set_course_run_current(current_run)
CachedEnrollmentFactory.create(user=self.user, course_run=current_run)
CachedCurrentGradeFactory.create(user=self.user, course_run=current_run)
add_paid_order_for_course(self.user, current_run)
result = api.get_user_program_info(self.user)
assert (mock_token_refresh.call_count == 2)
assert (mock_cache_refresh.call_count == len(CachedEdxDataApi.ALL_CACHE_TYPES))
assert (len(result['programs']) == 2)
for program_result in result['programs']:
assert (program_result is not None)
assert (len(program_result['courses'][0]['runs']) == 1)<|docstring|>Test that course runs are returned for each edX backend linked to the user<|endoftext|>
b113a53896ba90df377263b8e861dc52b859f4ac5a719bccc8c8b34d2363f64b | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program(self, mock_info_course):
'Test happy path'
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 3, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | Test happy path | dashboard/api_test.py | test_program | mitodl/micromasters | 32 | python | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program(self, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 3, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program(self, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 3, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data)<|docstring|>Test happy path<|endoftext|> |
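configure_mock(**{...}) is the standard unittest.mock way to set plain attributes and nested return values in one call: dotted keys walk into child mocks, which is what the mmtrack fixture above relies on. A self-contained sketch with illustrative values:

from unittest.mock import MagicMock

mmtrack = MagicMock()
mmtrack.configure_mock(**{
    'financial_aid_available': False,                   # plain attribute
    'calculate_final_grade_average.return_value': 91,   # child mock's return value
    'get_program_certificate_url.return_value': '',
})

assert mmtrack.financial_aid_available is False
assert mmtrack.calculate_final_grade_average() == 91
assert mmtrack.get_program_certificate_url() == ''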
64bbe5c2f06e34eab3f65a7058b472c73ef511fd2d4906b80cc948772b5af0f8 | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_with_electives(self, mock_info_course):
'Test happy path'
self.program.num_required_courses = 5
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
electives_set = ElectivesSet.objects.create(program=self.program, required_number=2)
for num in range(3):
course = CourseFactory.create(title='title course prog1 {}'.format(num), program=self.program)
ElectiveCourse.objects.create(course=course, electives_set=electives_set)
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 5, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | Test happy path | dashboard/api_test.py | test_program_with_electives | mitodl/micromasters | 32 | python | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_with_electives(self, mock_info_course):
self.program.num_required_courses = 5
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
electives_set = ElectivesSet.objects.create(program=self.program, required_number=2)
for num in range(3):
course = CourseFactory.create(title='title course prog1 {}'.format(num), program=self.program)
ElectiveCourse.objects.create(course=course, electives_set=electives_set)
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 5, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_with_electives(self, mock_info_course):
self.program.num_required_courses = 5
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
mock_info_course.return_value = {'position_in_program': 1}
electives_set = ElectivesSet.objects.create(program=self.program, required_number=2)
for num in range(3):
course = CourseFactory.create(title='title course prog1 {}'.format(num), program=self.program)
ElectiveCourse.objects.create(course=course, electives_set=electives_set)
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': 5, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data)<|docstring|>Test happy path<|endoftext|> |
7734e13572d859d42c994cd27a094bb96b5a963d7aaed911ae8f5dcd56df0a5a | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_no_courses(self, mock_info_course):
'Test program with no courses'
self.mmtrack.configure_mock(**{'program': self.program_no_courses, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_INVALID, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
res = api.get_info_for_program(self.mmtrack)
assert (mock_info_course.called is False)
expected_data = {'id': self.program_no_courses.pk, 'description': self.program_no_courses.description, 'title': self.program_no_courses.title, 'courses': [], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_INVALID, 'number_courses_required': 0, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | Test program with no courses | dashboard/api_test.py | test_program_no_courses | mitodl/micromasters | 32 | python | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_no_courses(self, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program_no_courses, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_INVALID, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
res = api.get_info_for_program(self.mmtrack)
assert (mock_info_course.called is False)
expected_data = {'id': self.program_no_courses.pk, 'description': self.program_no_courses.description, 'title': self.program_no_courses.title, 'courses': [], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_INVALID, 'number_courses_required': 0, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data) | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_no_courses(self, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program_no_courses, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_INVALID, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
res = api.get_info_for_program(self.mmtrack)
assert (mock_info_course.called is False)
expected_data = {'id': self.program_no_courses.pk, 'description': self.program_no_courses.description, 'title': self.program_no_courses.title, 'courses': [], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_INVALID, 'number_courses_required': 0, 'number_courses_passed': 0, 'grade_average': 91, 'certificate': ''}
self.assertEqual(res, expected_data)<|docstring|>Test program with no courses<|endoftext|> |
fc003d691cf8df32272b89dc0aa71f76fa8405b7de45dd7807fc716cb824b539 | @patch('dashboard.api.get_info_for_course', autospec=True)
@patch('financialaid.serializers.FinancialAidDashboardSerializer.serialize', new_callable=MagicMock)
def test_program_financial_aid(self, mock_fin_aid_serialize, mock_info_course):
'Test happy path'
self.mmtrack.configure_mock(**{'program': self.program, 'get_exam_card_status.return_value': ExamProfile.PROFILE_IN_PROGRESS, 'calculate_final_grade_average.return_value': 91, 'financial_aid_available': True, 'get_program_certificate_url.return_value': '', 'get_program_enrollment.return_value': self.program_enrollment, 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
serialized_fin_aid = {'id': 123, 'has_user_applied': True, 'application_status': 'WHO-KNOWS', 'min_possible_cost': 100, 'max_possible_cost': 200, 'date_documents_sent': (now_in_utc() - timedelta(hours=12))}
mock_fin_aid_serialize.return_value = serialized_fin_aid
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': True, 'financial_aid_user_info': serialized_fin_aid, 'exam_card_status': ExamProfile.PROFILE_IN_PROGRESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 0, 'grade_average': 91, 'certificate': '', 'grade_records_url': reverse('grade_records', args=[self.program_enrollment.id])}
self.assertEqual(res, expected_data) | Test happy path | dashboard/api_test.py | test_program_financial_aid | mitodl/micromasters | 32 | python | @patch('dashboard.api.get_info_for_course', autospec=True)
@patch('financialaid.serializers.FinancialAidDashboardSerializer.serialize', new_callable=MagicMock)
def test_program_financial_aid(self, mock_fin_aid_serialize, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program, 'get_exam_card_status.return_value': ExamProfile.PROFILE_IN_PROGRESS, 'calculate_final_grade_average.return_value': 91, 'financial_aid_available': True, 'get_program_certificate_url.return_value': '', 'get_program_enrollment.return_value': self.program_enrollment, 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
serialized_fin_aid = {'id': 123, 'has_user_applied': True, 'application_status': 'WHO-KNOWS', 'min_possible_cost': 100, 'max_possible_cost': 200, 'date_documents_sent': (now_in_utc() - timedelta(hours=12))}
mock_fin_aid_serialize.return_value = serialized_fin_aid
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': True, 'financial_aid_user_info': serialized_fin_aid, 'exam_card_status': ExamProfile.PROFILE_IN_PROGRESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 0, 'grade_average': 91, 'certificate': '', 'grade_records_url': reverse('grade_records', args=[self.program_enrollment.id])}
self.assertEqual(res, expected_data) | @patch('dashboard.api.get_info_for_course', autospec=True)
@patch('financialaid.serializers.FinancialAidDashboardSerializer.serialize', new_callable=MagicMock)
def test_program_financial_aid(self, mock_fin_aid_serialize, mock_info_course):
self.mmtrack.configure_mock(**{'program': self.program, 'get_exam_card_status.return_value': ExamProfile.PROFILE_IN_PROGRESS, 'calculate_final_grade_average.return_value': 91, 'financial_aid_available': True, 'get_program_certificate_url.return_value': '', 'get_program_enrollment.return_value': self.program_enrollment, 'get_program_letter_url.return_value': '', 'get_number_of_passed_courses_for_completion.return_value': 0})
serialized_fin_aid = {'id': 123, 'has_user_applied': True, 'application_status': 'WHO-KNOWS', 'min_possible_cost': 100, 'max_possible_cost': 200, 'date_documents_sent': (now_in_utc() - timedelta(hours=12))}
mock_fin_aid_serialize.return_value = serialized_fin_aid
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': True, 'financial_aid_user_info': serialized_fin_aid, 'exam_card_status': ExamProfile.PROFILE_IN_PROGRESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 0, 'grade_average': 91, 'certificate': '', 'grade_records_url': reverse('grade_records', args=[self.program_enrollment.id])}
self.assertEqual(res, expected_data)<|docstring|>Test happy path<|endoftext|> |
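Asserting the whole payload with assertEqual, as above, is strict; earlier tests in this file check only the stable keys through is_subset_dict. A minimal helper with those semantics might look like the following, an assumption about its behavior rather than the helper's actual source:

def is_subset_dict(subset, superset):
    # True when every key/value pair in subset also appears in superset.
    return all(k in superset and superset[k] == v for k, v in subset.items())

payload = {'id': 3, 'title': 'Analytics', 'grade_average': 91}
assert is_subset_dict({'id': 3, 'title': 'Analytics'}, payload)
assert not is_subset_dict({'id': 4}, payload)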
9455477bb436b5b64dd4029912ec0f2b8110453147aa16ec93bdbf2282186012 | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_for_program_letter(self, mock_info_course):
' Verify that the api returns program_letter_url if it exists.'
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': reverse('program_letter', args=[self.program_letter.uuid]), 'get_number_of_passed_courses_for_completion.return_value': 3})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 3, 'grade_average': 91, 'certificate': '', 'program_letter_url': reverse('program_letter', args=[self.program_letter.uuid])}
self.assertEqual(res, expected_data) | Verify that the api returns program_letter_url if it exists. | dashboard/api_test.py | test_program_for_program_letter | mitodl/micromasters | 32 | python | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_for_program_letter(self, mock_info_course):
' '
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': reverse('program_letter', args=[self.program_letter.uuid]), 'get_number_of_passed_courses_for_completion.return_value': 3})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 3, 'grade_average': 91, 'certificate': '', 'program_letter_url': reverse('program_letter', args=[self.program_letter.uuid])}
self.assertEqual(res, expected_data) | @patch('dashboard.api.get_info_for_course', autospec=True)
def test_program_for_program_letter(self, mock_info_course):
' '
self.mmtrack.configure_mock(**{'program': self.program, 'financial_aid_available': False, 'get_exam_card_status.return_value': ExamProfile.PROFILE_SUCCESS, 'calculate_final_grade_average.return_value': 91, 'get_program_certificate_url.return_value': '', 'get_program_letter_url.return_value': reverse('program_letter', args=[self.program_letter.uuid]), 'get_number_of_passed_courses_for_completion.return_value': 3})
mock_info_course.return_value = {'position_in_program': 1}
res = api.get_info_for_program(self.mmtrack)
for course in self.courses:
mock_info_course.assert_any_call(course, self.mmtrack)
expected_data = {'id': self.program.pk, 'description': self.program.description, 'title': self.program.title, 'courses': [{'position_in_program': 1}, {'position_in_program': 1}, {'position_in_program': 1}], 'financial_aid_availability': False, 'exam_card_status': ExamProfile.PROFILE_SUCCESS, 'number_courses_required': self.program.course_set.count(), 'number_courses_passed': 3, 'grade_average': 91, 'certificate': '', 'program_letter_url': reverse('program_letter', args=[self.program_letter.uuid])}
self.assertEqual(res, expected_data)<|docstring|>Verify that the api returns program_letter_url if it exists.<|endoftext|>
c50b4436618c9fe049a3b10e393d742efdcc366156a085dff1602f034a189566 | @ddt.data((False, False, False, False, True, False), (False, True, False, False, True, False), (True, False, False, False, True, False), (True, True, False, False, True, False), (False, False, True, False, True, True), (False, False, True, False, False, True), (False, True, True, False, True, True), (True, False, True, False, True, True), (True, True, True, False, True, True), (True, True, True, True, True, False))
@ddt.unpack
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_is_exam_schedulable(self, is_past, is_future, has_eligibility_future, is_operation_delete, has_coupon, can_schedule_exam):
'Test that is_exam_schedulable is correct'
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future, eligibility_past=(not has_eligibility_future), eligibility_future=has_eligibility_future)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS, operation=(ExamAuthorization.OPERATION_DELETE if is_operation_delete else ExamAuthorization.OPERATION_ADD))
if has_coupon:
coupon = ExamRunCouponFactory.create(course=exam_run.course, coupon_url='http://example.com', is_taken=False)
exam_auth.exam_coupon = coupon
exam_auth.save()
assert (api.is_exam_schedulable(exam_auth.user, exam_auth.course) is can_schedule_exam) | Test that is_exam_schedulable is correct | dashboard/api_test.py | test_is_exam_schedulable | mitodl/micromasters | 32 | python | @ddt.data((False, False, False, False, True, False), (False, True, False, False, True, False), (True, False, False, False, True, False), (True, True, False, False, True, False), (False, False, True, False, True, True), (False, False, True, False, False, True), (False, True, True, False, True, True), (True, False, True, False, True, True), (True, True, True, False, True, True), (True, True, True, True, True, False))
@ddt.unpack
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_is_exam_schedulable(self, is_past, is_future, has_eligibility_future, is_operation_delete, has_coupon, can_schedule_exam):
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future, eligibility_past=(not has_eligibility_future), eligibility_future=has_eligibility_future)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS, operation=(ExamAuthorization.OPERATION_DELETE if is_operation_delete else ExamAuthorization.OPERATION_ADD))
if has_coupon:
coupon = ExamRunCouponFactory.create(course=exam_run.course, coupon_url='http://example.com', is_taken=False)
exam_auth.exam_coupon = coupon
exam_auth.save()
assert (api.is_exam_schedulable(exam_auth.user, exam_auth.course) is can_schedule_exam) | @ddt.data((False, False, False, False, True, False), (False, True, False, False, True, False), (True, False, False, False, True, False), (True, True, False, False, True, False), (False, False, True, False, True, True), (False, False, True, False, False, True), (False, True, True, False, True, True), (True, False, True, False, True, True), (True, True, True, False, True, True), (True, True, True, True, True, False))
@ddt.unpack
@pytest.mark.flaky(max_runs=3, min_passes=1)
def test_is_exam_schedulable(self, is_past, is_future, has_eligibility_future, is_operation_delete, has_coupon, can_schedule_exam):
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future, eligibility_past=(not has_eligibility_future), eligibility_future=has_eligibility_future)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS, operation=(ExamAuthorization.OPERATION_DELETE if is_operation_delete else ExamAuthorization.OPERATION_ADD))
if has_coupon:
coupon = ExamRunCouponFactory.create(course=exam_run.course, coupon_url='http://example.com', is_taken=False)
exam_auth.exam_coupon = coupon
exam_auth.save()
assert (api.is_exam_schedulable(exam_auth.user, exam_auth.course) is can_schedule_exam)<|docstring|>Test that is_exam_schedulable is correct<|endoftext|> |
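The ddt matrix treats is_exam_schedulable as a pure predicate over the scheduling window, the eligibility window, and the authorization (note that the OPERATION_DELETE row comes out False). Stripped of the ORM, the decision reduces to date logic along these lines; a sketch under that reading of the table, not the repository implementation:

from datetime import datetime, timedelta, timezone

def is_exam_schedulable(now, eligibility_ends, authorized):
    # Schedulable while the eligibility window is open and the authorization stands.
    return authorized and eligibility_ends > now

now = datetime.now(timezone.utc)
assert is_exam_schedulable(now, now + timedelta(days=7), authorized=True)
assert not is_exam_schedulable(now, now - timedelta(days=1), authorized=True)
assert not is_exam_schedulable(now, now + timedelta(days=7), authorized=False)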
be10d7a0a5cbd582e936d1e1f60de6d6ef1090e9c6c38bd239b9a0d1a59f3615 | @ddt.data((ExamAuthorization.STATUS_SUCCESS, True, True), (ExamAuthorization.STATUS_FAILED, True, False), (ExamAuthorization.STATUS_SUCCESS, False, False))
@ddt.unpack
def test_get_edx_exam_coupon_url(self, auth_status, has_coupon, returned_coupon):
'\n Test that get_edx_exam_coupon_url returns a url only if the student is authorized for a current exam run\n and there is an available coupon\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=auth_status)
if has_coupon:
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
expected = (coupon_url if returned_coupon else '')
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == expected) | Test that get_edx_exam_coupon_url returns a url only if the student is authorized for a current exam run
and there is an available coupon | dashboard/api_test.py | test_get_edx_exam_coupon_url | mitodl/micromasters | 32 | python | @ddt.data((ExamAuthorization.STATUS_SUCCESS, True, True), (ExamAuthorization.STATUS_FAILED, True, False), (ExamAuthorization.STATUS_SUCCESS, False, False))
@ddt.unpack
def test_get_edx_exam_coupon_url(self, auth_status, has_coupon, returned_coupon):
'\n Test that get_edx_exam_coupon_url returns a url only if the student is authorized for a current exam run\n and there is an available coupon\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=auth_status)
if has_coupon:
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
expected = (coupon_url if returned_coupon else '')
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == expected) | @ddt.data((ExamAuthorization.STATUS_SUCCESS, True, True), (ExamAuthorization.STATUS_FAILED, True, False), (ExamAuthorization.STATUS_SUCCESS, False, False))
@ddt.unpack
def test_get_edx_exam_coupon_url(self, auth_status, has_coupon, returned_coupon):
'\n Test that get_edx_exam_coupon_url returns a url only if the student is authorized for a current exam run\n and there is an available coupon\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=auth_status)
if has_coupon:
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
expected = (coupon_url if returned_coupon else '')
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == expected)<|docstring|>Test that get_edx_exam_coupon_url returns a url only if the student is authorized for a current exam run
and there is an available coupon<|endoftext|>
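Behind get_edx_exam_coupon_url sits a reuse-or-claim shape: return the coupon already assigned to the student, otherwise hand out the first unused one. In plain Python, without the Django queryset, the idea is roughly as follows (all names illustrative):

coupons = [
    {'url': 'http://example.com/a', 'taken_by': None},
    {'url': 'http://example.com/b', 'taken_by': None},
]

def coupon_url_for(user):
    for c in coupons:                 # a coupon already assigned to this user wins
        if c['taken_by'] == user:
            return c['url']
    for c in coupons:                 # otherwise claim the first unused coupon
        if c['taken_by'] is None:
            c['taken_by'] = user
            return c['url']
    return ''                         # no coupon available

assert coupon_url_for('alice') == 'http://example.com/a'
assert coupon_url_for('alice') == 'http://example.com/a'  # repeat calls are stable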
0e7e8d4eb02d1a80b220ed1523eb86439a0dc6ec39d3589db68a30f4a30df05c | def test_get_edx_exam_coupon_url_returns_taken_coupon(self):
'\n Test that get_edx_exam_coupon_url keeps returning the same coupon url once one\n has been assigned to the student\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url='coupon_2', is_taken=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url) | Test that get_edx_exam_coupon_url keeps returning the same coupon url once one
has been assigned to the student | dashboard/api_test.py | test_get_edx_exam_coupon_url_returns_taken_coupon | mitodl/micromasters | 32 | python | def test_get_edx_exam_coupon_url_returns_taken_coupon(self):
'\n Test that get_edx_exam_coupon_url keeps returning the same coupon url once one\n has been assigned to the student\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url='coupon_2', is_taken=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url) | def test_get_edx_exam_coupon_url_returns_taken_coupon(self):
'\n Test that get_edx_exam_coupon_url keeps returning the same coupon url once one\n has been assigned to the student\n '
coupon_url = 'http://example.com'
exam_run = ExamRunFactory.create(scheduling_past=False, scheduling_future=False, eligibility_past=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url=coupon_url, is_taken=False)
ExamRunCouponFactory.create(course=exam_run.course, coupon_url='coupon_2', is_taken=False)
exam_auth = ExamAuthorizationFactory.create(exam_run=exam_run, course=exam_run.course, status=ExamAuthorization.STATUS_SUCCESS)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url)
assert (api.get_edx_exam_coupon_url(exam_auth.user, exam_auth.course) == coupon_url)<|docstring|>Test that get_edx_exam_coupon_url keeps returning the same coupon url once one
has been assigned to the student<|endoftext|>
b24fd92c00ee72506bf67fbf9261eb7ccb3644c74587a9c2a0052b46a4f5a260 | @ddt.data((False, True, 1), (True, False, 0), (False, False, 0))
@ddt.unpack
def test_get_future_exam_runs(self, is_past, is_future, result):
'test get_future_exam_runs'
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future)
assert (len(api.get_future_exam_runs(exam_run.course)) == result) | test get_future_exam_runs | dashboard/api_test.py | test_get_future_exam_runs | mitodl/micromasters | 32 | python | @ddt.data((False, True, 1), (True, False, 0), (False, False, 0))
@ddt.unpack
def test_get_future_exam_runs(self, is_past, is_future, result):
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future)
assert (len(api.get_future_exam_runs(exam_run.course)) == result) | @ddt.data((False, True, 1), (True, False, 0), (False, False, 0))
@ddt.unpack
def test_get_future_exam_runs(self, is_past, is_future, result):
exam_run = ExamRunFactory.create(scheduling_past=is_past, scheduling_future=is_future)
assert (len(api.get_future_exam_runs(exam_run.course)) == result)<|docstring|>test get_future_exam_runs<|endoftext|> |
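As exercised through the scheduling_past/scheduling_future factory flags, get_future_exam_runs amounts to filtering for exam runs whose scheduling window opens after now. A sketch of that filter on plain data; the field name is an assumption:

from datetime import datetime, timedelta, timezone

now = datetime.now(timezone.utc)
exam_runs = [
    {'id': 1, 'date_first_schedulable': now - timedelta(days=30)},  # already open
    {'id': 2, 'date_first_schedulable': now + timedelta(days=30)},  # future
]

future = [r for r in exam_runs if r['date_first_schedulable'] > now]
assert [r['id'] for r in future] == [2]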
e3d8850976ad9867aee57939b48902e00ed65462a1f16505b42b33a81b64bebf | @ddt.data((False, True), (True, True), (False, False))
@ddt.unpack
def test_get_current_exam_run_dates(self, eligibility, has_current_exam):
'test get_current_exam_run_dates'
exam_run = ExamRunFactory.create(scheduling_past=(not has_current_exam), scheduling_future=False, eligibility_past=False, eligibility_future=eligibility)
expected = ''
if has_current_exam:
expected = '{} and {}'.format(exam_run.date_first_eligible.strftime('%b %-d'), exam_run.date_last_eligible.strftime('%b %-d, %Y'))
self.assertEqual(api.get_current_exam_run_dates(exam_run.course), expected) | test get_current_exam_run_dates | dashboard/api_test.py | test_get_current_exam_run_dates | mitodl/micromasters | 32 | python | @ddt.data((False, True), (True, True), (False, False))
@ddt.unpack
def test_get_current_exam_run_dates(self, eligibility, has_current_exam):
exam_run = ExamRunFactory.create(scheduling_past=(not has_current_exam), scheduling_future=False, eligibility_past=False, eligibility_future=eligibility)
expected = ''
if has_current_exam:
expected = '{} and {}'.format(exam_run.date_first_eligible.strftime('%b %-d'), exam_run.date_last_eligible.strftime('%b %-d, %Y'))
self.assertEqual(api.get_current_exam_run_dates(exam_run.course), expected) | @ddt.data((False, True), (True, True), (False, False))
@ddt.unpack
def test_get_current_exam_run_dates(self, eligibility, has_current_exam):
exam_run = ExamRunFactory.create(scheduling_past=(not has_current_exam), scheduling_future=False, eligibility_past=False, eligibility_future=eligibility)
expected = ''
if has_current_exam:
expected = '{} and {}'.format(exam_run.date_first_eligible.strftime('%b %-d'), exam_run.date_last_eligible.strftime('%b %-d, %Y'))
self.assertEqual(api.get_current_exam_run_dates(exam_run.course), expected)<|docstring|>test get_current_exam_run_dates<|endoftext|>
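Note that the expected string is built with strftime('%b %-d'): the unpadded %-d is a glibc extension that works on Linux and macOS but is rejected by strftime on Windows. A portable variant formats the day number by hand (assuming an English locale for the month name):

from datetime import date

def month_day(d):
    # Portable replacement for the glibc-only '%-d' directive.
    return '{} {}'.format(d.strftime('%b'), d.day)

first, last = date(2024, 3, 4), date(2024, 6, 9)
formatted = '{} and {}, {}'.format(month_day(first), month_day(last), last.year)
assert formatted == 'Mar 4 and Jun 9, 2024'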
8b6610ec89d14775f4d792592dc7499075ab6ab8af41fad120c420e41ed32d97 | @ddt.data((1, 1, False, False), (2, 1, False, True), (3, 1, False, True), (3, 2, False, False), (1, 1, True, False), (2, 1, True, True), (3, 1, True, True), (3, 2, True, False))
@ddt.unpack
def test_has_to_pay(self, num_of_taken_exams, num_of_payments, first_date, result):
'Test has_to_pay_for_exam'
if first_date:
self.mmtrack.program.exam_attempts_first_date = (self.now + timedelta(weeks=2))
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | Test has_to_pay_for_exam | dashboard/api_test.py | test_has_to_pay | mitodl/micromasters | 32 | python | @ddt.data((1, 1, False, False), (2, 1, False, True), (3, 1, False, True), (3, 2, False, False), (1, 1, True, False), (2, 1, True, True), (3, 1, True, True), (3, 2, True, False))
@ddt.unpack
def test_has_to_pay(self, num_of_taken_exams, num_of_payments, first_date, result):
if first_date:
self.mmtrack.program.exam_attempts_first_date = (self.now + timedelta(weeks=2))
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | @ddt.data((1, 1, False, False), (2, 1, False, True), (3, 1, False, True), (3, 2, False, False), (1, 1, True, False), (2, 1, True, True), (3, 1, True, True), (3, 2, True, False))
@ddt.unpack
def test_has_to_pay(self, num_of_taken_exams, num_of_payments, first_date, result):
if first_date:
self.mmtrack.program.exam_attempts_first_date = (self.now + timedelta(weeks=2))
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result)<|docstring|>Test has_to_pay_for_exam<|endoftext|> |
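The matrix encodes a simple invariant: each payment buys a fixed number of exam attempts, and payment is due again once the attempts taken reach payments times that allowance. Re-deriving the rows with an allowance of 2, a value inferred from the data above rather than read from the source:

ATTEMPTS_PER_PAID_RUN = 2  # inferred from the table; an assumption

def has_to_pay(taken_attempts, payments):
    return taken_attempts >= payments * ATTEMPTS_PER_PAID_RUN

for taken, payments, expected in [(1, 1, False), (2, 1, True), (3, 1, True), (3, 2, False)]:
    assert has_to_pay(taken, payments) is expected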
acfe8962ab5c717aa16b7079f7f9dd62025f2494390bf3631f93199bf0197731 | @ddt.data((0, 1, False, False), (1, 1, False, True), (0, 1, True, False), (1, 1, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date(self, num_of_taken_exams, num_of_payments, before_second_date, result):
'Test has_to_pay_for_exam after attempt dates have been set'
second_date = ((self.now + timedelta(weeks=1)) if before_second_date else (self.now - timedelta(weeks=1)))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | Test has_to_pay_for_exam after attempt dates have been set | dashboard/api_test.py | test_has_to_pay_after_first_date | mitodl/micromasters | 32 | python | @ddt.data((0, 1, False, False), (1, 1, False, True), (0, 1, True, False), (1, 1, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date(self, num_of_taken_exams, num_of_payments, before_second_date, result):
second_date = ((self.now + timedelta(weeks=1)) if before_second_date else (self.now - timedelta(weeks=1)))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | @ddt.data((0, 1, False, False), (1, 1, False, True), (0, 1, True, False), (1, 1, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date(self, num_of_taken_exams, num_of_payments, before_second_date, result):
second_date = ((self.now + timedelta(weeks=1)) if before_second_date else (self.now - timedelta(weeks=1)))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = num_of_payments
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result)<|docstring|>Test has_to_pay_for_exam after attempt dates have been set<|endoftext|> |
1e5988b1d7894fca01a87e29a6a41cbded002f9b700846d2531aab466a6b242a | @ddt.data((0, 1, 1, False, False), (1, 1, 1, False, True), (1, 2, 2, False, False), (2, 2, 2, False, True), (0, 1, 2, True, False), (1, 1, 2, True, False), (1, 2, 3, True, False), (2, 2, 3, True, False), (3, 2, 3, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date_old_payment(self, num_of_taken_exams, num_of_payments, total_attempts, older_purchase, result):
'Test has_to_pay_for_exam after attempt dates have been set'
second_date = (self.now + timedelta(weeks=1))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
if older_purchase:
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
three_weeks_ago = (self.now - timedelta(weeks=3))
Line.objects.filter(order=order).update(modified_at=three_weeks_ago)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = total_attempts
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | Test has_to_pay_for_exam after attempt dates have been set | dashboard/api_test.py | test_has_to_pay_after_first_date_old_payment | mitodl/micromasters | 32 | python | @ddt.data((0, 1, 1, False, False), (1, 1, 1, False, True), (1, 2, 2, False, False), (2, 2, 2, False, True), (0, 1, 2, True, False), (1, 1, 2, True, False), (1, 2, 3, True, False), (2, 2, 3, True, False), (3, 2, 3, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date_old_payment(self, num_of_taken_exams, num_of_payments, total_attempts, older_purchase, result):
second_date = (self.now + timedelta(weeks=1))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
if older_purchase:
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
three_weeks_ago = (self.now - timedelta(weeks=3))
Line.objects.filter(order=order).update(modified_at=three_weeks_ago)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = total_attempts
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result) | @ddt.data((0, 1, 1, False, False), (1, 1, 1, False, True), (1, 2, 2, False, False), (2, 2, 2, False, True), (0, 1, 2, True, False), (1, 1, 2, True, False), (1, 2, 3, True, False), (2, 2, 3, True, False), (3, 2, 3, True, True))
@ddt.unpack
def test_has_to_pay_after_first_date_old_payment(self, num_of_taken_exams, num_of_payments, total_attempts, older_purchase, result):
second_date = (self.now + timedelta(weeks=1))
self.mmtrack.program.exam_attempts_first_date = (self.now - timedelta(weeks=2))
self.mmtrack.program.exam_attempts_second_date = second_date
self.mmtrack.get_payments_count_for_course.return_value = num_of_payments
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
if older_purchase:
run = CourseRunFactory.create(course=self.course)
order = OrderFactory.create(user=self.user, status=Order.FULFILLED)
LineFactory.create(order=order, course_key=run.edx_course_key)
three_weeks_ago = (self.now - timedelta(weeks=3))
Line.objects.filter(order=order).update(modified_at=three_weeks_ago)
self.mmtrack.get_custom_number_of_attempts_for_course.return_value = total_attempts
for _ in range(num_of_taken_exams):
ExamAuthorizationFactory.create(user=self.user, course=self.course, exam_taken=True)
assert (api.has_to_pay_for_exam(self.mmtrack, self.course) is result)<|docstring|>Test has_to_pay_for_exam after attempt dates have been set<|endoftext|> |
4833a011b3f35367a0958447e714d77fe5e7787706991e0cf8d1ab8121dbb61a | def test_get_certificate_url(self):
'Test get_certificate_url for course with certificate'
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
cert = MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = cert
CourseCertificateSignatoriesFactory.create(course=self.course)
assert (api.get_certificate_url(self.mmtrack, self.course) == '/certificate/course/{}'.format(cert.hash)) | Test get_certificate_url for course with certificate | dashboard/api_test.py | test_get_certificate_url | mitodl/micromasters | 32 | python | def test_get_certificate_url(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
cert = MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = cert
CourseCertificateSignatoriesFactory.create(course=self.course)
assert (api.get_certificate_url(self.mmtrack, self.course) == '/certificate/course/{}'.format(cert.hash)) | def test_get_certificate_url(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
cert = MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = cert
CourseCertificateSignatoriesFactory.create(course=self.course)
assert (api.get_certificate_url(self.mmtrack, self.course) == '/certificate/course/{}'.format(cert.hash))<|docstring|>Test get_certificate_url for course with certificate<|endoftext|> |
15072b77b5cb7b237b637aa4f272926026ec80ccedea4fb412237ed127006f1e | def test_no_signatories(self):
'Test get_certificate_url for course with no signatories'
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | Test get_certificate_url for course with no signatories | dashboard/api_test.py | test_no_signatories | mitodl/micromasters | 32 | python | def test_no_signatories(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | def test_no_signatories(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
MicromastersCourseCertificateFactory.create(course=self.course, user=self.user)
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '')<|docstring|>Test get_certificate_url for course with no signatories<|endoftext|>
558fd0e8ef3aa538bf16353caa766a23d3662717149a6ee9850d1453c04325e9 | def test_has_no_final_grade(self):
'Test no final grade for a course'
self.mmtrack.get_best_final_grade_for_course.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | Test no final grade for a course | dashboard/api_test.py | test_has_no_final_grade | mitodl/micromasters | 32 | python | def test_has_no_final_grade(self):
self.mmtrack.get_best_final_grade_for_course.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | def test_has_no_final_grade(self):
self.mmtrack.get_best_final_grade_for_course.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '')<|docstring|>Test no final grade for a course<|endoftext|>
e74865b58c3660f4ce63edce7c7da9fe3709ec6d10615672c92a8246c682a19b | def test_has_passing_grade_no_certificate(self):
'Test has passing grade but no certificate'
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | Test has passing grade but no certificate | dashboard/api_test.py | test_has_passing_grade_no_certificate | mitodl/micromasters | 32 | python | def test_has_passing_grade_no_certificate(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '') | def test_has_passing_grade_no_certificate(self):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.get_course_certificate.return_value = None
assert (api.get_certificate_url(self.mmtrack, self.course) == '')<|docstring|>Test has passing grade but no certificate<|endoftext|>
00aeebeca458543e7ee370557cd714f0759daa81e71d14705b39bf1c584c2eaf | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False), ('audit', True, False))
@ddt.unpack
def test_edx_course_certificate(self, certificate_type, is_passing, has_url):
'Test edx certificate url for non FA courses'
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.financial_aid_available = False
self.mmtrack.has_passing_certificate.return_value = ((certificate_type == 'verified') and is_passing)
cert_json = {'username': 'staff', 'course_id': self.course_run.edx_course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': '/certificates/user/course_key', 'grade': '0.98'}
self.mmtrack.certificates = CachedCertificate.deserialize_edx_data([cert_json])
certificate_url = (urljoin(settings.EDXORG_BASE_URL, 'certificates/user/course_key') if has_url else '')
assert (api.get_certificate_url(self.mmtrack, self.course) == certificate_url) | Test edx certificate url for non FA courses | dashboard/api_test.py | test_edx_course_certificate | mitodl/micromasters | 32 | python | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False), ('audit', True, False))
@ddt.unpack
def test_edx_course_certificate(self, certificate_type, is_passing, has_url):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.financial_aid_available = False
self.mmtrack.has_passing_certificate.return_value = ((certificate_type == 'verified') and is_passing)
cert_json = {'username': 'staff', 'course_id': self.course_run.edx_course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': '/certificates/user/course_key', 'grade': '0.98'}
self.mmtrack.certificates = CachedCertificate.deserialize_edx_data([cert_json])
certificate_url = (urljoin(settings.EDXORG_BASE_URL, 'certificates/user/course_key') if has_url else '')
assert (api.get_certificate_url(self.mmtrack, self.course) == certificate_url) | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False), ('audit', True, False))
@ddt.unpack
def test_edx_course_certificate(self, certificate_type, is_passing, has_url):
self.mmtrack.get_best_final_grade_for_course.return_value = self.final_grade
self.mmtrack.financial_aid_available = False
self.mmtrack.has_passing_certificate.return_value = ((certificate_type == 'verified') and is_passing)
cert_json = {'username': 'staff', 'course_id': self.course_run.edx_course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': '/certificates/user/course_key', 'grade': '0.98'}
self.mmtrack.certificates = CachedCertificate.deserialize_edx_data([cert_json])
certificate_url = (urljoin(settings.EDXORG_BASE_URL, 'certificates/user/course_key') if has_url else '')
assert (api.get_certificate_url(self.mmtrack, self.course) == certificate_url)<|docstring|>Test edx certificate url for non FA courses<|endoftext|> |
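The expected certificate_url above is produced by urljoin, whose result depends on whether the base URL ends in a slash: the relative path replaces everything after the last '/'. A short illustration with hypothetical URLs:

from urllib.parse import urljoin

print(urljoin('https://example.com/a/b', 'c'))   # https://example.com/a/c
print(urljoin('https://example.com/a/b/', 'c'))  # https://example.com/a/b/c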
10b8954c9ba3915f419499cc076f2face6b02b6fdc808791b79f7d944f080af0 | def _update_cache(user, edx_client, cache_type, provider):
'Fail updating the cache for only the given cache type'
if (cache_type == failed_cache_type):
raise KeyError() | Fail updating the cache for only the given cache type | dashboard/api_test.py | _update_cache | mitodl/micromasters | 32 | python | def _update_cache(user, edx_client, cache_type, provider):
if (cache_type == failed_cache_type):
raise KeyError() | def _update_cache(user, edx_client, cache_type, provider):
if (cache_type == failed_cache_type):
raise KeyError()<|docstring|>Fail updating the cache for only the given cache type<|endoftext|> |
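_update_cache is shaped to be handed to a mock as side_effect, so that only the targeted cache type fails while every other call succeeds silently. A sketch of that usage (failed_cache_type and the argument values are hypothetical; in the real test they come from the enclosing scope):

from unittest.mock import Mock

failed_cache_type = 'enrollment'  # hypothetical value set by the test

def _update_cache(user, edx_client, cache_type, provider):
    if cache_type == failed_cache_type:
        raise KeyError()

update_cache = Mock(side_effect=_update_cache)
update_cache('user', 'client', 'certificate', 'provider')  # succeeds
try:
    update_cache('user', 'client', 'enrollment', 'provider')
except KeyError:
    print('only the targeted cache type fails')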
4bb3a220208c3139b89c6ba4045ae4878d7b7eb8abe700ed5e00b109abb66fe6 | def mock_return_status(actual_course_run, *args, **kargs):
'Mock function for get_status_for_courserun'
if (actual_course_run == specific_run):
return api.CourseRunUserStatus(status=status, course_run=actual_course_run)
return api.CourseRunUserStatus(status=other_run_status, course_run=actual_course_run) | Mock function for get_status_for_courserun | dashboard/api_test.py | mock_return_status | mitodl/micromasters | 32 | python | def mock_return_status(actual_course_run, *args, **kargs):
if (actual_course_run == specific_run):
return api.CourseRunUserStatus(status=status, course_run=actual_course_run)
return api.CourseRunUserStatus(status=other_run_status, course_run=actual_course_run) | def mock_return_status(actual_course_run, *args, **kargs):
if (actual_course_run == specific_run):
return api.CourseRunUserStatus(status=status, course_run=actual_course_run)
return api.CourseRunUserStatus(status=other_run_status, course_run=actual_course_run)<|docstring|>Mock function for get_status_for_courserun<|endoftext|> |
cfdbfe9dc747b93a887834ce31a904a3566a88c64bffd5b0ea20032d8bf43b55 | def mocked_get_status_for_courserun(run, enrollments):
'Mock get_status_for_courserun with different values for each run'
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run) | Mock get_status_for_courserun with different values for each run | dashboard/api_test.py | mocked_get_status_for_courserun | mitodl/micromasters | 32 | python | def mocked_get_status_for_courserun(run, enrollments):
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run) | def mocked_get_status_for_courserun(run, enrollments):
return api.CourseRunUserStatus(status=api.CourseRunStatus.NOT_ENROLLED, course_run=run)<|docstring|>Mock get_status_for_courserun with different values for each run<|endoftext|> |
e4552a58f26c2193002a5459f8cc67a5090337cdebad2261fa215630358776dc | def getSerialPorts():
' Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n '
if sys.platform.startswith('win'):
ports = [('COM%s' % (i + 1)) for i in range(256)]
elif (sys.platform.startswith('linux') or sys.platform.startswith('cygwin')):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result | Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system | projects/util/term/startTerms.py | getSerialPorts | adriansaridache/demo_fruity | 0 | python | def getSerialPorts():
' Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n '
if sys.platform.startswith('win'):
ports = [('COM%s' % (i + 1)) for i in range(256)]
elif (sys.platform.startswith('linux') or sys.platform.startswith('cygwin')):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result | def getSerialPorts():
' Lists serial port names\n\n :raises EnvironmentError:\n On unsupported or unknown platforms\n :returns:\n A list of the serial ports available on the system\n '
if sys.platform.startswith('win'):
ports = [('COM%s' % (i + 1)) for i in range(256)]
elif (sys.platform.startswith('linux') or sys.platform.startswith('cygwin')):
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result<|docstring|>Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system<|endoftext|> |
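A minimal usage sketch for getSerialPorts; it assumes pyserial is installed (the serial import above) and simply prints whichever ports could be opened:

if __name__ == '__main__':
    for port in getSerialPorts():
        print(port)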
bf4df05f961eb8aceb450bc6054329303711595a18ec434d62cead6b9e3f1741 | def np_put(p):
'\n a[order][np_put(order)] = a\n\n '
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i)
return s | a[order][np_put(order)] = a | pypcurve.py | np_put | c235gsy/scTrack | 0 | python | def np_put(p):
'\n \n\n '
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i)
return s | def np_put(p):
'\n \n\n '
n = p.size
s = np.zeros(n, dtype=np.int32)
i = np.arange(n, dtype=np.int32)
np.put(s, p, i)
return s<|docstring|>a[order][np_put(order)] = a<|endoftext|>
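With np.put writing into s in place and the function returning s, np_put(p) is the inverse permutation of p, which is exactly the identity stated in the docstring. A quick check:

import numpy as np

a = np.array([10, 20, 30, 40])
order = np.argsort(np.array([0.3, 0.1, 0.9, 0.2]))  # [1, 3, 0, 2]
inverse = np_put(order)                             # [2, 0, 3, 1]
# Sorting and then indexing with the inverse restores the input order.
assert np.array_equal(a[order][inverse], a)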
94b0909df8a7b736a28f9f274cfa3f784cd9ba307eeada437d5e550463ac0552 | def project_points(points, curve, init_lambda=None, inter_dimension=3, extend=2, n_curve_seq=int(10000.0), return_curve_seq=False, lambda_model='pseudotime', smoothness=1.0):
'\n points : the points used to project\n curve : the curve the points project to\n inter_dimension : the interpolation dimension of the interpolation functions\n extend : the rate of the extension of the interpolation prediction\n n_curve_seq : the number of points used to generate the curve sequences\n return_curve_seq : default False, if True, the dictionary returned contains a key "curve_seq"\n\n return:\n a dictionary:\n "projection" : the projections of the points\n "order" : the order array of the points in the curve\n "lambda_points" : the lambda of the points\n "extend" : the rate of the extension of the interpolation prediction\n "n_curve_seq" : the number of points used to generate the curve sequences\n "curve_seq" : the whole curve sequences\n '
n_points = len(points)
n_curves = len(curve)
n_features = len(points[0])
n_curve_seq_all = int(((n_curve_seq * (100 + (extend * 2))) / 100))
if (init_lambda is None):
lambda_curve = np.linspace(0, 100, n_curves)
else:
lambda_curve = ((100 * (init_lambda - init_lambda.min())) / (init_lambda.max() - init_lambda.min()))
lambda_seq = np.linspace((0 - extend), (100 + extend), n_curve_seq_all)
(sorted_lambda_curve, lambda_curve_idx) = np.unique(lambda_curve, return_index=True)
interpolation_functions = [UnivariateSpline(x=sorted_lambda_curve, y=curve[(lambda_curve_idx, n_f)], k=inter_dimension, s=(smoothness * len(sorted_lambda_curve))) for n_f in range(n_features)]
curve_seq = np.array([function(lambda_seq) for function in interpolation_functions]).T
min_dist_idx_ = np.array([np.argmin(np.sum(((p - curve_seq) ** 2), axis=1), axis=0) for p in points])
projection = curve_seq[min_dist_idx_]
order = np.argsort(lambda_seq[min_dist_idx_])
if (lambda_model == 'arc'):
ord_projection = projection[order]
lambda_points = [0]
arc = 0
for i in range((len(ord_projection) - 1)):
arc += np.sqrt(np.sum(((ord_projection[i] - ord_projection[(i + 1)]) ** 2)))
lambda_points.append(arc)
lambda_points = np.array(lambda_points)[np_put(order)]
elif (lambda_model == 'pseudotime'):
lambda_points = lambda_seq[min_dist_idx_]
lambda_points = ((100 * (lambda_points - lambda_points.min())) / (lambda_points.max() - lambda_points.min()))
else:
print('The lambda_model must be chosen from "arc" and "pseudotime" ')
sys.exit()
output = {'projection': projection, 'order': order, 'lambda_points': lambda_points, 'extend': extend, 'n_curve_seq': n_curve_seq}
if return_curve_seq:
output['curve_seq'] = curve_seq
return output | points : the points used to project
curve : the curve the points project to
inter_dimension : the interpolation dimension of the interpolation functions
extend : the rate of the extension of the interpolation prediction
n_curve_seq : the number of points used to generate the curve sequences
return_curve_seq : default False, if True, the dictionary returned contains a key "curve_seq"
return:
a dictionary:
"projection" : the projections of the points
"order" : the order array of the points in the curve
"lambda_points" : the lambda of the points
"extend" : the rate of the extension of the interpolation prediction
"n_curve_seq" : the number of point used to general the curve sequences
"curve_seq" : the whole curve sequences | pypcurve.py | project_points | c235gsy/scTrack | 0 | python | def project_points(points, curve, init_lambda=None, inter_dimension=3, extend=2, n_curve_seq=int(10000.0), return_curve_seq=False, lambda_model='pseudotime', smoothness=1.0):
'\n points : the points used to project\n curve : the curve the points project to\n inter_dimension : the interpolation dimension of the interpolation functions\n extend : the rate of the extension of the interpolation prediction\n n_curve_seq : the number of points used to generate the curve sequences\n return_curve_seq : default False, if True, the dictionary returned contains a key "curve_seq"\n\n return:\n a dictionary:\n "projection" : the projections of the points\n "order" : the order array of the points in the curve\n "lambda_points" : the lambda of the points\n "extend" : the rate of the extension of the interpolation prediction\n "n_curve_seq" : the number of points used to generate the curve sequences\n "curve_seq" : the whole curve sequences\n '
n_points = len(points)
n_curves = len(curve)
n_features = len(points[0])
n_curve_seq_all = int(((n_curve_seq * (100 + (extend * 2))) / 100))
if (init_lambda is None):
lambda_curve = np.linspace(0, 100, n_curves)
else:
lambda_curve = ((100 * (init_lambda - init_lambda.min())) / (init_lambda.max() - init_lambda.min()))
lambda_seq = np.linspace((0 - extend), (100 + extend), n_curve_seq_all)
(sorted_lambda_curve, lambda_curve_idx) = np.unique(lambda_curve, return_index=True)
interpolation_functions = [UnivariateSpline(x=sorted_lambda_curve, y=curve[(lambda_curve_idx, n_f)], k=inter_dimension, s=(smoothness * len(sorted_lambda_curve))) for n_f in range(n_features)]
curve_seq = np.array([function(lambda_seq) for function in interpolation_functions]).T
min_dist_idx_ = np.array([np.argmin(np.sum(((p - curve_seq) ** 2), axis=1), axis=0) for p in points])
projection = curve_seq[min_dist_idx_]
order = np.argsort(lambda_seq[min_dist_idx_])
if (lambda_model == 'arc'):
ord_projection = projection[order]
lambda_points = [0]
arc = 0
for i in range((len(ord_projection) - 1)):
arc += np.sqrt(np.sum(((ord_projection[i] - ord_projection[(i + 1)]) ** 2)))
lambda_points.append(arc)
lambda_points = np.array(lambda_points)[np_put(order)]
elif (lambda_model == 'pseudotime'):
lambda_points = lambda_seq[min_dist_idx_]
lambda_points = ((100 * (lambda_points - lambda_points.min())) / (lambda_points.max() - lambda_points.min()))
else:
print('The lambda_model must be chosen from "arc" and "pseudotime" ')
sys.exit()
output = {'projection': projection, 'order': order, 'lambda_points': lambda_points, 'extend': extend, 'n_curve_seq': n_curve_seq}
if return_curve_seq:
output['curve_seq'] = curve_seq
return output | def project_points(points, curve, init_lambda=None, inter_dimension=3, extend=2, n_curve_seq=int(10000.0), return_curve_seq=False, lambda_model='pseudotime', smoothness=1.0):
'\n points : the points used to project\n curve : the curve the points project to\n inter_dimension : the interpolation dimension of the interpolation functions\n extend : the rate of the extension of the interpolation prediction\n n_curve_seq : the number of points used to generate the curve sequences\n return_curve_seq : default False, if True, the dictionary returned contains a key "curve_seq"\n\n return:\n a dictionary:\n "projection" : the projections of the points\n "order" : the order array of the points in the curve\n "lambda_points" : the lambda of the points\n "extend" : the rate of the extension of the interpolation prediction\n "n_curve_seq" : the number of points used to generate the curve sequences\n "curve_seq" : the whole curve sequences\n '
n_points = len(points)
n_curves = len(curve)
n_features = len(points[0])
n_curve_seq_all = int(((n_curve_seq * (100 + (extend * 2))) / 100))
if (init_lambda is None):
lambda_curve = np.linspace(0, 100, n_curves)
else:
lambda_curve = ((100 * (init_lambda - init_lambda.min())) / (init_lambda.max() - init_lambda.min()))
lambda_seq = np.linspace((0 - extend), (100 + extend), n_curve_seq_all)
(sorted_lambda_curve, lambda_curve_idx) = np.unique(lambda_curve, return_index=True)
interpolation_functions = [UnivariateSpline(x=sorted_lambda_curve, y=curve[(lambda_curve_idx, n_f)], k=inter_dimension, s=(smoothness * len(sorted_lambda_curve))) for n_f in range(n_features)]
curve_seq = np.array([function(lambda_seq) for function in interpolation_functions]).T
min_dist_idx_ = np.array([np.argmin(np.sum(((p - curve_seq) ** 2), axis=1), axis=0) for p in points])
projection = curve_seq[min_dist_idx_]
order = np.argsort(lambda_seq[min_dist_idx_])
if (lambda_model == 'arc'):
ord_projection = projection[order]
lambda_points = [0]
arc = 0
for i in range((len(ord_projection) - 1)):
arc += np.sqrt(np.sum(((ord_projection[i] - ord_projection[(i + 1)]) ** 2)))
lambda_points.append(arc)
lambda_points = np.array(lambda_points)[np_put(order)]
elif (lambda_model == 'pseudotime'):
lambda_points = lambda_seq[min_dist_idx_]
lambda_points = ((100 * (lambda_points - lambda_points.min())) / (lambda_points.max() - lambda_points.min()))
else:
print('The lambda_model must be chosen from "arc" and "pseudotime" ')
sys.exit()
output = {'projection': projection, 'order': order, 'lambda_points': lambda_points, 'extend': extend, 'n_curve_seq': n_curve_seq}
if return_curve_seq:
output['curve_seq'] = curve_seq
return output<|docstring|>points : the points used to project
curve : the curve the points project to
inter_dimension : the interpolation dimension of the interpolation functions
extend : the rate of the extension of the interpolation prediction
n_curve_seq : the number of points used to generate the curve sequences
return_curve_seq : default False, if True, the dictionary returned contains a key "curve_seq"
return:
a dictionary:
"projection" : the projections of the points
"order" : the order array of the points in the curve
"lambda_points" : the lambda of the points
"extend" : the rate of the extension of the interpolation prediction
"n_curve_seq" : the number of point used to general the curve sequences
"curve_seq" : the whole curve sequences<|endoftext|> |
ffb065e9ab3e76b15e99369b086db23a136fcfaa946ce7ba4712c2edbd3682a4 | def clear_old_snapshots():
' Remove any old snapshots to minimize disk space usage locally. '
logging.info('Removing old Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'clearsnapshot'])
except CalledProcessError as error:
logging.error('Error while deleting old Cassandra snapshots. Error: {0}'.format(str(error))) | Remove any old snapshots to minimize disk space usage locally. | AppDB/appscale/datastore/backup/cassandra_backup.py | clear_old_snapshots | HafeezRai/appscale | 1 | python | def clear_old_snapshots():
' '
logging.info('Removing old Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'clearsnapshot'])
except CalledProcessError as error:
logging.error('Error while deleting old Cassandra snapshots. Error: {0}'.format(str(error))) | def clear_old_snapshots():
' '
logging.info('Removing old Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'clearsnapshot'])
except CalledProcessError as error:
logging.error('Error while deleting old Cassandra snapshots. Error: {0}'.format(str(error)))<|docstring|>Remove any old snapshots to minimize disk space usage locally.<|endoftext|> |
8864771b4338502cc52db24f185d14500c8af79ace02dc4c54f75b26d9946ea8 | def create_snapshot(snapshot_name=''):
' Perform local Cassandra backup by taking a new snapshot.\n\n Args:\n snapshot_name: A str, optional. A fixed name for the snapshot to create.\n Returns:\n True on success, False otherwise.\n '
logging.info('Creating new Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'snapshot'])
except CalledProcessError as error:
logging.error('Error while creating new Cassandra snapshots. Error: {0}'.format(str(error)))
return False
return True | Perform local Cassandra backup by taking a new snapshot.
Args:
snapshot_name: A str, optional. A fixed name for the snapshot to create.
Returns:
True on success, False otherwise. | AppDB/appscale/datastore/backup/cassandra_backup.py | create_snapshot | HafeezRai/appscale | 1 | python | def create_snapshot(snapshot_name=''):
' Perform local Cassandra backup by taking a new snapshot.\n\n Args:\n snapshot_name: A str, optional. A fixed name for the snapshot to create.\n Returns:\n True on success, False otherwise.\n '
logging.info('Creating new Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'snapshot'])
except CalledProcessError as error:
logging.error('Error while creating new Cassandra snapshots. Error: {0}'.format(str(error)))
return False
return True | def create_snapshot(snapshot_name=''):
' Perform local Cassandra backup by taking a new snapshot.\n\n Args:\n snapshot_name: A str, optional. A fixed name for the snapshot to create.\n Returns:\n True on success, False otherwise.\n '
logging.info('Creating new Cassandra snapshots...')
try:
subprocess.check_call([NODE_TOOL, 'snapshot'])
except CalledProcessError as error:
logging.error('Error while creating new Cassandra snapshots. Error: {0}'.format(str(error)))
return False
return True<|docstring|>Perform local Cassandra backup by taking a new snapshot.
Args:
snapshot_name: A str, optional. A fixed name for the snapshot to create.
Returns:
True on success, False otherwise.<|endoftext|> |
0147949e4ddc9ae6072403adbb1cd5ca8935498b46c49cd27fb0e6d62c4e1fa3 | def remove_old_data():
' Removes previous node data from the Cassandra store. '
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.warning('Removing data from {0}'.format(data_dir))
try:
subprocess.Popen('find /opt/appscale/cassandra -name "*" | grep ".db\\|.txt\\|.log" | grep -v snapshot | xargs rm', shell=True)
logging.info('Done removing data!')
except CalledProcessError as error:
logging.error('Error while removing old data from db. Overwriting... Error: {0}'.format(str(error))) | Removes previous node data from the Cassandra store. | AppDB/appscale/datastore/backup/cassandra_backup.py | remove_old_data | HafeezRai/appscale | 1 | python | def remove_old_data():
' '
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.warning('Removing data from {0}'.format(data_dir))
try:
subprocess.Popen('find /opt/appscale/cassandra -name "*" | grep ".db\\|.txt\\|.log" | grep -v snapshot | xargs rm', shell=True)
logging.info('Done removing data!')
except CalledProcessError as error:
logging.error('Error while removing old data from db. Overwriting... Error: {0}'.format(str(error))) | def remove_old_data():
' '
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.warning('Removing data from {0}'.format(data_dir))
try:
subprocess.Popen('find /opt/appscale/cassandra -name "*" | grep ".db\\|.txt\\|.log" | grep -v snapshot | xargs rm', shell=True)
logging.info('Done removing data!')
except CalledProcessError as error:
logging.error('Error while removing old data from db. Overwriting... Error: {0}'.format(str(error)))<|docstring|>Removes previous node data from the Cassandra store.<|endoftext|> |
fd4c174473bf20705a58513cfda4fa92abe9c560206baac577ceb4abc71207ed | def restore_snapshots():
' Restore snapshot into correct directories.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Restoring Cassandra snapshots.')
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}/'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.debug('Restoring in dir {0}'.format(data_dir))
for (path, _, filenames) in os.walk(data_dir):
for filename in filenames:
logging.debug('Restoring: {0}'.format(filename))
if (not filename):
logging.warn('skipping...')
continue
full_path = '{0}/{1}'.format(path, filename)
new_full_path = '{0}/../../{1}'.format(path, filename)
logging.debug('{0} -> {1}'.format(full_path, new_full_path))
if (not backup_recovery_helper.rename(full_path, new_full_path)):
logging.error('Error while moving Cassandra snapshot in place. Aborting restore...')
return False
logging.info('Done restoring Cassandra snapshots.')
return True | Restore snapshot into correct directories.
Returns:
True on success, False otherwise. | AppDB/appscale/datastore/backup/cassandra_backup.py | restore_snapshots | HafeezRai/appscale | 1 | python | def restore_snapshots():
' Restore snapshot into correct directories.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Restoring Cassandra snapshots.')
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}/'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.debug('Restoring in dir {0}'.format(data_dir))
for (path, _, filenames) in os.walk(data_dir):
for filename in filenames:
logging.debug('Restoring: {0}'.format(filename))
if (not filename):
logging.warn('skipping...')
continue
full_path = '{0}/{1}'.format(path, filename)
new_full_path = '{0}/../../{1}'.format(path, filename)
logging.debug('{0} -> {1}'.format(full_path, new_full_path))
if (not backup_recovery_helper.rename(full_path, new_full_path)):
logging.error('Error while moving Cassandra snapshot in place. Aborting restore...')
return False
logging.info('Done restoring Cassandra snapshots.')
return True | def restore_snapshots():
' Restore snapshot into correct directories.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Restoring Cassandra snapshots.')
for directory in CASSANDRA_DATA_SUBDIRS:
data_dir = '{0}/{1}/{2}/'.format(APPSCALE_DATA_DIR, 'cassandra', directory)
logging.debug('Restoring in dir {0}'.format(data_dir))
for (path, _, filenames) in os.walk(data_dir):
for filename in filenames:
logging.debug('Restoring: {0}'.format(filename))
if (not filename):
logging.warn('skipping...')
continue
full_path = '{0}/{1}'.format(path, filename)
new_full_path = '{0}/../../{1}'.format(path, filename)
logging.debug('{0} -> {1}'.format(full_path, new_full_path))
if (not backup_recovery_helper.rename(full_path, new_full_path)):
logging.error('Error while moving Cassandra snapshot in place. Aborting restore...')
return False
logging.info('Done restoring Cassandra snapshots.')
return True<|docstring|>Restore snapshot into correct directories.
Returns:
True on success, False otherwise.<|endoftext|> |
d96564eb3fb3a7fb4d6662fbcc61a36a0e0b328e5a04dd4ec3f392b3a0c7a589 | def shutdown_datastore():
' Top level function for bringing down Cassandra.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Shutting down Cassandra.')
monit_interface.stop(cassandra_interface.CASSANDRA_MONIT_WATCH_NAME, is_group=False)
logging.warning('Done!')
return True | Top level function for bringing down Cassandra.
Returns:
True on success, False otherwise. | AppDB/appscale/datastore/backup/cassandra_backup.py | shutdown_datastore | HafeezRai/appscale | 1 | python | def shutdown_datastore():
' Top level function for bringing down Cassandra.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Shutting down Cassandra.')
monit_interface.stop(cassandra_interface.CASSANDRA_MONIT_WATCH_NAME, is_group=False)
logging.warning('Done!')
return True | def shutdown_datastore():
' Top level function for bringing down Cassandra.\n\n Returns:\n True on success, False otherwise.\n '
logging.info('Shutting down Cassandra.')
monit_interface.stop(cassandra_interface.CASSANDRA_MONIT_WATCH_NAME, is_group=False)
logging.warning('Done!')
return True<|docstring|>Top level function for bringing down Cassandra.
Returns:
True on success, False otherwise.<|endoftext|> |
06a498e8357c0fdd732b996adb3c99d82f10de0a542f1604674cb1a0c1b10add | def backup_data(path, keyname):
" Backup Cassandra snapshot data directories/files.\n\n Args:\n path: A string containing the location to store the backup on each of the\n DB machines.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db backup.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
for db_ip in db_ips:
appscale_utils.ssh(db_ip, keyname, '{} clearsnapshot'.format(NODE_TOOL))
appscale_utils.ssh(db_ip, keyname, '{} snapshot'.format(NODE_TOOL))
get_snapshot_size = 'find {0} -name "snapshots" -exec du -s {{}} \\;'.format(APPSCALE_DATA_DIR)
du_output = appscale_utils.ssh(db_ip, keyname, get_snapshot_size, method=subprocess.check_output)
backup_size = sum((int(line.split()[0]) for line in du_output.split('\n') if line))
output_dir = ('/'.join(path.split('/')[:(- 1)]) + '/')
df_output = appscale_utils.ssh(db_ip, keyname, 'df {}'.format(output_dir), method=subprocess.check_output)
available = int(df_output.split('\n')[1].split()[3])
if (backup_size > (available * PADDING_PERCENTAGE)):
raise BRException('{} has insufficient space: {}/{}'.format(db_ip, (available * PADDING_PERCENTAGE), backup_size))
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
create_tar = 'find . -regex ".*/snapshots/[0-9]*/.*" -exec tar --transform="s/snapshots\\/[0-9]*\\///" -cf {0} {{}} +'.format(path)
appscale_utils.ssh(db_ip, keyname, 'cd {} && {}'.format(cassandra_dir, create_tar))
logging.info('Done with db backup.') | Backup Cassandra snapshot data directories/files.
Args:
path: A string containing the location to store the backup on each of the
DB machines.
keyname: A string containing the deployment's keyname.
Raises:
BRException if unable to find any Cassandra machines or if DB machine has
insufficient space. | AppDB/appscale/datastore/backup/cassandra_backup.py | backup_data | HafeezRai/appscale | 1 | python | def backup_data(path, keyname):
" Backup Cassandra snapshot data directories/files.\n\n Args:\n path: A string containing the location to store the backup on each of the\n DB machines.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db backup.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
for db_ip in db_ips:
appscale_utils.ssh(db_ip, keyname, '{} clearsnapshot'.format(NODE_TOOL))
appscale_utils.ssh(db_ip, keyname, '{} snapshot'.format(NODE_TOOL))
get_snapshot_size = 'find {0} -name "snapshots" -exec du -s {{}} \\;'.format(APPSCALE_DATA_DIR)
du_output = appscale_utils.ssh(db_ip, keyname, get_snapshot_size, method=subprocess.check_output)
backup_size = sum((int(line.split()[0]) for line in du_output.split('\n') if line))
output_dir = ('/'.join(path.split('/')[:(- 1)]) + '/')
df_output = appscale_utils.ssh(db_ip, keyname, 'df {}'.format(output_dir), method=subprocess.check_output)
available = int(df_output.split('\n')[1].split()[3])
if (backup_size > (available * PADDING_PERCENTAGE)):
raise BRException('{} has insufficient space: {}/{}'.format(db_ip, (available * PADDING_PERCENTAGE), backup_size))
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
create_tar = 'find . -regex ".*/snapshots/[0-9]*/.*" -exec tar --transform="s/snapshots\\/[0-9]*\\///" -cf {0} {{}} +'.format(path)
appscale_utils.ssh(db_ip, keyname, 'cd {} && {}'.format(cassandra_dir, create_tar))
logging.info('Done with db backup.') | def backup_data(path, keyname):
" Backup Cassandra snapshot data directories/files.\n\n Args:\n path: A string containing the location to store the backup on each of the\n DB machines.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db backup.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
for db_ip in db_ips:
appscale_utils.ssh(db_ip, keyname, '{} clearsnapshot'.format(NODE_TOOL))
appscale_utils.ssh(db_ip, keyname, '{} snapshot'.format(NODE_TOOL))
get_snapshot_size = 'find {0} -name "snapshots" -exec du -s {{}} \\;'.format(APPSCALE_DATA_DIR)
du_output = appscale_utils.ssh(db_ip, keyname, get_snapshot_size, method=subprocess.check_output)
backup_size = sum((int(line.split()[0]) for line in du_output.split('\n') if line))
output_dir = ('/'.join(path.split('/')[:(- 1)]) + '/')
df_output = appscale_utils.ssh(db_ip, keyname, 'df {}'.format(output_dir), method=subprocess.check_output)
available = int(df_output.split('\n')[1].split()[3])
if (backup_size > (available * PADDING_PERCENTAGE)):
raise BRException('{} has insufficient space: {}/{}'.format(db_ip, (available * PADDING_PERCENTAGE), backup_size))
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
create_tar = 'find . -regex ".*/snapshots/[0-9]*/.*" -exec tar --transform="s/snapshots\\/[0-9]*\\///" -cf {0} {{}} +'.format(path)
appscale_utils.ssh(db_ip, keyname, 'cd {} && {}'.format(cassandra_dir, create_tar))
logging.info('Done with db backup.')<|docstring|>Backup Cassandra snapshot data directories/files.
Args:
path: A string containing the location to store the backup on each of the
DB machines.
keyname: A string containing the deployment's keyname.
Raises:
BRException if unable to find any Cassandra machines or if DB machine has
insufficient space.<|endoftext|> |
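The disk-space guard in backup_data reduces to parsing the 'Available' column of df and comparing it, scaled by PADDING_PERCENTAGE, against the summed du sizes. A standalone sketch with made-up numbers (the real PADDING_PERCENTAGE is a module constant not shown here; 0.9 is an assumed stand-in):

PADDING_PERCENTAGE = 0.9  # assumed stand-in for the module constant

df_output = ('Filesystem 1K-blocks    Used Available Use% Mounted on\n'
             '/dev/vda1   41151808 9676420  29345052  25% /')
available = int(df_output.split('\n')[1].split()[3])  # 29345052 KiB free
backup_size = 20000000  # KiB, e.g. summed from `du -s` over snapshot dirs
if backup_size > available * PADDING_PERCENTAGE:
    raise RuntimeError('insufficient space for backup')
print('space check passed')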
fdea63ee4a2bb63495379abb18c68a14e69557ea27124d9f90fb1c7398ef4a60 | def restore_data(path, keyname, force=False):
" Restores the Cassandra backup.\n\n Args:\n path: A string containing the location on each of the DB machines to use\n for restoring data.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db restore.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
machines_without_restore = []
for db_ip in db_ips:
exit_code = appscale_utils.ssh(db_ip, keyname, 'ls {}'.format(path), method=subprocess.call)
if (exit_code != ExitCodes.SUCCESS):
machines_without_restore.append(db_ip)
if (machines_without_restore and (not force)):
logging.info('The following machines do not have a restore file: {}'.format(machines_without_restore))
response = raw_input('Would you like to continue? [y/N] ')
if (response not in ['Y', 'y']):
return
for db_ip in db_ips:
logging.info('Stopping Cassandra on {}'.format(db_ip))
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries = SERVICE_RETRIES
while (status != MonitStates.UNMONITORED):
appscale_utils.ssh(db_ip, keyname, 'monit stop {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to stop Cassandra')
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
logging.info('Restoring Cassandra data on {}'.format(db_ip))
clear_db = 'find {0} -regex ".*\\.\\(db\\|txt\\|log\\)$" -exec rm {{}} \\;'.format(cassandra_dir)
appscale_utils.ssh(db_ip, keyname, clear_db)
if (db_ip not in machines_without_restore):
appscale_utils.ssh(db_ip, keyname, 'tar xf {} -C {}'.format(path, cassandra_dir))
appscale_utils.ssh(db_ip, keyname, 'chown -R cassandra {}'.format(cassandra_dir))
logging.info('Starting Cassandra on {}'.format(db_ip))
retries = SERVICE_RETRIES
status = MonitStates.UNMONITORED
while (status != MonitStates.RUNNING):
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to start Cassandra')
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME))
logging.info('Waiting for Cassandra cluster to be ready')
db_ip = db_ips[0]
deadline = (time.time() + SCHEMA_CHANGE_TIMEOUT)
while True:
ready = True
try:
output = appscale_utils.ssh(db_ip, keyname, '{} status'.format(NODE_TOOL), method=subprocess.check_output)
nodes_ready = len([line for line in output.split('\n') if line.startswith('UN')])
if (nodes_ready < len(db_ips)):
ready = False
except CalledProcessError:
ready = False
if ready:
break
if (time.time() > deadline):
logging.warning('Cassandra cluster still not ready.')
break
time.sleep(3)
logging.info('Done with db restore.') | Restores the Cassandra backup.
Args:
path: A string containing the location on each of the DB machines to use
for restoring data.
keyname: A string containing the deployment's keyname.
Raises:
BRException if unable to find any Cassandra machines or if DB machine has
insufficient space. | AppDB/appscale/datastore/backup/cassandra_backup.py | restore_data | HafeezRai/appscale | 1 | python | def restore_data(path, keyname, force=False):
" Restores the Cassandra backup.\n\n Args:\n path: A string containing the location on each of the DB machines to use\n for restoring data.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db restore.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
machines_without_restore = []
for db_ip in db_ips:
exit_code = appscale_utils.ssh(db_ip, keyname, 'ls {}'.format(path), method=subprocess.call)
if (exit_code != ExitCodes.SUCCESS):
machines_without_restore.append(db_ip)
if (machines_without_restore and (not force)):
logging.info('The following machines do not have a restore file: {}'.format(machines_without_restore))
response = raw_input('Would you like to continue? [y/N] ')
if (response not in ['Y', 'y']):
return
for db_ip in db_ips:
logging.info('Stopping Cassandra on {}'.format(db_ip))
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries = SERVICE_RETRIES
while (status != MonitStates.UNMONITORED):
appscale_utils.ssh(db_ip, keyname, 'monit stop {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to stop Cassandra')
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
logging.info('Restoring Cassandra data on {}'.format(db_ip))
clear_db = 'find {0} -regex ".*\\.\\(db\\|txt\\|log\\)$" -exec rm {{}} \\;'.format(cassandra_dir)
appscale_utils.ssh(db_ip, keyname, clear_db)
if (db_ip not in machines_without_restore):
appscale_utils.ssh(db_ip, keyname, 'tar xf {} -C {}'.format(path, cassandra_dir))
appscale_utils.ssh(db_ip, keyname, 'chown -R cassandra {}'.format(cassandra_dir))
logging.info('Starting Cassandra on {}'.format(db_ip))
retries = SERVICE_RETRIES
status = MonitStates.UNMONITORED
while (status != MonitStates.RUNNING):
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to start Cassandra')
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME))
logging.info('Waiting for Cassandra cluster to be ready')
db_ip = db_ips[0]
deadline = (time.time() + SCHEMA_CHANGE_TIMEOUT)
while True:
ready = True
try:
output = appscale_utils.ssh(db_ip, keyname, '{} status'.format(NODE_TOOL), method=subprocess.check_output)
nodes_ready = len([line for line in output.split('\n') if line.startswith('UN')])
if (nodes_ready < len(db_ips)):
ready = False
except CalledProcessError:
ready = False
if ready:
break
if (time.time() > deadline):
logging.warning('Cassandra cluster still not ready.')
break
time.sleep(3)
logging.info('Done with db restore.') | def restore_data(path, keyname, force=False):
" Restores the Cassandra backup.\n\n Args:\n path: A string containing the location on each of the DB machines to use\n for restoring data.\n keyname: A string containing the deployment's keyname.\n Raises:\n BRException if unable to find any Cassandra machines or if DB machine has\n insufficient space.\n "
logging.info('Starting new db restore.')
db_ips = appscale_info.get_db_ips()
if (not db_ips):
raise BRException('Unable to find any Cassandra machines.')
machines_without_restore = []
for db_ip in db_ips:
exit_code = appscale_utils.ssh(db_ip, keyname, 'ls {}'.format(path), method=subprocess.call)
if (exit_code != ExitCodes.SUCCESS):
machines_without_restore.append(db_ip)
if (machines_without_restore and (not force)):
logging.info('The following machines do not have a restore file: {}'.format(machines_without_restore))
response = raw_input('Would you like to continue? [y/N] ')
if (response not in ['Y', 'y']):
return
for db_ip in db_ips:
logging.info('Stopping Cassandra on {}'.format(db_ip))
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries = SERVICE_RETRIES
while (status != MonitStates.UNMONITORED):
appscale_utils.ssh(db_ip, keyname, 'monit stop {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to stop Cassandra')
cassandra_dir = '{}/cassandra'.format(APPSCALE_DATA_DIR)
for db_ip in db_ips:
logging.info('Restoring Cassandra data on {}'.format(db_ip))
clear_db = 'find {0} -regex ".*\\.\\(db\\|txt\\|log\\)$" -exec rm {{}} \\;'.format(cassandra_dir)
appscale_utils.ssh(db_ip, keyname, clear_db)
if (db_ip not in machines_without_restore):
appscale_utils.ssh(db_ip, keyname, 'tar xf {} -C {}'.format(path, cassandra_dir))
appscale_utils.ssh(db_ip, keyname, 'chown -R cassandra {}'.format(cassandra_dir))
logging.info('Starting Cassandra on {}'.format(db_ip))
retries = SERVICE_RETRIES
status = MonitStates.UNMONITORED
while (status != MonitStates.RUNNING):
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME), method=subprocess.call)
time.sleep(3)
summary = appscale_utils.ssh(db_ip, keyname, 'monit summary', method=subprocess.check_output)
status = utils.monit_status(summary, CASSANDRA_MONIT_WATCH_NAME)
retries -= 1
if (retries < 0):
raise BRException('Unable to start Cassandra')
appscale_utils.ssh(db_ip, keyname, 'monit start {}'.format(CASSANDRA_MONIT_WATCH_NAME))
logging.info('Waiting for Cassandra cluster to be ready')
db_ip = db_ips[0]
deadline = (time.time() + SCHEMA_CHANGE_TIMEOUT)
while True:
ready = True
try:
output = appscale_utils.ssh(db_ip, keyname, '{} status'.format(NODE_TOOL), method=subprocess.check_output)
nodes_ready = len([line for line in output.split('\n') if line.startswith('UN')])
if (nodes_ready < len(db_ips)):
ready = False
except CalledProcessError:
ready = False
if ready:
break
if (time.time() > deadline):
logging.warning('Cassandra cluster still not ready.')
break
time.sleep(3)
logging.info('Done with db restore.')<|docstring|>Restores the Cassandra backup.
Args:
path: A string containing the location on each of the DB machines to use
for restoring data.
keyname: A string containing the deployment's keyname.
Raises:
BRException if unable to find any Cassandra machines or if DB machine has
insufficient space.<|endoftext|> |
0ad9e745a629a3ed6335f4b188b8e5f8a4d97f48fe0850d40835c244ff13c064 | def save_figure_array(figarr, f):
'save array representing colored figure (produced by figure2array) to file'
Image.fromarray(figarr[0]).save(f) | save array representing colored figure (produced by figure2array) to file | MC simulation/mcdose/mcdose/visualize.py | save_figure_array | qihuilyu/P2T | 0 | python | def save_figure_array(figarr, f):
Image.fromarray(figarr[0]).save(f) | def save_figure_array(figarr, f):
Image.fromarray(figarr[0]).save(f)<|docstring|>save array representing colored figure (produced by figure2array) to file<|endoftext|> |
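figure2array, referenced in the docstring, is not defined in this excerpt; a common contract, sketched here purely as an assumption, is a (1, H, W, 3) uint8 array so that figarr[0] is directly consumable by PIL:

import numpy as np
import matplotlib.pyplot as plt

def figure2array(fig):
    # Hypothetical implementation: render the Agg canvas, keep RGB only.
    fig.canvas.draw()
    rgba = np.asarray(fig.canvas.buffer_rgba())             # (H, W, 4) uint8
    return np.ascontiguousarray(rgba[np.newaxis, ..., :3])  # (1, H, W, 3)

fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
save_figure_array(figure2array(fig), 'example.png')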
ac7e228af34dd9e420f3153e85378d44bc0a0ae14fa9de7bf1eac1c74be94c4e | def create_volume_dose_figure(arr, col_labels=[], dpi=FIG_DPI, cmap='viridis', own_scale=False, return_fig=False, ndiff_cols=1):
'create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]\n with each row containing imshow() instances with matched value limits (vmin, vmax) according\n to data (min, max)\n '
(nrows, ncols, H, W) = arr.shape
axwidth_inches = 1.25
width_ratios = (([1.0] * ncols) + ([0.25] * 2))
figwidth = (axwidth_inches * np.sum(width_ratios))
figheight = ((((axwidth_inches / W) * H) * nrows) / 0.98)
fig = plt.figure(figsize=(figwidth, figheight), dpi=dpi)
spec = gridspec.GridSpec(nrows=nrows, ncols=(ncols + 2), width_ratios=width_ratios, wspace=0, hspace=0, left=0.0, right=1.0, bottom=0.0, top=0.98, figure=fig)
annotate_fontsize = 5
annotate_margin = 0.03
row_minmax = []
row_diff_absmax = []
for row in range(nrows):
rowarr = arr[row]
if (ndiff_cols > 0):
(vmin, vmax) = (np.amin(rowarr[:(- ndiff_cols)]), np.amax(rowarr[:(- ndiff_cols)]))
(diffmin, diffmax) = (np.amin(rowarr[(- ndiff_cols):]), np.amax(rowarr[(- ndiff_cols):]))
diffabsmax = max(abs(diffmin), abs(diffmax))
row_diff_absmax.append(diffabsmax)
else:
(vmin, vmax) = (np.amin(rowarr), np.amax(rowarr))
row_minmax.append((vmin, vmax))
for col in range(ncols):
if (col >= (ncols - ndiff_cols)):
this_norm = colors.Normalize(vmin=(- diffabsmax), vmax=diffabsmax)
this_cmap = 'RdBu_r'
this_fontcolor = 'black'
else:
this_norm = colors.Normalize(vmin=vmin, vmax=vmax)
this_cmap = cmap
this_fontcolor = 'white'
cellarr = rowarr[col]
ax = fig.add_subplot(spec[(row, col)])
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(cellarr, interpolation='none', aspect='equal', cmap=this_cmap, norm=(this_norm if (not own_scale) else None))
margin = 0.03
fmt = '{:0.2f}'
(cellmin, cellmax) = (np.amin(cellarr), np.amax(cellarr))
ax.text(margin, (1.0 - margin), fmt.format(cellmax), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
ax.text(margin, margin, fmt.format(cellmin), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
if ((row == 0) and (col < len(col_labels))):
ax.text(0.5, 1.01, col_labels[col], horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes)
cbar_ax = fig.add_subplot(spec[:, -2])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=0, vmax=1), cmap='viridis'), cax=cbar_ax, ticks=[])
if (ndiff_cols > 0):
cbar_ax2 = fig.add_subplot(spec[:, -1])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=(- 1), vmax=1), cmap='RdBu_r'), cax=cbar_ax2, ticks=[])
for row in range(nrows):
ypos_high = (1.0 - (float((row + margin)) / nrows))
ypos_low = (1.0 - (float(((row + 1) - margin)) / nrows))
(row_min, row_max) = row_minmax[row]
cbar_ax.text(0.5, ypos_high, '{:0.2f}'.format(row_max), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax.transAxes)
cbar_ax.text(0.5, ypos_low, '{:0.2f}'.format(row_min), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax.transAxes)
if (ndiff_cols > 0):
row_diffabsmax = row_diff_absmax[row]
cbar_ax2.text(0.5, ypos_high, '{:0.2f}'.format(row_diffabsmax), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax2.transAxes)
cbar_ax2.text(0.5, ypos_low, '{:0.2f}'.format((- row_diffabsmax)), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax2.transAxes)
if return_fig:
return fig
else:
return figure2array(fig) | create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]
with each row containing imshow() instances with matched value limits (vmin, vmax) according
to data (min, max) | MC simulation/mcdose/mcdose/visualize.py | create_volume_dose_figure | qihuilyu/P2T | 0 | python | def create_volume_dose_figure(arr, col_labels=[], dpi=FIG_DPI, cmap='viridis', own_scale=False, return_fig=False, ndiff_cols=1):
'create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]\n with each row containing imshow() instances with matched value limits (vmin, vmax) according\n to data (min, max)\n '
(nrows, ncols, H, W) = arr.shape
axwidth_inches = 1.25
width_ratios = (([1.0] * ncols) + ([0.25] * 2))
figwidth = (axwidth_inches * np.sum(width_ratios))
figheight = ((((axwidth_inches / W) * H) * nrows) / 0.98)
fig = plt.figure(figsize=(figwidth, figheight), dpi=dpi)
spec = gridspec.GridSpec(nrows=nrows, ncols=(ncols + 2), width_ratios=width_ratios, wspace=0, hspace=0, left=0.0, right=1.0, bottom=0.0, top=0.98, figure=fig)
annotate_fontsize = 5
annotate_margin = 0.03
row_minmax = []
row_diff_absmax = []
for row in range(nrows):
rowarr = arr[row]
if (ndiff_cols > 0):
(vmin, vmax) = (np.amin(rowarr[:(- ndiff_cols)]), np.amax(rowarr[:(- ndiff_cols)]))
(diffmin, diffmax) = (np.amin(rowarr[(- ndiff_cols):]), np.amax(rowarr[(- ndiff_cols):]))
diffabsmax = max(abs(diffmin), abs(diffmax))
row_diff_absmax.append(diffabsmax)
else:
(vmin, vmax) = (np.amin(rowarr), np.amax(rowarr))
row_minmax.append((vmin, vmax))
for col in range(ncols):
if (col >= (ncols - ndiff_cols)):
this_norm = colors.Normalize(vmin=(- diffabsmax), vmax=diffabsmax)
this_cmap = 'RdBu_r'
this_fontcolor = 'black'
else:
this_norm = colors.Normalize(vmin=vmin, vmax=vmax)
this_cmap = cmap
this_fontcolor = 'white'
cellarr = rowarr[col]
ax = fig.add_subplot(spec[(row, col)])
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(cellarr, interpolation='none', aspect='equal', cmap=this_cmap, norm=(this_norm if (not own_scale) else None))
margin = 0.03
fmt = '{:0.2f}'
(cellmin, cellmax) = (np.amin(cellarr), np.amax(cellarr))
ax.text(margin, (1.0 - margin), fmt.format(cellmax), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
ax.text(margin, margin, fmt.format(cellmin), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
if ((row == 0) and (col < len(col_labels))):
ax.text(0.5, 1.01, col_labels[col], horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes)
cbar_ax = fig.add_subplot(spec[:, -2])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=0, vmax=1), cmap='viridis'), cax=cbar_ax, ticks=[])
if (ndiff_cols > 0):
cbar_ax2 = fig.add_subplot(spec[:, -1])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=(- 1), vmax=1), cmap='RdBu_r'), cax=cbar_ax2, ticks=[])
for row in range(nrows):
ypos_high = (1.0 - (float((row + margin)) / nrows))
ypos_low = (1.0 - (float(((row + 1) - margin)) / nrows))
(row_min, row_max) = row_minmax[row]
cbar_ax.text(0.5, ypos_high, '{:0.2f}'.format(row_max), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax.transAxes)
cbar_ax.text(0.5, ypos_low, '{:0.2f}'.format(row_min), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax.transAxes)
if (ndiff_cols > 0):
row_diffabsmax = row_diff_absmax[row]
cbar_ax2.text(0.5, ypos_high, '{:0.2f}'.format(row_diffabsmax), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax2.transAxes)
cbar_ax2.text(0.5, ypos_low, '{:0.2f}'.format((- row_diffabsmax)), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax2.transAxes)
if return_fig:
return fig
else:
return figure2array(fig) | def create_volume_dose_figure(arr, col_labels=[], dpi=FIG_DPI, cmap='viridis', own_scale=False, return_fig=False, ndiff_cols=1):
'create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]\n with each row containing imshow() instances with matched value limits (vmin, vmax) according\n to data (min, max)\n '
(nrows, ncols, H, W) = arr.shape
axwidth_inches = 1.25
width_ratios = (([1.0] * ncols) + ([0.25] * 2))
figwidth = (axwidth_inches * np.sum(width_ratios))
figheight = ((((axwidth_inches / W) * H) * nrows) / 0.98)
fig = plt.figure(figsize=(figwidth, figheight), dpi=dpi)
spec = gridspec.GridSpec(nrows=nrows, ncols=(ncols + 2), width_ratios=width_ratios, wspace=0, hspace=0, left=0.0, right=1.0, bottom=0.0, top=0.98, figure=fig)
annotate_fontsize = 5
annotate_margin = 0.03
row_minmax = []
row_diff_absmax = []
for row in range(nrows):
rowarr = arr[row]
if (ndiff_cols > 0):
(vmin, vmax) = (np.amin(rowarr[:(- ndiff_cols)]), np.amax(rowarr[:(- ndiff_cols)]))
(diffmin, diffmax) = (np.amin(rowarr[(- ndiff_cols):]), np.amax(rowarr[(- ndiff_cols):]))
diffabsmax = max(abs(diffmin), abs(diffmax))
row_diff_absmax.append(diffabsmax)
else:
(vmin, vmax) = (np.amin(rowarr), np.amax(rowarr))
row_minmax.append((vmin, vmax))
for col in range(ncols):
if (col >= (ncols - ndiff_cols)):
this_norm = colors.Normalize(vmin=(- diffabsmax), vmax=diffabsmax)
this_cmap = 'RdBu_r'
this_fontcolor = 'black'
else:
this_norm = colors.Normalize(vmin=vmin, vmax=vmax)
this_cmap = cmap
this_fontcolor = 'white'
cellarr = rowarr[col]
ax = fig.add_subplot(spec[(row, col)])
ax.set_xticks([])
ax.set_yticks([])
ax.imshow(cellarr, interpolation='none', aspect='equal', cmap=this_cmap, norm=(this_norm if (not own_scale) else None))
margin = 0.03
fmt = '{:0.2f}'
(cellmin, cellmax) = (np.amin(cellarr), np.amax(cellarr))
ax.text(margin, (1.0 - margin), fmt.format(cellmax), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='top', transform=ax.transAxes)
ax.text(margin, margin, fmt.format(cellmin), fontsize=annotate_fontsize, color=this_fontcolor, horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
if ((row == 0) and (col < len(col_labels))):
ax.text(0.5, 1.01, col_labels[col], horizontalalignment='center', verticalalignment='bottom', transform=ax.transAxes)
cbar_ax = fig.add_subplot(spec[:, -2])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=0, vmax=1), cmap='viridis'), cax=cbar_ax, ticks=[])
if (ndiff_cols > 0):
cbar_ax2 = fig.add_subplot(spec[:, -1])
fig.colorbar(cm.ScalarMappable(norm=colors.Normalize(vmin=(- 1), vmax=1), cmap='RdBu_r'), cax=cbar_ax2, ticks=[])
for row in range(nrows):
ypos_high = (1.0 - (float((row + margin)) / nrows))
ypos_low = (1.0 - (float(((row + 1) - margin)) / nrows))
(row_min, row_max) = row_minmax[row]
cbar_ax.text(0.5, ypos_high, '{:0.2f}'.format(row_max), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax.transAxes)
cbar_ax.text(0.5, ypos_low, '{:0.2f}'.format(row_min), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax.transAxes)
if (ndiff_cols > 0):
row_diffabsmax = row_diff_absmax[row]
cbar_ax2.text(0.5, ypos_high, '{:0.2f}'.format(row_diffabsmax), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='top', transform=cbar_ax2.transAxes)
cbar_ax2.text(0.5, ypos_low, '{:0.2f}'.format((- row_diffabsmax)), fontsize=annotate_fontsize, horizontalalignment='center', verticalalignment='bottom', transform=cbar_ax2.transAxes)
if return_fig:
return fig
else:
return figure2array(fig)<|docstring|>create figure of axes rows mapped to each slice in numpy array [Rows, Cols, H, W]
with each row containing imshow() instances with matched value limits (vmin, vmax) according
to data (min, max)<|endoftext|> |
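A usage sketch under stated assumptions: the input stack is [rows, cols, H, W], the last column holds a signed difference image (ndiff_cols=1), and the return value is the image array produced by figure2array.

import numpy as np

arr = np.random.rand(2, 3, 32, 32)      # rows of [input, prediction, difference]
arr[:, -1] = arr[:, 1] - arr[:, 0]      # signed diff drawn with the RdBu_r column
img = create_volume_dose_figure(arr, col_labels=['input', 'pred', 'diff'])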
d171085426402c5b89bdce4b65eeb79d5c7d6e31a2748d97506464d92e73ac2a | def make_image_figure(colorbar=True, dpi=FIG_DPI):
'makes standard figure with large imshow axes and colorbar to right'
fig = plt.figure(dpi=dpi)
if colorbar:
ax_im = fig.add_axes([0, 0, 0.87, 1.0])
ax_cbar = fig.add_axes([0.89, 0.05, 0.04, 0.9])
else:
ax_im = fig.add_axes([0, 0, 1.0, 1.0])
ax_cbar = None
return (fig, ax_im, ax_cbar) | makes standard figure with large imshow axes and colorbar to right | MC simulation/mcdose/mcdose/visualize.py | make_image_figure | qihuilyu/P2T | 0 | python | def make_image_figure(colorbar=True, dpi=FIG_DPI):
fig = plt.figure(dpi=dpi)
if colorbar:
ax_im = fig.add_axes([0, 0, 0.87, 1.0])
ax_cbar = fig.add_axes([0.89, 0.05, 0.04, 0.9])
else:
ax_im = fig.add_axes([0, 0, 1.0, 1.0])
ax_cbar = None
return (fig, ax_im, ax_cbar) | def make_image_figure(colorbar=True, dpi=FIG_DPI):
fig = plt.figure(dpi=dpi)
if colorbar:
ax_im = fig.add_axes([0, 0, 0.87, 1.0])
ax_cbar = fig.add_axes([0.89, 0.05, 0.04, 0.9])
else:
ax_im = fig.add_axes([0, 0, 1.0, 1.0])
ax_cbar = None
return (fig, ax_im, ax_cbar)<|docstring|>makes standard figure with large imshow axes and colorbar to right<|endoftext|> |
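A minimal usage sketch of the layout helper above; the random image stands in for real data.

import numpy as np

fig, ax_im, ax_cbar = make_image_figure(colorbar=True)
im = ax_im.imshow(np.random.rand(64, 64), cmap='viridis')
fig.colorbar(im, cax=ax_cbar)  # fill the pre-allocated colorbar axes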
5120693bc549135756e069bb3d51d7409995847d42fd6e3e285573333982d809 | def plot_dose(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
'See _plot_dose for function signature'
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_dose(ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | See _plot_dose for function signature | MC simulation/mcdose/mcdose/visualize.py | plot_dose | qihuilyu/P2T | 0 | python | def plot_dose(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_dose(ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | def plot_dose(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_dose(ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)<|docstring|>See _plot_dose for function signature<|endoftext|>
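A hedged usage sketch: the positional argument is the 2-D dose slice handed through to _plot_dose, and extra imshow keyword arguments ride along via **kwargs.

import numpy as np

dose_slice = np.random.rand(64, 64)        # placeholder dose distribution
rgb = plot_dose(dose_slice, cmap='magma')  # returns a figure2array image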
3533a0b49e5bf5453e1e251aeb8695a80604c6f7c841803a6ec3a2e1447e27cc | def _plot_dose(ax_im, ax_cbar, arr, **kwargs):
'plots array using imshow with colorbar then converts back to png compatible rgb array'
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
im = ax_im.imshow(arr, interpolation='nearest', **kwargs)
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
return (ax_im, ax_cbar) | plots array using imshow with colorbar then converts back to png compatible rgb array | MC simulation/mcdose/mcdose/visualize.py | _plot_dose | qihuilyu/P2T | 0 | python | def _plot_dose(ax_im, ax_cbar, arr, **kwargs):
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
im = ax_im.imshow(arr, interpolation='nearest', **kwargs)
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
return (ax_im, ax_cbar) | def _plot_dose(ax_im, ax_cbar, arr, **kwargs):
kwargs['cmap'] = kwargs.get('cmap', 'viridis')
im = ax_im.imshow(arr, interpolation='nearest', **kwargs)
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
return (ax_im, ax_cbar)<|docstring|>plots array using imshow with colorbar then converts back to png compatible rgb array<|endoftext|> |
7fdc80fc34baeba640820792787b32e00fe64bad154244b3feb1458274948c8c | def plot_gamma(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
'See _plot_gamma for function signature'
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | See _plot_gamma for function signature | MC simulation/mcdose/mcdose/visualize.py | plot_gamma | qihuilyu/P2T | 0 | python | def plot_gamma(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | def plot_gamma(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)<|docstring|>See _plot_gamma for function signature<|endoftext|>
28a3d986a731d3a604b82efb40aa58ac3224cb27f72d182ec8eb67d97e64af59 | def _plot_gamma(fig, ax_im, ax_cbar, arr, annotate=None, **kwargs):
'plots array using imshow with colorbar then converts back to png compatible rgb array'
ignored_cmap = get_ignored_cmap()
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar) | plots array using imshow with colorbar then converts back to png compatible rgb array | MC simulation/mcdose/mcdose/visualize.py | _plot_gamma | qihuilyu/P2T | 0 | python | def _plot_gamma(fig, ax_im, ax_cbar, arr, annotate=None, **kwargs):
ignored_cmap = get_ignored_cmap()
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar) | def _plot_gamma(fig, ax_im, ax_cbar, arr, annotate=None, **kwargs):
ignored_cmap = get_ignored_cmap()
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar)<|docstring|>plots array using imshow with colorbar then converts back to png compatible rgb array<|endoftext|> |
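The masking idiom above treats negative gamma values as "ignored" voxels drawn with a separate colormap; a small self-contained check of that idiom:

import numpy as np
from numpy.ma import masked_array

arr = np.array([[-1.0, 0.5], [1.2, -1.0]])
ignored = masked_array(arr, arr >= 0)  # mask everything except the negative cells
print(ignored)                         # only the -1.0 entries remain unmasked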
82d495d4b94ba5e0b5148f5722fcbc469f5c180cbd5f60ca8f6fe7c296430b2d | def plot_gamma_components(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
'See _plot_gamma_components for function signature'
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma_components(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | See _plot_gamma_components for function signature | MC simulation/mcdose/mcdose/visualize.py | plot_gamma_components | qihuilyu/P2T | 0 | python | def plot_gamma_components(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma_components(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig) | def plot_gamma_components(*args, colorbar=True, dpi=FIG_DPI, **kwargs):
(fig, ax_im, ax_cbar) = make_image_figure(colorbar, dpi)
_plot_gamma_components(fig, ax_im, ax_cbar, *args, **kwargs)
return figure2array(fig)<|docstring|>See _plot_gamma_components for function signature<|endoftext|> |
62c6f0171e87aa63569dd2bce3ff0a70e4aeedd4e6ec8d0108cd0ee779b2b3f1 | def _plot_gamma_components(fig, ax_im, ax_cbar, arr_dd, arr_dta, annotate=None, array_spacing=2, **kwargs):
'plots array using imshow with colorbar then converts back to png compatible rgb array'
ignored_cmap = get_ignored_cmap()
arr = np.concatenate([arr_dd, ((- 1) * np.ones((arr_dd.shape[0], array_spacing))), arr_dta], axis=1)
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
ax_im.text(0.02, 0.02, 'dd: {:0.2f}%'.format((dd_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
ax_im.text(0.52, 0.02, 'dta: {:0.2f}%'.format((dta_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar) | plots array using imshow with colorbar then converts back to png compatible rgb array | MC simulation/mcdose/mcdose/visualize.py | _plot_gamma_components | qihuilyu/P2T | 0 | python | def _plot_gamma_components(fig, ax_im, ax_cbar, arr_dd, arr_dta, annotate=None, array_spacing=2, **kwargs):
ignored_cmap = get_ignored_cmap()
arr = np.concatenate([arr_dd, ((- 1) * np.ones((arr_dd.shape[0], array_spacing))), arr_dta], axis=1)
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
ax_im.text(0.02, 0.02, 'dd: {:0.2f}%'.format((dd_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
ax_im.text(0.52, 0.02, 'dta: {:0.2f}%'.format((dta_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar) | def _plot_gamma_components(fig, ax_im, ax_cbar, arr_dd, arr_dta, annotate=None, array_spacing=2, **kwargs):
ignored_cmap = get_ignored_cmap()
arr = np.concatenate([arr_dd, ((- 1) * np.ones((arr_dd.shape[0], array_spacing))), arr_dta], axis=1)
im = ax_im.imshow(arr, cmap='RdBu_r', interpolation='nearest', norm=MidpointNormalize(0, 10, 1), **kwargs)
im1 = masked_array(arr, (arr >= 0))
ax_im.imshow(im1, cmap=ignored_cmap, interpolation='nearest')
ax_im.set_axis_off()
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
ax_im.text(0.02, 0.02, 'dd: {:0.2f}%'.format((dd_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
ax_im.text(0.52, 0.02, 'dta: {:0.2f}%'.format((dta_passing * 100)), fontsize=9, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=ax_im.transAxes)
if (ax_cbar is not None):
plt.colorbar(im, ax_cbar)
if annotate:
ax_im.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return (ax_im, ax_cbar)<|docstring|>plots array using imshow with colorbar then converts back to png compatible rgb array<|endoftext|> |
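The passing-rate expression used above counts negative entries as ignored: pass = (N(x <= 1) - N(x < 0)) / N(x >= 0). A small numeric check of that formula:

import numpy as np

arr = np.array([-1.0, 0.2, 0.9, 1.5])  # one ignored voxel, two passing, one failing
passing = ((np.count_nonzero(arr <= 1) - np.count_nonzero(arr < 0))
           / np.count_nonzero(arr >= 0))
print(passing)  # (3 - 1) / 3 ≈ 0.667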
9669ad04f44b5f800d4da52351263284a1f4f01f193ef823533048a8ba51ecfd | def plot_profile(arr_pred, arr_true=None, annotate=None, dpi=FIG_DPI, **kwargs):
'plots line profiles at various depths then converts back to png compatible rgb array'
idx = [0.1, 0.3, 0.5, 0.7, 0.9]
color = ['green', 'red', 'yellow', 'skyblue', 'blue']
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(1, 1, 1)
x_axis = [i for i in range(arr_pred.shape[1])]
for j in range(len(idx)):
sliceidx = int((idx[j] * arr_pred.shape[0]))
dose_pred = arr_pred[sliceidx, :].tolist()
ax.plot(x_axis, dose_pred, color[j], label=('profile_at_%d_pixel' % sliceidx), **kwargs)
if (arr_true is not None):
dose_true = arr_true[sliceidx, :].tolist()
ax.plot(x_axis, dose_true, color[j], linestyle=':', label=None, **kwargs)
plt.legend()
ax.set_ylabel('dose')
if annotate:
ax.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return figure2array(fig) | plots line profiles at various depths then converts back to png compatible rgb array | MC simulation/mcdose/mcdose/visualize.py | plot_profile | qihuilyu/P2T | 0 | python | def plot_profile(arr_pred, arr_true=None, annotate=None, dpi=FIG_DPI, **kwargs):
idx = [0.1, 0.3, 0.5, 0.7, 0.9]
color = ['green', 'red', 'yellow', 'skyblue', 'blue']
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(1, 1, 1)
x_axis = [i for i in range(arr_pred.shape[1])]
for j in range(len(idx)):
sliceidx = int((idx[j] * arr_pred.shape[0]))
dose_pred = arr_pred[sliceidx, :].tolist()
ax.plot(x_axis, dose_pred, color[j], label=('profile_at_%d_pixel' % sliceidx), **kwargs)
if (arr_true is not None):
dose_true = arr_true[sliceidx, :].tolist()
ax.plot(x_axis, dose_true, color[j], linestyle=':', label=None, **kwargs)
plt.legend()
ax.set_ylabel('dose')
if annotate:
ax.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return figure2array(fig) | def plot_profile(arr_pred, arr_true=None, annotate=None, dpi=FIG_DPI, **kwargs):
idx = [0.1, 0.3, 0.5, 0.7, 0.9]
color = ['green', 'red', 'yellow', 'skyblue', 'blue']
fig = plt.figure(dpi=dpi)
ax = fig.add_subplot(1, 1, 1)
x_axis = [i for i in range(arr_pred.shape[1])]
for j in range(len(idx)):
sliceidx = int((idx[j] * arr_pred.shape[0]))
dose_pred = arr_pred[sliceidx, :].tolist()
ax.plot(x_axis, dose_pred, color[j], label=('profile_at_%d_pixel' % sliceidx), **kwargs)
if (arr_true is not None):
dose_true = arr_true[sliceidx, :].tolist()
ax.plot(x_axis, dose_true, color[j], linestyle=':', label=None, **kwargs)
plt.legend()
ax.set_ylabel('dose')
if annotate:
ax.text(0.02, 0.02, str(annotate), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='left', verticalalignment='bottom', transform=fig.transFigure)
return figure2array(fig)<|docstring|>plots line profiles at various depths then converts back to png compatible rgb array<|endoftext|> |
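A hedged usage sketch: solid lines are the prediction profiles, dotted lines the reference, sampled at 10/30/50/70/90% depth.

import numpy as np

pred = np.random.rand(100, 80)                # depth x lateral dose grid
ref = pred + 0.01 * np.random.randn(100, 80)  # noisy reference for comparison
rgb = plot_profile(pred, arr_true=ref, annotate='beamlet 3')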
911d0591f331cf5c3c1f2f08adc94e0f00496c218769ffd09ce50bb275a1620d | def _plot_gamma_scatter(fig, ax, arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh, **kwargs):
'place voxels on scatter-plot based on coordinates in dd-dta space'
select = np.logical_and((arr_gamma >= 0), np.isfinite(arr_gamma))
dd_flat = ((np.ravel(arr_dd[select]) * dd_thresh) * 100)
dta_flat = (np.ravel(arr_dta[select]) * dta_thresh)
gamma_flat = np.ravel(arr_gamma[select])
scat = ax.scatter(dd_flat, dta_flat, s=4, marker='o', color='black')
dd_max = np.max(dd_flat)
dta_max = np.max(dta_flat)
axis_buffer = 0.01
ax.set_xlim([((- axis_buffer) * dd_max), (dd_max + (axis_buffer * dd_max))])
ax.set_ylim([((- axis_buffer) * dta_max), (dta_max + (axis_buffer * dta_max))])
ax.set_xlabel('Percent dose difference')
ax.set_ylabel('Distance to agreement (mm)')
lineargs = {'linewidth': 1, 'linestyle': '-', 'color': 'black'}
if (dd_max > (dd_thresh * 100)):
ax.add_line(Line2D(ax.get_xlim(), [dta_thresh, dta_thresh], **lineargs))
if (dta_max > dta_thresh):
ax.add_line(Line2D([(dd_thresh * 100), (dd_thresh * 100)], ax.get_ylim(), **lineargs))
try:
gamma_passing = ((np.count_nonzero((arr_gamma <= 1)) - np.count_nonzero((arr_gamma < 0))) / np.count_nonzero((arr_gamma >= 0)))
except:
gamma_passing = np.nan
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
nautofail = (np.count_nonzero(np.isinf(arr_gamma)) / np.count_nonzero((arr_gamma >= 0)))
ax.text(1.0, 1.0, 'gamma: {:0.2f}%\ndd: {:0.2f}%\ndta: {:0.2f}%\ninf: {:0.2f}%'.format((gamma_passing * 100), (dd_passing * 100), (dta_passing * 100), (nautofail * 100)), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
return ax | place voxels on scatter-plot based on coordinates in dd-dta space | MC simulation/mcdose/mcdose/visualize.py | _plot_gamma_scatter | qihuilyu/P2T | 0 | python | def _plot_gamma_scatter(fig, ax, arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh, **kwargs):
select = np.logical_and((arr_gamma >= 0), np.isfinite(arr_gamma))
dd_flat = ((np.ravel(arr_dd[select]) * dd_thresh) * 100)
dta_flat = (np.ravel(arr_dta[select]) * dta_thresh)
gamma_flat = np.ravel(arr_gamma[select])
scat = ax.scatter(dd_flat, dta_flat, s=4, marker='o', color='black')
dd_max = np.max(dd_flat)
dta_max = np.max(dta_flat)
axis_buffer = 0.01
ax.set_xlim([((- axis_buffer) * dd_max), (dd_max + (axis_buffer * dd_max))])
ax.set_ylim([((- axis_buffer) * dta_max), (dta_max + (axis_buffer * dta_max))])
ax.set_xlabel('Percent dose difference')
ax.set_ylabel('Distance to agreement (mm)')
lineargs = {'linewidth': 1, 'linestyle': '-', 'color': 'black'}
if (dd_max > (dd_thresh * 100)):
ax.add_line(Line2D(ax.get_xlim(), [dta_thresh, dta_thresh], **lineargs))
if (dta_max > dta_thresh):
ax.add_line(Line2D([(dd_thresh * 100), (dd_thresh * 100)], ax.get_ylim(), **lineargs))
try:
gamma_passing = ((np.count_nonzero((arr_gamma <= 1)) - np.count_nonzero((arr_gamma < 0))) / np.count_nonzero((arr_gamma >= 0)))
except:
gamma_passing = np.nan
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
nautofail = (np.count_nonzero(np.isinf(arr_gamma)) / np.count_nonzero((arr_gamma >= 0)))
ax.text(1.0, 1.0, 'gamma: {:0.2f}%\ndd: {:0.2f}%\ndta: {:0.2f}%\ninf: {:0.2f}%'.format((gamma_passing * 100), (dd_passing * 100), (dta_passing * 100), (nautofail * 100)), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
return ax | def _plot_gamma_scatter(fig, ax, arr_dd, arr_dta, arr_gamma, dd_thresh, dta_thresh, **kwargs):
select = np.logical_and((arr_gamma >= 0), np.isfinite(arr_gamma))
dd_flat = ((np.ravel(arr_dd[select]) * dd_thresh) * 100)
dta_flat = (np.ravel(arr_dta[select]) * dta_thresh)
gamma_flat = np.ravel(arr_gamma[select])
scat = ax.scatter(dd_flat, dta_flat, s=4, marker='o', color='black')
dd_max = np.max(dd_flat)
dta_max = np.max(dta_flat)
axis_buffer = 0.01
ax.set_xlim([((- axis_buffer) * dd_max), (dd_max + (axis_buffer * dd_max))])
ax.set_ylim([((- axis_buffer) * dta_max), (dta_max + (axis_buffer * dta_max))])
ax.set_xlabel('Percent dose difference')
ax.set_ylabel('Distance to agreement (mm)')
lineargs = {'linewidth': 1, 'linestyle': '-', 'color': 'black'}
if (dd_max > (dd_thresh * 100)):
ax.add_line(Line2D(ax.get_xlim(), [dta_thresh, dta_thresh], **lineargs))
if (dta_max > dta_thresh):
ax.add_line(Line2D([(dd_thresh * 100), (dd_thresh * 100)], ax.get_ylim(), **lineargs))
try:
gamma_passing = ((np.count_nonzero((arr_gamma <= 1)) - np.count_nonzero((arr_gamma < 0))) / np.count_nonzero((arr_gamma >= 0)))
except:
gamma_passing = np.nan
try:
dd_passing = ((np.count_nonzero((arr_dd <= 1)) - np.count_nonzero((arr_dd < 0))) / np.count_nonzero((arr_dd >= 0)))
except:
dd_passing = np.nan
try:
dta_passing = ((np.count_nonzero((arr_dta <= 1)) - np.count_nonzero((arr_dta < 0))) / np.count_nonzero((arr_dta >= 0)))
except:
dta_passing = np.nan
nautofail = (np.count_nonzero(np.isinf(arr_gamma)) / np.count_nonzero((arr_gamma >= 0)))
ax.text(1.0, 1.0, 'gamma: {:0.2f}%\ndd: {:0.2f}%\ndta: {:0.2f}%\ninf: {:0.2f}%'.format((gamma_passing * 100), (dd_passing * 100), (dta_passing * 100), (nautofail * 100)), fontsize=11, bbox={'facecolor': 'white', 'alpha': 1.0}, horizontalalignment='right', verticalalignment='top', transform=ax.transAxes)
return ax<|docstring|>place voxels on scatter-plot based on coordinates in dd-dta space<|endoftext|> |
3717d9429a5969b08ba50eea4b31efb3bb940aa62045ab757b4010fc0633535f | def register_custom_scalars_layout(writer):
"define custom plotting in 'Custom Scalars' tab of TensorBoard"
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[layout_pb2.Category(title='all', chart=[layout_pb2.Chart(title='loss', multiline=layout_pb2.MultilineChartContent(tag=['train/loss', 'eval/loss/test', 'eval/loss/train'])), layout_pb2.Chart(title='eval-avg_gammapass/0.1mm_0.1%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.1mm_0.1%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.2mm_0.2%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.2mm_0.2%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.5mm_0.5%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.5mm_0.5%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/1.0mm_1.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/1.0mm_1.0%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/2.0mm_2.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/2.0mm_2.0%/.*'])), layout_pb2.Chart(title='MSE', multiline=layout_pb2.MultilineChartContent(tag=['.*mse.*']))])]))
writer.add_summary(layout_summary) | define custom plotting in 'Custom Scalars' tab of TensorBoard | MC simulation/mcdose/mcdose/visualize.py | register_custom_scalars_layout | qihuilyu/P2T | 0 | python | def register_custom_scalars_layout(writer):
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[layout_pb2.Category(title='all', chart=[layout_pb2.Chart(title='loss', multiline=layout_pb2.MultilineChartContent(tag=['train/loss', 'eval/loss/test', 'eval/loss/train'])), layout_pb2.Chart(title='eval-avg_gammapass/0.1mm_0.1%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.1mm_0.1%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.2mm_0.2%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.2mm_0.2%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.5mm_0.5%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.5mm_0.5%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/1.0mm_1.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/1.0mm_1.0%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/2.0mm_2.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/2.0mm_2.0%/.*'])), layout_pb2.Chart(title='MSE', multiline=layout_pb2.MultilineChartContent(tag=['.*mse.*']))])]))
writer.add_summary(layout_summary) | def register_custom_scalars_layout(writer):
layout_summary = summary_lib.custom_scalar_pb(layout_pb2.Layout(category=[layout_pb2.Category(title='all', chart=[layout_pb2.Chart(title='loss', multiline=layout_pb2.MultilineChartContent(tag=['train/loss', 'eval/loss/test', 'eval/loss/train'])), layout_pb2.Chart(title='eval-avg_gammapass/0.1mm_0.1%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.1mm_0.1%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.2mm_0.2%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.2mm_0.2%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/0.5mm_0.5%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/0.5mm_0.5%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/1.0mm_1.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/1.0mm_1.0%/.*'])), layout_pb2.Chart(title='eval-avg_gammapass/2.0mm_2.0%', multiline=layout_pb2.MultilineChartContent(tag=['eval-avg_gammapass/2.0mm_2.0%/.*'])), layout_pb2.Chart(title='MSE', multiline=layout_pb2.MultilineChartContent(tag=['.*mse.*']))])]))
writer.add_summary(layout_summary)<|docstring|>define custom plotting in 'Custom Scalars' tab of TensorBoard<|endoftext|> |
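A usage sketch assuming the TF1-style summary writer this helper targets; the log directory is a placeholder.

import tensorflow as tf  # TF1.x API assumed by summary_lib/layout_pb2 above

writer = tf.summary.FileWriter('/tmp/logdir')  # placeholder path
register_custom_scalars_layout(writer)         # writes the layout proto once
writer.flush()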
ec7c09253b91e542651ea06cf80fa12afd4e979218d4d6666523d0fdfa449106 | def tile(array_list, perrow, square=False, pad_width=5, pad_intensity=1000):
'Takes a list of arrays and number of images per row and constructs a tiled array for margin-less\n visualization\n\n Args:\n array_list -- list of np.ndarrays to be tiled in row-major order\n perrow -- integer specifying number of images per row\n\n Optional Args:\n square -- Try to make length and width equal by tiling vertical columns side-by-side\n pad_width -- # columns between vertical tiling columns\n pad_intensity -- # intensity value of padding cells\n\n Returns:\n numpy matrix/2dArray\n '
if (not isinstance(array_list, list)):
array_list_old = array_list
ndims = len(array_list_old.shape)
if (ndims == 3):
array_list = []
array_list_old_2dshape = (array_list_old.shape[1], array_list_old.shape[2])
for i in range(array_list_old.shape[0]):
array_list.append(array_list_old[i, :, :].reshape(array_list_old_2dshape))
elif (ndims == 2):
array_list = [array_list_old]
nimages = len(array_list)
expect_row_shape = (array_list[0].shape[0], (perrow * array_list[0].shape[1]))
rows_list = []
this_row_array = None
for i in range((nimages + 1)):
if ((i % perrow) == 0):
if (i > 0):
rows_list.append(this_row_array)
this_row_array = None
if (i < nimages):
this_row_array = array_list[i]
else:
this_row_array = np.concatenate((this_row_array, array_list[i]), axis=1)
for (i, row) in enumerate(rows_list):
if (row.shape != expect_row_shape):
extra = np.zeros((expect_row_shape[0], (expect_row_shape[1] - row.shape[1])))
row = np.concatenate((row, extra), axis=1)
rows_list[i] = row
if square:
if (pad_width >= 0):
pad = pad_width
else:
pad = 0
if (pad_intensity <= 0):
pad_intensity = 0
rows = (len(rows_list) * expect_row_shape[0])
cols = expect_row_shape[1]
area = (rows * cols)
pref_rows = math.ceil((math.sqrt(area) / expect_row_shape[0]))
cols_list = []
this_col_array = []
for i in range((len(rows_list) + 1)):
if (((i % pref_rows) == 0) or (i >= len(rows_list))):
if (i > 0):
cols_list.append(this_col_array)
if (i >= len(rows_list)):
break
if ((pad > 0) and (i < (len(rows_list) - 1))):
cols_list.append((pad_intensity * np.ones(((pref_rows * expect_row_shape[0]), pad))))
this_col_array = rows_list[i]
else:
this_col_array = np.concatenate((this_col_array, rows_list[i]), axis=0)
for (i, col) in enumerate(cols_list):
if (col.shape[0] != (pref_rows * expect_row_shape[0])):
extra = np.zeros((((expect_row_shape[0] * pref_rows) - col.shape[0]), expect_row_shape[1]))
row = np.concatenate((col, extra), axis=0)
cols_list[i] = row
tiled_array = np.concatenate(cols_list, axis=1)
else:
tiled_array = np.concatenate(rows_list, axis=0)
return tiled_array | Takes a list of arrays and number of images per row and constructs a tiled array for margin-less
visualization
Args:
array_list -- list of np.ndarrays to be tiled in row-major order
perrow -- integer specifying number of images per row
Optional Args:
square -- Try to make length and width equal by tiling vertical columns side-by-side
pad_width -- # columns between vertical tiling columns
pad_intensity -- # intensity value of padding cells
Returns:
numpy matrix/2dArray | MC simulation/mcdose/mcdose/visualize.py | tile | qihuilyu/P2T | 0 | python | def tile(array_list, perrow, square=False, pad_width=5, pad_intensity=1000):
'Takes a list of arrays and number of images per row and constructs a tiled array for margin-less\n visualization\n\n Args:\n array_list -- list of np.ndarrays to be tiled in row-major order\n perrow -- integer specifying number of images per row\n\n Optional Args:\n square -- Try to make length and width equal by tiling vertical columns side-by-side\n pad_width -- # columns between vertical tiling columns\n pad_intensity -- # intensity value of padding cells\n\n Returns:\n numpy matrix/2dArray\n '
if (not isinstance(array_list, list)):
array_list_old = array_list
ndims = len(array_list_old.shape)
if (ndims == 3):
array_list = []
array_list_old_2dshape = (array_list_old.shape[1], array_list_old.shape[2])
for i in range(array_list_old.shape[0]):
array_list.append(array_list_old[i, :, :].reshape(array_list_old_2dshape))
elif (ndims == 2):
array_list = [array_list_old]
nimages = len(array_list)
expect_row_shape = (array_list[0].shape[0], (perrow * array_list[0].shape[1]))
rows_list = []
this_row_array = None
for i in range((nimages + 1)):
if ((i % perrow) == 0):
if (i > 0):
rows_list.append(this_row_array)
this_row_array = None
if (i < nimages):
this_row_array = array_list[i]
else:
this_row_array = np.concatenate((this_row_array, array_list[i]), axis=1)
for (i, row) in enumerate(rows_list):
if (row.shape != expect_row_shape):
extra = np.zeros((expect_row_shape[0], (expect_row_shape[1] - row.shape[1])))
row = np.concatenate((row, extra), axis=1)
rows_list[i] = row
if square:
if (pad_width >= 0):
pad = pad_width
else:
pad = 0
if (pad_intensity <= 0):
pad_intensity = 0
rows = (len(rows_list) * expect_row_shape[0])
cols = expect_row_shape[1]
area = (rows * cols)
pref_rows = math.ceil((math.sqrt(area) / expect_row_shape[0]))
cols_list = []
this_col_array = []
for i in range((len(rows_list) + 1)):
if (((i % pref_rows) == 0) or (i >= len(rows_list))):
if (i > 0):
cols_list.append(this_col_array)
if (i >= len(rows_list)):
break
if ((pad > 0) and (i < (len(rows_list) - 1))):
cols_list.append((pad_intensity * np.ones(((pref_rows * expect_row_shape[0]), pad))))
this_col_array = rows_list[i]
else:
this_col_array = np.concatenate((this_col_array, rows_list[i]), axis=0)
for (i, col) in enumerate(cols_list):
if (col.shape[0] != (pref_rows * expect_row_shape[0])):
extra = np.zeros((((expect_row_shape[0] * pref_rows) - col.shape[0]), expect_row_shape[1]))
row = np.concatenate((col, extra), axis=0)
cols_list[i] = row
tiled_array = np.concatenate(cols_list, axis=1)
else:
tiled_array = np.concatenate(rows_list, axis=0)
return tiled_array | def tile(array_list, perrow, square=False, pad_width=5, pad_intensity=1000):
'Takes a list of arrays and number of images per row and constructs a tiled array for margin-less\n visualization\n\n Args:\n array_list -- list of np.ndarrays to be tiled in row-major order\n perrow -- integer specifying number of images per row\n\n Optional Args:\n square -- Try to make length and width equal by tiling vertical columns side-by-side\n pad_width -- # columns between vertical tiling columns\n pad_intensity -- # intensity value of padding cells\n\n Returns:\n numpy matrix/2dArray\n '
if (not isinstance(array_list, list)):
array_list_old = array_list
ndims = len(array_list_old.shape)
if (ndims == 3):
array_list = []
array_list_old_2dshape = (array_list_old.shape[1], array_list_old.shape[2])
for i in range(array_list_old.shape[0]):
array_list.append(array_list_old[i, :, :].reshape(array_list_old_2dshape))
elif (ndims == 2):
array_list = [array_list_old]
nimages = len(array_list)
expect_row_shape = (array_list[0].shape[0], (perrow * array_list[0].shape[1]))
rows_list = []
this_row_array = None
for i in range((nimages + 1)):
if ((i % perrow) == 0):
if (i > 0):
rows_list.append(this_row_array)
this_row_array = None
if (i < nimages):
this_row_array = array_list[i]
else:
this_row_array = np.concatenate((this_row_array, array_list[i]), axis=1)
for (i, row) in enumerate(rows_list):
if (row.shape != expect_row_shape):
extra = np.zeros((expect_row_shape[0], (expect_row_shape[1] - row.shape[1])))
row = np.concatenate((row, extra), axis=1)
rows_list[i] = row
if square:
if (pad_width >= 0):
pad = pad_width
else:
pad = 0
if (pad_intensity <= 0):
pad_intensity = 0
rows = (len(rows_list) * expect_row_shape[0])
cols = expect_row_shape[1]
area = (rows * cols)
pref_rows = math.ceil((math.sqrt(area) / expect_row_shape[0]))
cols_list = []
this_col_array = []
for i in range((len(rows_list) + 1)):
if (((i % pref_rows) == 0) or (i >= len(rows_list))):
if (i > 0):
cols_list.append(this_col_array)
if (i >= len(rows_list)):
break
if ((pad > 0) and (i < (len(rows_list) - 1))):
cols_list.append((pad_intensity * np.ones(((pref_rows * expect_row_shape[0]), pad))))
this_col_array = rows_list[i]
else:
this_col_array = np.concatenate((this_col_array, rows_list[i]), axis=0)
for (i, col) in enumerate(cols_list):
if (col.shape[0] != (pref_rows * expect_row_shape[0])):
extra = np.zeros((((expect_row_shape[0] * pref_rows) - col.shape[0]), expect_row_shape[1]))
row = np.concatenate((col, extra), axis=0)
cols_list[i] = row
tiled_array = np.concatenate(cols_list, axis=1)
else:
tiled_array = np.concatenate(rows_list, axis=0)
return tiled_array<|docstring|>Takes a list of arrays and number of images per row and constructs a tiled array for margin-less
visualization
Args:
array_list -- list of np.ndarrays to be tiled in row-major order
perrow -- integer specifying number of images per row
Optional Args:
square -- Try to make length and width equal by tiling vertical columns side-by-side
pad_width -- # columns between vertical tiling columns
pad_intensity -- # intensity value of padding cells
Returns:
numpy matrix/2dArray<|endoftext|> |
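A usage sketch: ten 32x32 slices tiled four per row, squared up with a bright 5-pixel divider between column groups.

import numpy as np

slices = [np.random.rand(32, 32) for _ in range(10)]
mosaic = tile(slices, perrow=4, square=True, pad_width=5, pad_intensity=1000)
print(mosaic.shape)  # single 2-D array ready for imshow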
b1750c72d6d47246a130e6ea159f6719f41358b881be9aabde85dc5dd0f4682e | @abstractmethod
def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by current reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | Verify whether the specified file or files format is supported by current reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | @abstractmethod
def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by current reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | @abstractmethod
def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by current reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.')<|docstring|>Verify whether the specified file or files format is supported by current reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
ac95ef0278076f08a22d2cdd709393329466a87bc71e92cd814d4cc9af585810 | @abstractmethod
def read(self, data: Union[(Sequence[str], str)], **kwargs) -> Union[(Sequence[Any], Any)]:
'\n Read image data from specified file or files.\n Note that it returns the raw data, so different readers return different image data type.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for actual `read` API of 3rd party libs.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | Read image data from specified file or files.
Note that it returns the raw data, so different readers return different image data type.
Args:
data: file name or a list of file names to read.
kwargs: additional args for actual `read` API of 3rd party libs. | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | @abstractmethod
def read(self, data: Union[(Sequence[str], str)], **kwargs) -> Union[(Sequence[Any], Any)]:
'\n Read image data from specified file or files.\n Note that it returns the raw data, so different readers return different image data type.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for actual `read` API of 3rd party libs.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | @abstractmethod
def read(self, data: Union[(Sequence[str], str)], **kwargs) -> Union[(Sequence[Any], Any)]:
'\n Read image data from specified file or files.\n Note that it returns the raw data, so different readers return different image data type.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for actual `read` API of 3rd party libs.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.')<|docstring|>Read image data from specified file or files.
Note that it returns the raw data, so different readers return different image data type.
Args:
data: file name or a list of file names to read.
kwargs: additional args for actual `read` API of 3rd party libs.<|endoftext|> |
fe4a4c6a69aef5328b549e732a600a325292d27365122d49ffd81870b23165bc | @abstractmethod
def get_data(self, img) -> Tuple[(np.ndarray, Dict)]:
'\n Extract data array and meta data from loaded image and return them.\n This function must return 2 objects, first is numpy array of image data, second is dict of meta data.\n\n Args:\n img: an image object loaded from a image file or a list of image objects.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | Extract data array and meta data from loaded image and return them.
This function must return 2 objects, first is numpy array of image data, second is dict of meta data.
Args:
img: an image object loaded from a image file or a list of image objects. | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | @abstractmethod
def get_data(self, img) -> Tuple[(np.ndarray, Dict)]:
'\n Extract data array and meta data from loaded image and return them.\n This function must return 2 objects, first is numpy array of image data, second is dict of meta data.\n\n Args:\n img: an image object loaded from a image file or a list of image objects.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.') | @abstractmethod
def get_data(self, img) -> Tuple[(np.ndarray, Dict)]:
'\n Extract data array and meta data from loaded image and return them.\n This function must return 2 objects, first is numpy array of image data, second is dict of meta data.\n\n Args:\n img: an image object loaded from a image file or a list of image objects.\n\n '
raise NotImplementedError(f'Subclass {self.__class__.__name__} must implement this method.')<|docstring|>Extract data array and meta data from loaded image and return them.
This function must return 2 objects, first is numpy array of image data, second is dict of meta data.
Args:
img: an image object loaded from a image file or a list of image objects.<|endoftext|> |
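A hypothetical subclass illustrating the contract the three abstract methods define; NpyReader is not part of MONAI and is only a sketch.

import numpy as np

class NpyReader(ImageReader):  # hypothetical reader, for illustration only
    def verify_suffix(self, filename):
        return str(filename).endswith('.npy')

    def read(self, data, **kwargs):
        return np.load(data)  # raw object; the type is reader-specific by design

    def get_data(self, img):
        return img, {'spatial_shape': np.asarray(img.shape)}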
4843a33ad6a10a120d58e39192f207a08aa73fcec0c10be064c6b13d7c1a0353 | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by ITK reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
return has_itk | Verify whether the specified file or files format is supported by ITK reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by ITK reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
return has_itk | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by ITK reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
return has_itk<|docstring|>Verify whether the specified file or files format is supported by ITK reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
00f400ede9f0cc142e4577d2a333ac4b2b8764915c4272b63c0445c6e7b4ad90 | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is ITK image object or list of ITK image objects.\n\n Args:\n data: file name or a list of file names to read,\n kwargs: additional args for `itk.imread` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/InsightSoftwareConsortium/ITK/blob/master/Wrapping/Generators/Python/itkExtras.py\n\n '
img_: List[Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
if os.path.isdir(name):
names_generator = itk.GDCMSeriesFileNames.New()
names_generator.SetUseSeriesDetails(True)
names_generator.AddSeriesRestriction('0008|0021')
names_generator.SetDirectory(name)
series_uid = names_generator.GetSeriesUIDs()
if (len(series_uid) == 0):
raise FileNotFoundError(f'no DICOMs in: {name}.')
if (len(series_uid) > 1):
raise OSError(f'the directory: {name} contains more than one DICOM series.')
series_identifier = series_uid[0]
name = names_generator.GetFileNames(series_identifier)
img_.append(itk.imread(name, **kwargs_))
return (img_ if (len(filenames) > 1) else img_[0]) | Read image data from specified file or files.
Note that the returned object is ITK image object or list of ITK image objects.
Args:
data: file name or a list of file names to read,
kwargs: additional args for `itk.imread` API, will override `self.kwargs` for existing keys.
More details about available args:
https://github.com/InsightSoftwareConsortium/ITK/blob/master/Wrapping/Generators/Python/itkExtras.py | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is ITK image object or list of ITK image objects.\n\n Args:\n data: file name or a list of file names to read,\n kwargs: additional args for `itk.imread` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/InsightSoftwareConsortium/ITK/blob/master/Wrapping/Generators/Python/itkExtras.py\n\n '
img_: List[Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
if os.path.isdir(name):
names_generator = itk.GDCMSeriesFileNames.New()
names_generator.SetUseSeriesDetails(True)
names_generator.AddSeriesRestriction('0008|0021')
names_generator.SetDirectory(name)
series_uid = names_generator.GetSeriesUIDs()
if (len(series_uid) == 0):
raise FileNotFoundError(f'no DICOMs in: {name}.')
if (len(series_uid) > 1):
raise OSError(f'the directory: {name} contains more than one DICOM series.')
series_identifier = series_uid[0]
name = names_generator.GetFileNames(series_identifier)
img_.append(itk.imread(name, **kwargs_))
return (img_ if (len(filenames) > 1) else img_[0]) | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is ITK image object or list of ITK image objects.\n\n Args:\n data: file name or a list of file names to read,\n kwargs: additional args for `itk.imread` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/InsightSoftwareConsortium/ITK/blob/master/Wrapping/Generators/Python/itkExtras.py\n\n '
img_: List[Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
if os.path.isdir(name):
names_generator = itk.GDCMSeriesFileNames.New()
names_generator.SetUseSeriesDetails(True)
names_generator.AddSeriesRestriction('0008|0021')
names_generator.SetDirectory(name)
series_uid = names_generator.GetSeriesUIDs()
if (len(series_uid) == 0):
raise FileNotFoundError(f'no DICOMs in: {name}.')
if (len(series_uid) > 1):
raise OSError(f'the directory: {name} contains more than one DICOM series.')
series_identifier = series_uid[0]
name = names_generator.GetFileNames(series_identifier)
img_.append(itk.imread(name, **kwargs_))
return (img_ if (len(filenames) > 1) else img_[0])<|docstring|>Read image data from specified file or files.
Note that the returned object is an ITK image object or a list of ITK image objects.
Args:
data: file name or a list of file names to read,
kwargs: additional args for `itk.imread` API, will override `self.kwargs` for existing keys.
More details about available args:
https://github.com/InsightSoftwareConsortium/ITK/blob/master/Wrapping/Generators/Python/itkExtras.py<|endoftext|> |
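A minimal usage sketch for the `read` method above, assuming MONAI's `ITKReader` is importable from `monai.data`; the file and directory paths are hypothetical. A directory argument is resolved to a single DICOM series via GDCMSeriesFileNames, as the body shows.

from monai.data import ITKReader

reader = ITKReader()
img = reader.read('volume.nii.gz')   # hypothetical single file -> one ITK image object
series = reader.read('./ct_series')  # hypothetical directory -> one DICOM series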
cb1ccc80b743fa3d2ca079302e9b2c315966e56bec9c4f8c136cf0209e9d9d3b | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a ITK image object loaded from a image file or a list of ITK image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['original_affine'] = self._get_affine(i)
header['affine'] = header['original_affine'].copy()
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | Extract data array and meta data from loaded image and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `affine`, `original_affine`, and `spatial_shape` and stores them in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: an ITK image object loaded from an image file, or a list of ITK image objects. | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a ITK image object loaded from a image file or a list of ITK image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['original_affine'] = self._get_affine(i)
header['affine'] = header['original_affine'].copy()
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a ITK image object loaded from a image file or a list of ITK image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['original_affine'] = self._get_affine(i)
header['affine'] = header['original_affine'].copy()
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta)<|docstring|>Extract data array and meta data from loaded image and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `affine`, `original_affine`, and `spatial_shape` and stores them in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: an ITK image object loaded from an image file, or a list of ITK image objects.<|endoftext|>
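A follow-on sketch combining `read` and `get_data`; the file names are hypothetical and the two volumes are assumed to share shape and affine so the stack succeeds.

reader = ITKReader()
img = reader.read(['a.nii.gz', 'b.nii.gz'])  # hypothetical paths
data, meta = reader.get_data(img)
# the volumes are stacked along a new first dimension; meta carries
# 'affine', 'original_affine' and 'spatial_shape' from the first image
print(data.shape, meta['spatial_shape'])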
74f469defaa4fcd707c2e29d2a00d7abe5ef41f2c8aa7164ad9adff1e98e7258 | def _get_meta_dict(self, img) -> Dict:
'\n Get all the meta data of the image and convert to dict type.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
img_meta_dict = img.GetMetaDataDictionary()
meta_dict = {}
for key in img_meta_dict.GetKeys():
if key.startswith('ITK_original_'):
continue
if ((key == 'NRRD_measurement frame') and (int(itk.Version.GetITKMajorVersion()) == 5) and (int(itk.Version.GetITKMinorVersion()) < 2)):
warnings.warn("Ignoring 'measurement frame' field. Correct reading of NRRD05 files requires ITK >= 5.2: `pip install --upgrade --pre itk`")
continue
meta_dict[key] = img_meta_dict[key]
meta_dict['origin'] = np.asarray(img.GetOrigin())
meta_dict['spacing'] = np.asarray(img.GetSpacing())
meta_dict['direction'] = itk.array_from_matrix(img.GetDirection())
return meta_dict | Get all the meta data of the image and convert to dict type.
Args:
img: an ITK image object loaded from an image file. | monai/data/image_reader.py | _get_meta_dict | lyndonboone/MONAI | 1 | python | def _get_meta_dict(self, img) -> Dict:
'\n Get all the meta data of the image and convert to dict type.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
img_meta_dict = img.GetMetaDataDictionary()
meta_dict = {}
for key in img_meta_dict.GetKeys():
if key.startswith('ITK_original_'):
continue
if ((key == 'NRRD_measurement frame') and (int(itk.Version.GetITKMajorVersion()) == 5) and (int(itk.Version.GetITKMinorVersion()) < 2)):
warnings.warn("Ignoring 'measurement frame' field. Correct reading of NRRD05 files requires ITK >= 5.2: `pip install --upgrade --pre itk`")
continue
meta_dict[key] = img_meta_dict[key]
meta_dict['origin'] = np.asarray(img.GetOrigin())
meta_dict['spacing'] = np.asarray(img.GetSpacing())
meta_dict['direction'] = itk.array_from_matrix(img.GetDirection())
return meta_dict | def _get_meta_dict(self, img) -> Dict:
'\n Get all the meta data of the image and convert to dict type.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
img_meta_dict = img.GetMetaDataDictionary()
meta_dict = {}
for key in img_meta_dict.GetKeys():
if key.startswith('ITK_original_'):
continue
if ((key == 'NRRD_measurement frame') and (int(itk.Version.GetITKMajorVersion()) == 5) and (int(itk.Version.GetITKMinorVersion()) < 2)):
warnings.warn("Ignoring 'measurement frame' field. Correct reading of NRRD05 files requires ITK >= 5.2: `pip install --upgrade --pre itk`")
continue
meta_dict[key] = img_meta_dict[key]
meta_dict['origin'] = np.asarray(img.GetOrigin())
meta_dict['spacing'] = np.asarray(img.GetSpacing())
meta_dict['direction'] = itk.array_from_matrix(img.GetDirection())
return meta_dict<|docstring|>Get all the meta data of the image and convert to dict type.
Args:
img: an ITK image object loaded from an image file.<|endoftext|>
b5974427049978a727064d0e7a583727f2a121b1fc29dc42a92520ecf08a699f | def _get_affine(self, img):
'\n Get or construct the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n Construct Affine matrix based on direction, spacing, origin information.\n Refer to: https://github.com/RSIP-Vision/medio\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
direction = itk.array_from_matrix(img.GetDirection())
spacing = np.asarray(img.GetSpacing())
origin = np.asarray(img.GetOrigin())
direction = np.asarray(direction)
affine: np.ndarray = np.eye((direction.shape[0] + 1))
affine[(slice((- 1)), slice((- 1)))] = (direction @ np.diag(spacing))
affine[(slice((- 1)), (- 1))] = origin
return affine | Get or construct the affine matrix of the image; it can be used to correct
spacing or orientation, or to execute spatial transforms.
Construct Affine matrix based on direction, spacing, origin information.
Refer to: https://github.com/RSIP-Vision/medio
Args:
img: an ITK image object loaded from an image file. | monai/data/image_reader.py | _get_affine | lyndonboone/MONAI | 1 | python | def _get_affine(self, img):
'\n Get or construct the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n Construct Affine matrix based on direction, spacing, origin information.\n Refer to: https://github.com/RSIP-Vision/medio\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
direction = itk.array_from_matrix(img.GetDirection())
spacing = np.asarray(img.GetSpacing())
origin = np.asarray(img.GetOrigin())
direction = np.asarray(direction)
affine: np.ndarray = np.eye((direction.shape[0] + 1))
affine[(slice((- 1)), slice((- 1)))] = (direction @ np.diag(spacing))
affine[(slice((- 1)), (- 1))] = origin
return affine | def _get_affine(self, img):
'\n Get or construct the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n Construct Affine matrix based on direction, spacing, origin information.\n Refer to: https://github.com/RSIP-Vision/medio\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
direction = itk.array_from_matrix(img.GetDirection())
spacing = np.asarray(img.GetSpacing())
origin = np.asarray(img.GetOrigin())
direction = np.asarray(direction)
affine: np.ndarray = np.eye((direction.shape[0] + 1))
affine[(slice((- 1)), slice((- 1)))] = (direction @ np.diag(spacing))
affine[(slice((- 1)), (- 1))] = origin
return affine<|docstring|>Get or construct the affine matrix of the image; it can be used to correct
spacing or orientation, or to execute spatial transforms.
Construct Affine matrix based on direction, spacing, origin information.
Refer to: https://github.com/RSIP-Vision/medio
Args:
img: an ITK image object loaded from an image file.<|endoftext|>
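The affine assembled above is just direction @ diag(spacing) in the upper-left block with the origin in the last column; a standalone numpy sketch with made-up values:

import numpy as np

direction = np.eye(3)                     # assumed identity orientation
spacing = np.array([1.0, 1.0, 2.5])       # made-up voxel spacing
origin = np.array([-90.0, -126.0, 72.0])  # made-up physical origin

affine = np.eye(4)
affine[:-1, :-1] = direction @ np.diag(spacing)  # scale and orient
affine[:-1, -1] = origin                         # translate
# affine now maps voxel indices to physical coordinates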
e5393307964b8d85f0ce04173cc19e4989680bdc11760edaa54e00669d3b0256 | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n "
shape = list(itk.size(img))
shape.reverse()
return np.asarray(shape) | Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: an ITK image object loaded from an image file. | monai/data/image_reader.py | _get_spatial_shape | lyndonboone/MONAI | 1 | python | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n "
shape = list(itk.size(img))
shape.reverse()
return np.asarray(shape) | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n "
shape = list(itk.size(img))
shape.reverse()
return np.asarray(shape)<|docstring|>Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: an ITK image object loaded from an image file.<|endoftext|>
592ba53f9bfb591d61381873c9bbe75031ae4bcd7b9c1b5224f2de163225925d | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Following PyTorch conventions, the returned array data has contiguous channels,\n e.g. for an RGB image, all red channel image pixels are contiguous in memory.\n The first axis of the returned array is the channel axis.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
channels = img.GetNumberOfComponentsPerPixel()
if (channels == 1):
return itk.array_view_from_image(img, keep_axes=False)
arr = itk.array_view_from_image(img, keep_axes=False)
dest = list(range(img.ndim))
source = dest.copy()
end = source.pop()
source.insert(0, end)
return np.moveaxis(arr, source, dest) | Get the raw array data of the image, converted to Numpy array.
Following PyTorch conventions, the returned array data has contiguous channels,
e.g. for an RGB image, all red channel image pixels are contiguous in memory.
The first axis of the returned array is the channel axis.
Args:
img: an ITK image object loaded from an image file. | monai/data/image_reader.py | _get_array_data | lyndonboone/MONAI | 1 | python | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Following PyTorch conventions, the returned array data has contiguous channels,\n e.g. for an RGB image, all red channel image pixels are contiguous in memory.\n The first axis of the returned array is the channel axis.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
channels = img.GetNumberOfComponentsPerPixel()
if (channels == 1):
return itk.array_view_from_image(img, keep_axes=False)
arr = itk.array_view_from_image(img, keep_axes=False)
dest = list(range(img.ndim))
source = dest.copy()
end = source.pop()
source.insert(0, end)
return np.moveaxis(arr, source, dest) | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Following PyTorch conventions, the returned array data has contiguous channels,\n e.g. for an RGB image, all red channel image pixels are contiguous in memory.\n The first axis of the returned array is the channel axis.\n\n Args:\n img: a ITK image object loaded from a image file.\n\n '
channels = img.GetNumberOfComponentsPerPixel()
if (channels == 1):
return itk.array_view_from_image(img, keep_axes=False)
arr = itk.array_view_from_image(img, keep_axes=False)
dest = list(range(img.ndim))
source = dest.copy()
end = source.pop()
source.insert(0, end)
return np.moveaxis(arr, source, dest)<|docstring|>Get the raw array data of the image, converted to Numpy array.
Following PyTorch conventions, the returned array data has contiguous channels,
e.g. for an RGB image, all red channel image pixels are contiguous in memory.
The first axis of the returned array is the channel axis.
Args:
img: an ITK image object loaded from an image file.<|endoftext|>
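The axis shuffle in _get_array_data is easier to see on a concrete array: per the docstring, the trailing channel axis of a multi-channel view ends up in front. An equivalent-in-spirit sketch with a made-up shape:

import numpy as np

arr = np.zeros((64, 64, 3))    # hypothetical H x W x RGB channel-last view
chw = np.moveaxis(arr, -1, 0)  # move the channel axis to the front
print(chw.shape)               # (3, 64, 64)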
d599d4a8b0b22a702603d5d7ef8fa49bd197f9d1a09d570665bc12cc6a41410c | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Nibabel reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
suffixes: Sequence[str] = ['nii', 'nii.gz']
return (has_nib and is_supported_format(filename, suffixes)) | Verify whether the format of the specified file or files is supported by the Nibabel reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Nibabel reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
suffixes: Sequence[str] = ['nii', 'nii.gz']
return (has_nib and is_supported_format(filename, suffixes)) | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Nibabel reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n\n '
suffixes: Sequence[str] = ['nii', 'nii.gz']
return (has_nib and is_supported_format(filename, suffixes))<|docstring|>Verify whether the format of the specified file or files is supported by the Nibabel reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
7ca6f46f332119a631912f9d7dc5b1c389e48558cc703ed2b031c9eec5b06687 | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Nibabel image object or list of Nibabel image objects.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `nibabel.load` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py\n\n '
img_: List[Nifti1Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = nib.load(name, **kwargs_)
img = correct_nifti_header_if_necessary(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | Read image data from specified file or files.
Note that the returned object is a Nibabel image object or a list of Nibabel image objects.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `nibabel.load` API, will override `self.kwargs` for existing keys.
More details about available args:
https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Nibabel image object or list of Nibabel image objects.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `nibabel.load` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py\n\n '
img_: List[Nifti1Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = nib.load(name, **kwargs_)
img = correct_nifti_header_if_necessary(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Nibabel image object or list of Nibabel image objects.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `nibabel.load` API, will override `self.kwargs` for existing keys.\n More details about available args:\n https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py\n\n '
img_: List[Nifti1Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = nib.load(name, **kwargs_)
img = correct_nifti_header_if_necessary(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0])<|docstring|>Read image data from specified file or files.
Note that the returned object is a Nibabel image object or a list of Nibabel image objects.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `nibabel.load` API, will override `self.kwargs` for existing keys.
More details about available args:
https://github.com/nipy/nibabel/blob/master/nibabel/loadsave.py<|endoftext|> |
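A minimal usage sketch for the Nibabel `read` above, assuming MONAI's `NibabelReader` is importable from `monai.data`; the path is hypothetical.

from monai.data import NibabelReader

reader = NibabelReader()
img = reader.read('brain.nii.gz')  # hypothetical path
# the header has already been checked by correct_nifti_header_if_necessary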
c799b00d08b9a9764b0739495a11ea7b6ed1539a249518f306f14954aff8fc0f | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Nibabel image object loaded from a image file or a list of Nibabel image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['affine'] = self._get_affine(i)
header['original_affine'] = self._get_affine(i)
header['as_closest_canonical'] = self.as_closest_canonical
if self.as_closest_canonical:
i = nib.as_closest_canonical(i)
header['affine'] = self._get_affine(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | Extract data array and meta data from loaded image and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `affine`, `original_affine`, and `spatial_shape` and stores them in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a Nibabel image object loaded from an image file, or a list of Nibabel image objects. | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Nibabel image object loaded from a image file or a list of Nibabel image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['affine'] = self._get_affine(i)
header['original_affine'] = self._get_affine(i)
header['as_closest_canonical'] = self.as_closest_canonical
if self.as_closest_canonical:
i = nib.as_closest_canonical(i)
header['affine'] = self._get_affine(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | def get_data(self, img):
'\n Extract data array and meta data from loaded image and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `affine`, `original_affine`, and `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Nibabel image object loaded from a image file or a list of Nibabel image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['affine'] = self._get_affine(i)
header['original_affine'] = self._get_affine(i)
header['as_closest_canonical'] = self.as_closest_canonical
if self.as_closest_canonical:
i = nib.as_closest_canonical(i)
header['affine'] = self._get_affine(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = self._get_array_data(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta)<|docstring|>Extract data array and meta data from loaded image and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `affine`, `original_affine`, and `spatial_shape` and stores them in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a Nibabel image object loaded from an image file, or a list of Nibabel image objects.<|endoftext|>
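A follow-on sketch for `get_data` with canonical reorientation enabled; the path is hypothetical. The main subtlety in the body above is that 'original_affine' keeps the on-disk affine while 'affine' reflects the reoriented image.

reader = NibabelReader(as_closest_canonical=True)
img = reader.read('brain.nii.gz')  # hypothetical path
data, meta = reader.get_data(img)
print(meta['original_affine'])  # affine as stored on disk
print(meta['affine'])           # affine after closest-canonical reorientation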
8a8267ee03fe933b4a236e42c7e3dd575289e8d550ae9b68f703ee9dbade73e7 | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
header = img.header.as_byteswapped('<')
return dict(header) | Get all the meta data of the image and convert it to dict type.
Args:
img: a Nibabel image object loaded from an image file. | monai/data/image_reader.py | _get_meta_dict | lyndonboone/MONAI | 1 | python | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
header = img.header.as_byteswapped('<')
return dict(header) | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
header = img.header.as_byteswapped('<')
return dict(header)<|docstring|>Get all the meta data of the image and convert it to dict type.
Args:
img: a Nibabel image object loaded from an image file.<|endoftext|>
43f1acdc99b2b7283de62797c830422172fc87c3190bc9d9c4d92701cc8747f2 | def _get_affine(self, img):
'\n Get the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
return np.array(img.affine, copy=True) | Get the affine matrix of the image; it can be used to correct
spacing or orientation, or to execute spatial transforms.
Args:
img: a Nibabel image object loaded from an image file. | monai/data/image_reader.py | _get_affine | lyndonboone/MONAI | 1 | python | def _get_affine(self, img):
'\n Get the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
return np.array(img.affine, copy=True) | def _get_affine(self, img):
'\n Get the affine matrix of the image, it can be used to correct\n spacing, orientation or execute spatial transforms.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
return np.array(img.affine, copy=True)<|docstring|>Get the affine matrix of the image; it can be used to correct
spacing or orientation, or to execute spatial transforms.
Args:
img: a Nibabel image object loaded from an image file.<|endoftext|>
b097c8a758fa6b8dfc7320c242e85c3889e32d65e7a3afe898c43a10c26183db | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n "
header = img.header.as_byteswapped('<')
ndim = header['dim'][0]
spatial_rank = min(ndim, 3)
return np.asarray(header['dim'][1:(spatial_rank + 1)]) | Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: a Nibabel image object loaded from an image file. | monai/data/image_reader.py | _get_spatial_shape | lyndonboone/MONAI | 1 | python | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n "
header = img.header.as_byteswapped('<')
ndim = header['dim'][0]
spatial_rank = min(ndim, 3)
return np.asarray(header['dim'][1:(spatial_rank + 1)]) | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n "
header = img.header.as_byteswapped('<')
ndim = header['dim'][0]
spatial_rank = min(ndim, 3)
return np.asarray(header['dim'][1:(spatial_rank + 1)])<|docstring|>Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: a Nibabel image object loaded from an image file.<|endoftext|>
4da40f70df7063d8badc8ab652f6f0cf65332281ac006e8a1cebff524ebf34e8 | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
_array = np.array(img.get_fdata(dtype=self.dtype))
img.uncache()
return _array | Get the raw array data of the image, converted to Numpy array.
Args:
img: a Nibabel image object loaded from an image file. | monai/data/image_reader.py | _get_array_data | lyndonboone/MONAI | 1 | python | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
_array = np.array(img.get_fdata(dtype=self.dtype))
img.uncache()
return _array | def _get_array_data(self, img):
'\n Get the raw array data of the image, converted to Numpy array.\n\n Args:\n img: a Nibabel image object loaded from a image file.\n\n '
_array = np.array(img.get_fdata(dtype=self.dtype))
img.uncache()
return _array<|docstring|>Get the raw array data of the image, converted to Numpy array.
Args:
img: a Nibabel image object loaded from an image file.<|endoftext|>
da8a63bc8578858f173e53b338610103bade555e1368e1a412b24b38ce214bf3 | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Numpy reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['npz', 'npy']
return is_supported_format(filename, suffixes) | Verify whether the format of the specified file or files is supported by the Numpy reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Numpy reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['npz', 'npy']
return is_supported_format(filename, suffixes) | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by Numpy reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['npz', 'npy']
return is_supported_format(filename, suffixes)<|docstring|>Verify whether the format of the specified file or files is supported by the Numpy reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
52cbe998b05b934bd2013c008e67e3a548588274fc0484d5a4652583be10d2ff | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Numpy array or list of Numpy arrays.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `numpy.load` API except `allow_pickle`, will override `self.kwargs` for existing keys.\n More details about available args:\n https://numpy.org/doc/stable/reference/generated/numpy.load.html\n\n '
img_: List[np.ndarray] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = np.load(name, allow_pickle=True, **kwargs_)
if name.endswith('.npz'):
npz_keys = ([f'arr_{i}' for i in range(len(img))] if (self.npz_keys is None) else self.npz_keys)
for k in npz_keys:
img_.append(img[k])
else:
img_.append(img)
return (img_ if (len(img_) > 1) else img_[0]) | Read image data from specified file or files.
Note that the returned object is a Numpy array or a list of Numpy arrays.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `numpy.load` API except `allow_pickle`, will override `self.kwargs` for existing keys.
More details about available args:
https://numpy.org/doc/stable/reference/generated/numpy.load.html | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Numpy array or list of Numpy arrays.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `numpy.load` API except `allow_pickle`, will override `self.kwargs` for existing keys.\n More details about available args:\n https://numpy.org/doc/stable/reference/generated/numpy.load.html\n\n '
img_: List[np.ndarray] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = np.load(name, allow_pickle=True, **kwargs_)
if name.endswith('.npz'):
npz_keys = ([f'arr_{i}' for i in range(len(img))] if (self.npz_keys is None) else self.npz_keys)
for k in npz_keys:
img_.append(img[k])
else:
img_.append(img)
return (img_ if (len(img_) > 1) else img_[0]) | def read(self, data: Union[(Sequence[str], str)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is Numpy array or list of Numpy arrays.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `numpy.load` API except `allow_pickle`, will override `self.kwargs` for existing keys.\n More details about available args:\n https://numpy.org/doc/stable/reference/generated/numpy.load.html\n\n '
img_: List[np.ndarray] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = np.load(name, allow_pickle=True, **kwargs_)
if name.endswith('.npz'):
npz_keys = ([f'arr_{i}' for i in range(len(img))] if (self.npz_keys is None) else self.npz_keys)
for k in npz_keys:
img_.append(img[k])
else:
img_.append(img)
return (img_ if (len(img_) > 1) else img_[0])<|docstring|>Read image data from specified file or files.
Note that the returned object is a Numpy array or a list of Numpy arrays.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `numpy.load` API except `allow_pickle`, will override `self.kwargs` for existing keys.
More details about available args:
https://numpy.org/doc/stable/reference/generated/numpy.load.html<|endoftext|> |
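A sketch of the npz branch above; the file name and keys are made up. With `npz_keys` left as None, every `arr_{i}` entry of the archive is loaded instead.

import numpy as np
from monai.data import NumpyReader

np.savez('sample.npz', image=np.zeros((8, 8)), label=np.ones((8, 8)))  # hypothetical file
reader = NumpyReader(npz_keys=['image', 'label'])
arrays = reader.read('sample.npz')  # one array per requested key
print(len(arrays))                  # 2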
be666806596a8aa342c0986a29d967c64e7cdce5de9907a0340e7f1c70b51016 | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape=data.shape` and stores in meta dict if the data is numpy array.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Numpy array loaded from a file or a list of Numpy arrays.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
if isinstance(img, np.ndarray):
img = (img,)
for i in ensure_tuple(img):
header = {}
if isinstance(i, np.ndarray):
header['spatial_shape'] = i.shape
img_array.append(i)
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | Extract data array and meta data from loaded data and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `spatial_shape=data.shape` and stores it in the meta dict if the data is a numpy array.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a Numpy array loaded from a file or a list of Numpy arrays. | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape=data.shape` and stores in meta dict if the data is numpy array.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Numpy array loaded from a file or a list of Numpy arrays.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
if isinstance(img, np.ndarray):
img = (img,)
for i in ensure_tuple(img):
header = {}
if isinstance(i, np.ndarray):
header['spatial_shape'] = i.shape
img_array.append(i)
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape=data.shape` and stores in meta dict if the data is numpy array.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a Numpy array loaded from a file or a list of Numpy arrays.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
if isinstance(img, np.ndarray):
img = (img,)
for i in ensure_tuple(img):
header = {}
if isinstance(i, np.ndarray):
header['spatial_shape'] = i.shape
img_array.append(i)
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta)<|docstring|>Extract data array and meta data from loaded data and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `spatial_shape=data.shape` and stores it in the meta dict if the data is a numpy array.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a Numpy array loaded from a file or a list of Numpy arrays.<|endoftext|> |
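A follow-on sketch of the stacking behavior of `get_data`; the paths are hypothetical and the arrays are assumed to share a shape so they can be stacked.

reader = NumpyReader()
arrays = reader.read(['a.npy', 'b.npy'])  # hypothetical paths
data, meta = reader.get_data(arrays)
# the arrays are stacked along a new first dimension;
# meta['spatial_shape'] is taken from the first array
print(data.shape, meta['spatial_shape'])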
bbd4e8cabe313a8d1c797e9ef93fdc459f4261972e1a330459d91ef6701fe77d | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by PIL reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['png', 'jpg', 'jpeg', 'bmp']
return (has_pil and is_supported_format(filename, suffixes)) | Verify whether the format of the specified file or files is supported by the PIL reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by PIL reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['png', 'jpg', 'jpeg', 'bmp']
return (has_pil and is_supported_format(filename, suffixes)) | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by PIL reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
suffixes: Sequence[str] = ['png', 'jpg', 'jpeg', 'bmp']
return (has_pil and is_supported_format(filename, suffixes))<|docstring|>Verify whether the format of the specified file or files is supported by the PIL reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
d8993fc50489d5492cbee7e05c99ea411c64f4835999c683c5b2e6d2ab2371b0 | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is PIL image or list of PIL image.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `Image.open` API in `read()`, will override `self.kwargs` for existing keys.\n Mode details about available args:\n https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open\n\n '
img_: List[PILImage.Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = PILImage.open(name, **kwargs_)
if callable(self.converter):
img = self.converter(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | Read image data from specified file or files.
Note that the returned object is a PIL image or a list of PIL images.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `Image.open` API in `read()`, will override `self.kwargs` for existing keys.
More details about available args:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is PIL image or list of PIL image.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `Image.open` API in `read()`, will override `self.kwargs` for existing keys.\n Mode details about available args:\n https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open\n\n '
img_: List[PILImage.Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = PILImage.open(name, **kwargs_)
if callable(self.converter):
img = self.converter(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is PIL image or list of PIL image.\n\n Args:\n data: file name or a list of file names to read.\n kwargs: additional args for `Image.open` API in `read()`, will override `self.kwargs` for existing keys.\n Mode details about available args:\n https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open\n\n '
img_: List[PILImage.Image] = []
filenames: Sequence[str] = ensure_tuple(data)
kwargs_ = self.kwargs.copy()
kwargs_.update(kwargs)
for name in filenames:
img = PILImage.open(name, **kwargs_)
if callable(self.converter):
img = self.converter(img)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0])<|docstring|>Read image data from specified file or files.
Note that the returned object is a PIL image or a list of PIL images.
Args:
data: file name or a list of file names to read.
kwargs: additional args for `Image.open` API in `read()`, will override `self.kwargs` for existing keys.
More details about available args:
https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.open<|endoftext|> |
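A usage sketch for the PIL `read` above; the path is hypothetical. The `converter` attribute, applied to each image in the loop, is a convenient hook, e.g. a grayscale conversion via PIL's convert():

from monai.data import PILReader

reader = PILReader(converter=lambda image: image.convert('L'))
img = reader.read('photo.png')  # hypothetical path; returns a PIL image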
8bf13abf255a1fa2a99a95532eedf29fbc8d3f7f302149b7944980a8c234c5fc | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a PIL Image object loaded from a file or a list of PIL Image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = np.asarray(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | Extract data array and meta data from loaded data and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `spatial_shape` and stores it in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a PIL Image object loaded from a file or a list of PIL Image objects. | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a PIL Image object loaded from a file or a list of PIL Image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = np.asarray(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta) | def get_data(self, img):
'\n Extract data array and meta data from loaded data and return them.\n This function returns 2 objects, first is numpy array of image data, second is dict of meta data.\n It constructs `spatial_shape` and stores in meta dict.\n If loading a list of files, stack them together and add a new dimension as first dimension,\n and use the meta data of the first image to represent the stacked result.\n\n Args:\n img: a PIL Image object loaded from a file or a list of PIL Image objects.\n\n '
img_array: List[np.ndarray] = []
compatible_meta: Dict = {}
for i in ensure_tuple(img):
header = self._get_meta_dict(i)
header['spatial_shape'] = self._get_spatial_shape(i)
data = np.asarray(i)
img_array.append(data)
header['original_channel_dim'] = ('no_channel' if (len(data.shape) == len(header['spatial_shape'])) else (- 1))
_copy_compatible_dict(header, compatible_meta)
return (_stack_images(img_array, compatible_meta), compatible_meta)<|docstring|>Extract data array and meta data from loaded data and return them.
This function returns two objects: the first is a numpy array of image data, the second is a dict of meta data.
It constructs `spatial_shape` and stores it in the meta dict.
If loading a list of files, stack them together and add a new dimension as the first dimension,
and use the meta data of the first image to represent the stacked result.
Args:
img: a PIL Image object loaded from a file or a list of PIL Image objects.<|endoftext|> |
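A follow-on sketch for `get_data`, highlighting a quirk visible in the helpers below: `spatial_shape` is reported as (width, height), while np.asarray on a PIL image is indexed (height, width[, channels]).

reader = PILReader()
img = reader.read('photo.png')  # hypothetical path
data, meta = reader.get_data(img)
print(meta['spatial_shape'])    # (width, height)
print(data.shape)               # (height, width, channels) for an RGB input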
02e164c6a7bc398cc8885b1cea6a29141b53a05ccbfd37383286273f11b34728 | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n Args:\n img: a PIL Image object loaded from a image file.\n\n '
return {'format': img.format, 'mode': img.mode, 'width': img.width, 'height': img.height} | Get all the meta data of the image and convert it to dict type.
Args:
img: a PIL Image object loaded from an image file. | monai/data/image_reader.py | _get_meta_dict | lyndonboone/MONAI | 1 | python | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n Args:\n img: a PIL Image object loaded from a image file.\n\n '
return {'format': img.format, 'mode': img.mode, 'width': img.width, 'height': img.height} | def _get_meta_dict(self, img) -> Dict:
'\n Get the all the meta data of the image and convert to dict type.\n Args:\n img: a PIL Image object loaded from a image file.\n\n '
return {'format': img.format, 'mode': img.mode, 'width': img.width, 'height': img.height}<|docstring|>Get all the meta data of the image and convert it to dict type.
Args:
img: a PIL Image object loaded from an image file.<|endoftext|>
a2f6048885700df60caf106a047a6ab957a16e372cfa3e3c7d18679e7ce871b2 | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n Args:\n img: a PIL Image object loaded from a image file.\n "
return np.asarray((img.width, img.height)) | Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: a PIL Image object loaded from an image file. | monai/data/image_reader.py | _get_spatial_shape | lyndonboone/MONAI | 1 | python | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n Args:\n img: a PIL Image object loaded from a image file.\n "
return np.asarray((img.width, img.height)) | def _get_spatial_shape(self, img):
"\n Get the spatial shape of image data, it doesn't contain the channel dim.\n Args:\n img: a PIL Image object loaded from a image file.\n "
return np.asarray((img.width, img.height))<|docstring|>Get the spatial shape of image data; it doesn't contain the channel dim.
Args:
img: a PIL Image object loaded from an image file.<|endoftext|>
aa2da00ceb29c08933646c5aac15af7eba2e632bf8b3f61b4adc8587dd4e9c85 | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by WSI reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
return is_supported_format(filename, ['tif', 'tiff']) | Verify whether the format of the specified file or files is supported by the WSI reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes. | monai/data/image_reader.py | verify_suffix | lyndonboone/MONAI | 1 | python | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by WSI reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
return is_supported_format(filename, ['tif', 'tiff']) | def verify_suffix(self, filename: Union[(Sequence[str], str)]) -> bool:
'\n Verify whether the specified file or files format is supported by WSI reader.\n\n Args:\n filename: file name or a list of file names to read.\n if a list of files, verify all the suffixes.\n '
return is_supported_format(filename, ['tif', 'tiff'])<|docstring|>Verify whether the format of the specified file or files is supported by the WSI reader.
Args:
filename: file name or a list of file names to read.
if a list of files, verify all the suffixes.<|endoftext|> |
cff75a2d79ae7a30759d0cdfabb3d3fa0de21b2e3b52f591fc10ab01708863c5 | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is CuImage or list of CuImage objects.\n\n Args:\n data: file name or a list of file names to read.\n\n '
if ((self.reader_lib == 'openslide') and (not has_osl)):
raise ImportError("No module named 'openslide'")
if ((self.reader_lib == 'cucim') and (not has_cim)):
raise ImportError("No module named 'cucim'")
img_: List = []
filenames: Sequence[str] = ensure_tuple(data)
for name in filenames:
img = self.wsi_reader(name)
if (self.reader_lib == 'openslide'):
img.shape = (img.dimensions[1], img.dimensions[0], 3)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | Read image data from specified file or files.
Note that the returned object is a CuImage object or a list of CuImage objects.
Args:
data: file name or a list of file names to read. | monai/data/image_reader.py | read | lyndonboone/MONAI | 1 | python | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is CuImage or list of CuImage objects.\n\n Args:\n data: file name or a list of file names to read.\n\n '
if ((self.reader_lib == 'openslide') and (not has_osl)):
raise ImportError("No module named 'openslide'")
if ((self.reader_lib == 'cucim') and (not has_cim)):
raise ImportError("No module named 'cucim'")
img_: List = []
filenames: Sequence[str] = ensure_tuple(data)
for name in filenames:
img = self.wsi_reader(name)
if (self.reader_lib == 'openslide'):
img.shape = (img.dimensions[1], img.dimensions[0], 3)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0]) | def read(self, data: Union[(Sequence[str], str, np.ndarray)], **kwargs):
'\n Read image data from specified file or files.\n Note that the returned object is CuImage or list of CuImage objects.\n\n Args:\n data: file name or a list of file names to read.\n\n '
if ((self.reader_lib == 'openslide') and (not has_osl)):
raise ImportError("No module named 'openslide'")
if ((self.reader_lib == 'cucim') and (not has_cim)):
raise ImportError("No module named 'cucim'")
img_: List = []
filenames: Sequence[str] = ensure_tuple(data)
for name in filenames:
img = self.wsi_reader(name)
if (self.reader_lib == 'openslide'):
img.shape = (img.dimensions[1], img.dimensions[0], 3)
img_.append(img)
return (img_ if (len(filenames) > 1) else img_[0])<|docstring|>Read image data from specified file or files.
Note that the returned object is a CuImage object or a list of CuImage objects.
Args:
data: file name or a list of file names to read.<|endoftext|> |
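A minimal usage sketch for the WSI `read` above; `reader_lib` mirrors the attribute checked in the body, cucim (or openslide) must be installed, and the slide path is hypothetical.

from monai.data import WSIReader

reader = WSIReader(reader_lib='cucim')
wsi = reader.read('slide.tiff')  # hypothetical path; a CuImage object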
9456476468e198906f78ba61ce197ccee9fa6e4be34903100e8b6fc99c18b4bb | def get_data(self, img, location: Tuple[(int, int)]=(0, 0), size: Optional[Tuple[(int, int)]]=None, level: int=0, dtype: DtypeLike=np.uint8, grid_shape: Tuple[(int, int)]=(1, 1), patch_size: Optional[Union[(int, Tuple[(int, int)])]]=None):
'\n Extract regions as numpy array from WSI image and return them.\n\n Args:\n img: a WSIReader image object loaded from a file, or list of CuImage objects\n location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,\n or list of tuples (default=(0, 0))\n size: (height, width) tuple giving the region size, or list of tuples (default to full image size)\n This is the size of image at the given level (`level`)\n level: the level number, or list of level numbers (default=0)\n dtype: the data type of output image\n grid_shape: (row, columns) tuple define a grid to extract patches on that\n patch_size: (height, width) the size of extracted patches at the given level\n '
if ((self.reader_lib == 'openslide') and (size is None)):
size = (((img.shape[0] // (2 ** level)) - location[0]), ((img.shape[1] // (2 ** level)) - location[1]))
region = self._extract_region(img, location=location, size=size, level=level, dtype=dtype)
metadata: Dict = {}
metadata['spatial_shape'] = size
metadata['original_channel_dim'] = (- 1)
region = EnsureChannelFirst()(region, metadata)
if (patch_size is None):
patches = region
else:
tuple_patch_size = ensure_tuple_rep(patch_size, 2)
patches = self._extract_patches(region, patch_size=tuple_patch_size, grid_shape=grid_shape, dtype=dtype)
return (patches, metadata) | Extract regions as numpy array from WSI image and return them.
Args:
img: a WSIReader image object loaded from a file, or a list of CuImage objects
location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,
or list of tuples (default=(0, 0))
size: (height, width) tuple giving the region size, or list of tuples (default to full image size)
This is the size of the image at the given level (`level`)
level: the level number, or list of level numbers (default=0)
dtype: the data type of output image
grid_shape: (rows, columns) tuple defining the grid on which patches are extracted
patch_size: (height, width) the size of extracted patches at the given level | monai/data/image_reader.py | get_data | lyndonboone/MONAI | 1 | python | def get_data(self, img, location: Tuple[(int, int)]=(0, 0), size: Optional[Tuple[(int, int)]]=None, level: int=0, dtype: DtypeLike=np.uint8, grid_shape: Tuple[(int, int)]=(1, 1), patch_size: Optional[Union[(int, Tuple[(int, int)])]]=None):
'\n Extract regions as numpy array from WSI image and return them.\n\n Args:\n img: a WSIReader image object loaded from a file, or list of CuImage objects\n location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,\n or list of tuples (default=(0, 0))\n size: (height, width) tuple giving the region size, or list of tuples (default to full image size)\n This is the size of the image at the given level (`level`)\n level: the level number, or list of level numbers (default=0)\n dtype: the data type of output image\n grid_shape: (rows, columns) tuple defining the grid on which to extract patches\n patch_size: (height, width) the size of extracted patches at the given level\n '
if ((self.reader_lib == 'openslide') and (size is None)):
size = (((img.shape[0] // (2 ** level)) - location[0]), ((img.shape[1] // (2 ** level)) - location[1]))
region = self._extract_region(img, location=location, size=size, level=level, dtype=dtype)
metadata: Dict = {}
metadata['spatial_shape'] = size
metadata['original_channel_dim'] = (- 1)
region = EnsureChannelFirst()(region, metadata)
if (patch_size is None):
patches = region
else:
tuple_patch_size = ensure_tuple_rep(patch_size, 2)
patches = self._extract_patches(region, patch_size=tuple_patch_size, grid_shape=grid_shape, dtype=dtype)
return (patches, metadata) | def get_data(self, img, location: Tuple[(int, int)]=(0, 0), size: Optional[Tuple[(int, int)]]=None, level: int=0, dtype: DtypeLike=np.uint8, grid_shape: Tuple[(int, int)]=(1, 1), patch_size: Optional[Union[(int, Tuple[(int, int)])]]=None):
'\n Extract regions as numpy array from WSI image and return them.\n\n Args:\n img: a WSIReader image object loaded from a file, or list of CuImage objects\n location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,\n or list of tuples (default=(0, 0))\n size: (height, width) tuple giving the region size, or list of tuples (default to full image size)\n This is the size of the image at the given level (`level`)\n level: the level number, or list of level numbers (default=0)\n dtype: the data type of output image\n grid_shape: (rows, columns) tuple defining the grid on which to extract patches\n patch_size: (height, width) the size of extracted patches at the given level\n '
if ((self.reader_lib == 'openslide') and (size is None)):
size = (((img.shape[0] // (2 ** level)) - location[0]), ((img.shape[1] // (2 ** level)) - location[1]))
region = self._extract_region(img, location=location, size=size, level=level, dtype=dtype)
metadata: Dict = {}
metadata['spatial_shape'] = size
metadata['original_channel_dim'] = (- 1)
region = EnsureChannelFirst()(region, metadata)
if (patch_size is None):
patches = region
else:
tuple_patch_size = ensure_tuple_rep(patch_size, 2)
patches = self._extract_patches(region, patch_size=tuple_patch_size, grid_shape=grid_shape, dtype=dtype)
return (patches, metadata)<|docstring|>Extract regions as numpy array from WSI image and return them.
Args:
img: a WSIReader image object loaded from a file, or list of CuImage objects
location: (x_min, y_min) tuple giving the top left pixel in the level 0 reference frame,
or list of tuples (default=(0, 0))
size: (height, width) tuple giving the region size, or list of tuples (default to full image size)
This is the size of the image at the given level (`level`)
level: the level number, or list of level numbers (default=0)
dtype: the data type of output image
grid_shape: (rows, columns) tuple defining the grid on which to extract patches
patch_size: (height, width) the size of extracted patches at the given level<|endoftext|> |
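A hedged sketch of region and patch extraction with get_data above; the slide path, region size, and the WSIReader construction are illustrative assumptions.
from monai.data.image_reader import WSIReader

reader = WSIReader(reader_lib="cucim")   # assumed constructor argument
img = reader.read("slide_0.tif")         # hypothetical file
# One 256x256 region at level 0, cut into a 2x2 grid of 128x128 patches.
patches, meta = reader.get_data(
    img,
    location=(0, 0),
    size=(256, 256),
    level=0,
    grid_shape=(2, 2),
    patch_size=128,                      # expanded to (128, 128) by ensure_tuple_rep
)
# Per the body above, meta carries 'spatial_shape' and 'original_channel_dim',
# and the region comes back channel-first via EnsureChannelFirst.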
1658ff9e78748541ed2aac4ab6101a5754d75d9afb962b44428ccb8beae5c3f1 | def convert_to_rgb_array(self, raw_region, dtype: DtypeLike=np.uint8):
'Convert to RGB mode and numpy array'
if (self.reader_lib == 'openslide'):
raw_region = raw_region.convert('RGB')
raw_region = np.asarray(raw_region, dtype=dtype)
else:
num_channels = len(raw_region.channel_names)
raw_region = np.asarray(raw_region, dtype=dtype)
if (num_channels > 3):
raw_region = raw_region[:, :, :3]
return raw_region | Convert to RGB mode and numpy array | monai/data/image_reader.py | convert_to_rgb_array | lyndonboone/MONAI | 1 | python | def convert_to_rgb_array(self, raw_region, dtype: DtypeLike=np.uint8):
if (self.reader_lib == 'openslide'):
raw_region = raw_region.convert('RGB')
raw_region = np.asarray(raw_region, dtype=dtype)
else:
num_channels = len(raw_region.channel_names)
raw_region = np.asarray(raw_region, dtype=dtype)
if (num_channels > 3):
raw_region = raw_region[:, :, :3]
return raw_region | def convert_to_rgb_array(self, raw_region, dtype: DtypeLike=np.uint8):
if (self.reader_lib == 'openslide'):
raw_region = raw_region.convert('RGB')
raw_region = np.asarray(raw_region, dtype=dtype)
else:
num_channels = len(raw_region.channel_names)
raw_region = np.asarray(raw_region, dtype=dtype)
if (num_channels > 3):
raw_region = raw_region[:, :, :3]
return raw_region<|docstring|>Convert to RGB mode and numpy array<|endoftext|> |
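For the openslide branch of convert_to_rgb_array, a small illustrative call: OpenSlide's read_region returns an RGBA PIL image, which the helper converts to an HxWx3 array. The reader construction and file name are assumptions, as before.
import numpy as np
import openslide
from monai.data.image_reader import WSIReader

reader = WSIReader(reader_lib="openslide")               # assumed constructor argument
slide = openslide.OpenSlide("slide_0.tif")               # hypothetical file
rgba = slide.read_region((0, 0), 0, (256, 256))          # PIL image in RGBA mode
rgb = reader.convert_to_rgb_array(rgba, dtype=np.uint8)  # -> (256, 256, 3) uint8 array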
b79becc4818a1c8fe25c64350553f81f790d56939e2ec82d84d69cf52ab0cdaf | def modify_document(self, doc):
' This handler does not make any modifications to the Document.\n\n Args:\n doc (Document) : A Bokeh Document to update in-place\n\n *This handler does not modify the document*\n\n Returns:\n None\n\n '
pass | This handler does not make any modifications to the Document.
Args:
doc (Document) : A Bokeh Document to update in-place
*This handler does not modify the document*
Returns:
None | bokeh/application/handlers/lifecycle.py | modify_document | jdlesage/bokeh | 1 | python | def modify_document(self, doc):
' This handler does not make any modifications to the Document.\n\n Args:\n doc (Document) : A Bokeh Document to update in-place\n\n *This handler does not modify the document*\n\n Returns:\n None\n\n '
pass | def modify_document(self, doc):
' This handler does not make any modifications to the Document.\n\n Args:\n doc (Document) : A Bokeh Document to update in-place\n\n *This handler does not modify the document*\n\n Returns:\n None\n\n '
pass<|docstring|>This handler does not make any modifications to the Document.
Args:
doc (Document) : A Bokeh Document to update in-place
*This handler does not modify the document*
Returns:
None<|endoftext|> |
3789adb3f21429fd1170e8e6249351152b2d0e9615fc2b7de23c0b304e27f137 | def on_server_loaded(self, server_context):
' Execute ``on_server_loaded`` from the configured module (if\n it is defined) when the server is first started.\n\n Args:\n server_context (ServerContext) :\n\n '
return self._on_server_loaded(server_context) | Execute ``on_server_loaded`` from the configured module (if
it is defined) when the server is first started.
Args:
server_context (ServerContext) : | bokeh/application/handlers/lifecycle.py | on_server_loaded | jdlesage/bokeh | 1 | python | def on_server_loaded(self, server_context):
' Execute ``on_server_loaded`` from the configured module (if\n it is defined) when the server is first started.\n\n Args:\n server_context (ServerContext) :\n\n '
return self._on_server_loaded(server_context) | def on_server_loaded(self, server_context):
' Execute ``on_server_loaded`` from the configured module (if\n it is defined) when the server is first started.\n\n Args:\n server_context (ServerContext) :\n\n '
return self._on_server_loaded(server_context)<|docstring|>Execute ``on_server_loaded`` from the configured module (if
it is defined) when the server is first started.
Args:
server_context (ServerContext) :<|endoftext|> |
c3f913d339799524e0e40b1388675007bc2d68188108c761141a7b2d37e52591 | def on_server_unloaded(self, server_context):
" Execute ``on_server_unloaded`` from the configured module (if\n it is defined) when the server cleanly exits. (Before stopping the\n server's ``IOLoop``.)\n\n Args:\n server_context (ServerContext) :\n\n .. warning::\n In practice this code may not run, since servers are often killed\n by a signal.\n\n "
return self._on_server_unloaded(server_context) | Execute ``on_server_unloaded`` from the configured module (if
it is defined) when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal. | bokeh/application/handlers/lifecycle.py | on_server_unloaded | jdlesage/bokeh | 1 | python | def on_server_unloaded(self, server_context):
" Execute ``on_server_unloaded`` from the configured module (if\n it is defined) when the server cleanly exits. (Before stopping the\n server's ``IOLoop``.)\n\n Args:\n server_context (ServerContext) :\n\n .. warning::\n In practice this code may not run, since servers are often killed\n by a signal.\n\n "
return self._on_server_unloaded(server_context) | def on_server_unloaded(self, server_context):
" Execute ``on_server_unloaded`` from the configured module (if\n it is defined) when the server cleanly exits. (Before stopping the\n server's ``IOLoop``.)\n\n Args:\n server_context (ServerContext) :\n\n .. warning::\n In practice this code may not run, since servers are often killed\n by a signal.\n\n "
return self._on_server_unloaded(server_context)<|docstring|>Execute ``on_server_unloaded`` from the configured module (if
it is defined) when the server cleanly exits. (Before stopping the
server's ``IOLoop``.)
Args:
server_context (ServerContext) :
.. warning::
In practice this code may not run, since servers are often killed
by a signal.<|endoftext|> |
b0366169ffd59183db5e0924ed308f0249b96b0fc6fccf6598122c80f643d001 | async def on_session_created(self, session_context):
' Execute ``on_session_created`` from the configured module (if\n it is defined) when a new session is created.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_created(session_context) | Execute ``on_session_created`` from the configured module (if
it is defined) when a new session is created.
Args:
session_context (SessionContext) : | bokeh/application/handlers/lifecycle.py | on_session_created | jdlesage/bokeh | 1 | python | async def on_session_created(self, session_context):
' Execute ``on_session_created`` from the configured module (if\n it is defined) when a new session is created.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_created(session_context) | async def on_session_created(self, session_context):
' Execute ``on_session_created`` from the configured module (if\n it is defined) when a new session is created.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_created(session_context)<|docstring|>Execute ``on_session_created`` from the configured module (if
it is defined) when a new session is created.
Args:
session_context (SessionContext) :<|endoftext|> |
48ffb6b3b090c20856d0337fd0577228a757a1c177942de64e4522f9af2fff66 | async def on_session_destroyed(self, session_context):
' Execute ``on_session_destroyed`` from the configured module (if\n it is defined) when a session is destroyed.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_destroyed(session_context) | Execute ``on_session_destroyed`` from the configured module (if
it is defined) when a session is destroyed.
Args:
session_context (SessionContext) : | bokeh/application/handlers/lifecycle.py | on_session_destroyed | jdlesage/bokeh | 1 | python | async def on_session_destroyed(self, session_context):
' Execute ``on_session_destroyed`` from the configured module (if\n it is defined) when a session is destroyed.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_destroyed(session_context) | async def on_session_destroyed(self, session_context):
' Execute ``on_session_destroyed`` from the configured module (if\n it is defined) when a session is destroyed.\n\n Args:\n session_context (SessionContext) :\n\n '
return self._on_session_destroyed(session_context)<|docstring|>Execute ``on_session_destroyed`` from the configured module (if
it is defined) when a session is destroyed.
Args:
session_context (SessionContext) :<|endoftext|> |
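The four lifecycle handlers above delegate to same-named functions in a user-supplied module; in a Bokeh directory-style app that module is conventionally app_hooks.py. A minimal sketch, with every hook optional and the bodies purely illustrative:
# app_hooks.py
def on_server_loaded(server_context):
    # Runs once when the server first starts.
    print("server loaded")

def on_server_unloaded(server_context):
    # Runs on clean shutdown; may never run if the server is killed by a signal.
    print("server unloaded")

def on_session_created(session_context):
    print("session created:", session_context.id)

def on_session_destroyed(session_context):
    print("session destroyed:", session_context.id)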
5fe882d13586697b171c55d8b72f407b2d831877c71e89bdde888122cd235ac8 | def cnn_model(input_shape, num_of_class):
'\n A method to define the CNN architecture\n '
input = Input(shape=input_shape)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(input)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
output = Dense(num_of_class, activation='softmax')(x)
model = Model(input, output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model | A method to define the CNN architecture | ml_proj_init/data/network/cnn_nn.py | cnn_model | faruk-ahmad/ml-proj-init | 1 | python | def cnn_model(input_shape, num_of_class):
'\n \n '
input = Input(shape=input_shape)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(input)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
output = Dense(num_of_class, activation='softmax')(x)
model = Model(input, output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model | def cnn_model(input_shape, num_of_class):
'\n \n '
input = Input(shape=input_shape)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(input)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1), activation='relu', padding='valid')(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
x = Flatten()(x)
x = Dense(256, activation='relu')(x)
output = Dense(num_of_class, activation='softmax')(x)
model = Model(input, output)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
return model<|docstring|>A method to define the CNN architecture<|endoftext|> |
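A quick instantiation of the model above with MNIST-shaped input; the shapes and class count are illustrative, and the Keras names used by cnn_model (Input, Conv2D, MaxPooling2D, Flatten, Dense, Model) are assumed to be imported in the source module.
model = cnn_model(input_shape=(28, 28, 1), num_of_class=10)
model.summary()  # two conv/pool blocks, a 256-unit dense layer, a 10-way softmax head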
dffa24d757efd2e98ed4eab1638a3c5043e42bcf382364c24b446d2c69457974 | def train_mnist(x_train, y_train, input_shape, num_classes):
'\n a dummy method to train the built CNN model\n '
model = cnn_model(input_shape, num_classes)
model.fit(x_train, y_train, epochs=1, batch_size=32, validation_split=0.2)
return model | a dummy method to train the built CNN model | ml_proj_init/data/network/cnn_nn.py | train_mnist | faruk-ahmad/ml-proj-init | 1 | python | def train_mnist(x_train, y_train, input_shape, num_classes):
'\n \n '
model = cnn_model(input_shape, num_classes)
model.fit(x_train, y_train, epochs=1, batch_size=32, validation_split=0.2)
return model | def train_mnist(x_train, y_train, input_shape, num_classes):
'\n \n '
model = cnn_model(input_shape, num_classes)
model.fit(x_train, y_train, epochs=1, batch_size=32, validation_split=0.2)
return model<|docstring|>a dummy method to train the built CNN model<|endoftext|> |
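An end-to-end sketch wiring the two helpers together with Keras's built-in MNIST loader; the preprocessing shown is an assumption, not part of the source.
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical

(x_train, y_train), _ = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255.0  # add channel dim, scale to [0, 1]
y_train = to_categorical(y_train, 10)                               # one-hot labels for categorical_crossentropy
model = train_mnist(x_train, y_train, input_shape=(28, 28, 1), num_classes=10)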