body_hash
stringlengths 64
64
| body
stringlengths 23
109k
| docstring
stringlengths 1
57k
| path
stringlengths 4
198
| name
stringlengths 1
115
| repository_name
stringlengths 7
111
| repository_stars
float64 0
191k
| lang
stringclasses 1
value | body_without_docstring
stringlengths 14
108k
| unified
stringlengths 45
133k
|
---|---|---|---|---|---|---|---|---|---|
ed32e35895df97d157702dc7b851f84eb6553dd6022effb03081071731636e69 | def test_get_current_grade(self):
'\n Test for get_current_grade method\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_current_grade('course-v1:edX+DemoX+Demo_Course') == 77.0)
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') == 3.0)
assert (mmtrack.get_current_grade('course-v1:odl+FOO101+CR-FALL15') is None)
with patch('edx_api.grades.models.CurrentGradesByUser.get_current_grade', return_value=None):
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') is None) | Test for get_current_grade method | dashboard/utils_test.py | test_get_current_grade | mitodl/micromasters | 32 | python | def test_get_current_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_current_grade('course-v1:edX+DemoX+Demo_Course') == 77.0)
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') == 3.0)
assert (mmtrack.get_current_grade('course-v1:odl+FOO101+CR-FALL15') is None)
with patch('edx_api.grades.models.CurrentGradesByUser.get_current_grade', return_value=None):
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') is None) | def test_get_current_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_current_grade('course-v1:edX+DemoX+Demo_Course') == 77.0)
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') == 3.0)
assert (mmtrack.get_current_grade('course-v1:odl+FOO101+CR-FALL15') is None)
with patch('edx_api.grades.models.CurrentGradesByUser.get_current_grade', return_value=None):
assert (mmtrack.get_current_grade('course-v1:MITx+8.MechCX+2014_T1') is None)<|docstring|>Test for get_current_grade method<|endoftext|> |
42afe185376d650b4ec42c614dfd589106ff6d0e4b5cbc213b23f2a3e233c77c | def test_count_courses_passed_normal(self):
'\n Assert that count_courses_passed works in case of normal program.\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 0)
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_courses_passed() == 1)
course = CourseFactory.create(program=self.program)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course=course, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_courses_passed() == 2) | Assert that count_courses_passed works in case of normal program. | dashboard/utils_test.py | test_count_courses_passed_normal | mitodl/micromasters | 32 | python | def test_count_courses_passed_normal(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 0)
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_courses_passed() == 1)
course = CourseFactory.create(program=self.program)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course=course, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_courses_passed() == 2) | def test_count_courses_passed_normal(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 0)
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_courses_passed() == 1)
course = CourseFactory.create(program=self.program)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course=course, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_courses_passed() == 2)<|docstring|>Assert that count_courses_passed works in case of normal program.<|endoftext|> |
2c44a65d3eca532c58cd9aa66088347841107498ceab8af67495a3c039cdcbe0 | def test_count_courses_passed_fa(self):
'\n Assert that count_courses_passed works in case of fa program.\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
with patch('courses.models.Course.has_exam', new_callable=PropertyMock, return_value=True):
assert (mmtrack.count_courses_passed() == 0)
CombinedFinalGrade.objects.create(user=self.user, course=self.crun_fa.course, grade=0.6)
assert (mmtrack.count_courses_passed() == 1) | Assert that count_courses_passed works in case of fa program. | dashboard/utils_test.py | test_count_courses_passed_fa | mitodl/micromasters | 32 | python | def test_count_courses_passed_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
with patch('courses.models.Course.has_exam', new_callable=PropertyMock, return_value=True):
assert (mmtrack.count_courses_passed() == 0)
CombinedFinalGrade.objects.create(user=self.user, course=self.crun_fa.course, grade=0.6)
assert (mmtrack.count_courses_passed() == 1) | def test_count_courses_passed_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
with patch('courses.models.Course.has_exam', new_callable=PropertyMock, return_value=True):
assert (mmtrack.count_courses_passed() == 0)
CombinedFinalGrade.objects.create(user=self.user, course=self.crun_fa.course, grade=0.6)
assert (mmtrack.count_courses_passed() == 1)<|docstring|>Assert that count_courses_passed works in case of fa program.<|endoftext|> |
b316e4b565aefdab03f37cda62dbf25f2be01f910a51ac846e3ef9de8149b13f | def test_count_courses_mixed_fa(self):
'\n Test count_courses_passed with mixed course-exam configuration\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
course_with_exam_1 = CourseFactory.create(program=self.program_financial_aid)
ExamRunFactory.create(course=course_with_exam_1, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
CombinedFinalGrade.objects.create(user=self.user, course=course_with_exam_1, grade=0.7)
ExamRunFactory.create(course__program=self.program_financial_aid, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True)
assert (mmtrack.count_courses_passed() == 2) | Test count_courses_passed with mixed course-exam configuration | dashboard/utils_test.py | test_count_courses_mixed_fa | mitodl/micromasters | 32 | python | def test_count_courses_mixed_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
course_with_exam_1 = CourseFactory.create(program=self.program_financial_aid)
ExamRunFactory.create(course=course_with_exam_1, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
CombinedFinalGrade.objects.create(user=self.user, course=course_with_exam_1, grade=0.7)
ExamRunFactory.create(course__program=self.program_financial_aid, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True)
assert (mmtrack.count_courses_passed() == 2) | def test_count_courses_mixed_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
course_with_exam_1 = CourseFactory.create(program=self.program_financial_aid)
ExamRunFactory.create(course=course_with_exam_1, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
CombinedFinalGrade.objects.create(user=self.user, course=course_with_exam_1, grade=0.7)
ExamRunFactory.create(course__program=self.program_financial_aid, date_grades_available=(now_in_utc() - timedelta(weeks=1)))
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True)
assert (mmtrack.count_courses_passed() == 2)<|docstring|>Test count_courses_passed with mixed course-exam configuration<|endoftext|> |
7ea0d31559f197d63dd4753e164a98bfc47ce840f23504d5ddebd24b288842ef | def test_get_number_of_passed_courses_for_completion(self):
'\n Assert that get_number_of_passed_courses_for_completion computes a number of courses passed for\n programs with elective sets\n '
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
electives_set = ElectivesSet.objects.create(program=self.program, required_number=1)
elective_cruns = []
for _ in range(2):
run = CourseRunFactory.create(course__program=self.program)
FinalGradeFactory.create(user=self.user, course_run=run, passed=True, status='complete', grade=0.7)
elective_cruns.append(run)
CourseRunGradingStatus.objects.create(course_run=run, status='complete')
ElectiveCourse.objects.create(course=run.course, electives_set=electives_set)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 3)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 2) | Assert that get_number_of_passed_courses_for_completion computes a number of courses passed for
programs with elective sets | dashboard/utils_test.py | test_get_number_of_passed_courses_for_completion | mitodl/micromasters | 32 | python | def test_get_number_of_passed_courses_for_completion(self):
'\n Assert that get_number_of_passed_courses_for_completion computes a number of courses passed for\n programs with elective sets\n '
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
electives_set = ElectivesSet.objects.create(program=self.program, required_number=1)
elective_cruns = []
for _ in range(2):
run = CourseRunFactory.create(course__program=self.program)
FinalGradeFactory.create(user=self.user, course_run=run, passed=True, status='complete', grade=0.7)
elective_cruns.append(run)
CourseRunGradingStatus.objects.create(course_run=run, status='complete')
ElectiveCourse.objects.create(course=run.course, electives_set=electives_set)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 3)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 2) | def test_get_number_of_passed_courses_for_completion(self):
'\n Assert that get_number_of_passed_courses_for_completion computes a number of courses passed for\n programs with elective sets\n '
course_run = self.cruns[0]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
electives_set = ElectivesSet.objects.create(program=self.program, required_number=1)
elective_cruns = []
for _ in range(2):
run = CourseRunFactory.create(course__program=self.program)
FinalGradeFactory.create(user=self.user, course_run=run, passed=True, status='complete', grade=0.7)
elective_cruns.append(run)
CourseRunGradingStatus.objects.create(course_run=run, status='complete')
ElectiveCourse.objects.create(course=run.course, electives_set=electives_set)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_courses_passed() == 3)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 2)<|docstring|>Assert that get_number_of_passed_courses_for_completion computes a number of courses passed for
programs with elective sets<|endoftext|> |
bd2b70d6ef32a17da36562ee9b5dc280eb85c97e3366db00d051f6f9a6f6d9ea | def test_get_number_of_passed_courses_for_completion_no_electives(self):
'\n test get_number_of_passed_courses_for_completion returns number of passed courses if no electives\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_run in self.cruns:
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 1) | test get_number_of_passed_courses_for_completion returns number of passed courses if no electives | dashboard/utils_test.py | test_get_number_of_passed_courses_for_completion_no_electives | mitodl/micromasters | 32 | python | def test_get_number_of_passed_courses_for_completion_no_electives(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_run in self.cruns:
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 1) | def test_get_number_of_passed_courses_for_completion_no_electives(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for course_run in self.cruns:
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.get_number_of_passed_courses_for_completion() == 1)<|docstring|>test get_number_of_passed_courses_for_completion returns number of passed courses if no electives<|endoftext|> |
606e73baee990403c6af4df10ed0ec7a048154fa065b9a684bec1a93952c9c32 | def test_count_passing_courses_for_keys(self):
'\n Assert that count_courses_passed works in case of normal program.\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 0)
for crun_index in [0, 1]:
course_run = self.cruns[crun_index]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 1)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course__program=self.program, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 2) | Assert that count_courses_passed works in case of normal program. | dashboard/utils_test.py | test_count_passing_courses_for_keys | mitodl/micromasters | 32 | python | def test_count_passing_courses_for_keys(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 0)
for crun_index in [0, 1]:
course_run = self.cruns[crun_index]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 1)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course__program=self.program, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 2) | def test_count_passing_courses_for_keys(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 0)
for crun_index in [0, 1]:
course_run = self.cruns[crun_index]
FinalGradeFactory.create(user=self.user, course_run=course_run, passed=True)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 1)
final_grade = FinalGradeFactory.create(user=self.user, course_run__course__program=self.program, passed=True)
mmtrack.edx_course_keys.add(final_grade.course_run.edx_course_key)
assert (mmtrack.count_passing_courses_for_keys(mmtrack.edx_course_keys) == 2)<|docstring|>Assert that count_courses_passed works in case of normal program.<|endoftext|> |
79f75fbe5b64a263d6a9b2bdd2a4f0d2e9aa7d3d8e5868e06ae05647e4cef3c0 | def test_has_paid_fa_no_final_grade(self):
'\n Assert that has_paid works for FA programs in case there is no final grade\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True) | Assert that has_paid works for FA programs in case there is no final grade | dashboard/utils_test.py | test_has_paid_fa_no_final_grade | mitodl/micromasters | 32 | python | def test_has_paid_fa_no_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True) | def test_has_paid_fa_no_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True)<|docstring|>Assert that has_paid works for FA programs in case there is no final grade<|endoftext|> |
5dbff100f2ed205e845169e2edccedf8fae3669958e444ab5cbd6d847c40c5db | def test_has_paid_for_entire_course(self):
'\n Tests that the .has_paid method returns true if\n any of the course runs in the course have been paid for\n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(self.crun_fa2.edx_course_key) is True) | Tests that the .has_paid method returns true if
any of the course runs in the course have been paid for | dashboard/utils_test.py | test_has_paid_for_entire_course | mitodl/micromasters | 32 | python | def test_has_paid_for_entire_course(self):
'\n Tests that the .has_paid method returns true if\n any of the course runs in the course have been paid for\n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(self.crun_fa2.edx_course_key) is True) | def test_has_paid_for_entire_course(self):
'\n Tests that the .has_paid method returns true if\n any of the course runs in the course have been paid for\n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(self.crun_fa2.edx_course_key) is True)<|docstring|>Tests that the .has_paid method returns true if
any of the course runs in the course have been paid for<|endoftext|> |
877931c25a3966a19e96ca623646212d9901c11fab7974b65cd9524c8c9d3879 | def test_not_paid_fa_with_course_run_paid_on_edx(self):
'\n Test for has_paid is False for FA programs even in case\n there is a final grade with course_run_paid_on_edx=True\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is False)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False) | Test for has_paid is False for FA programs even in case
there is a final grade with course_run_paid_on_edx=True | dashboard/utils_test.py | test_not_paid_fa_with_course_run_paid_on_edx | mitodl/micromasters | 32 | python | def test_not_paid_fa_with_course_run_paid_on_edx(self):
'\n Test for has_paid is False for FA programs even in case\n there is a final grade with course_run_paid_on_edx=True\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is False)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False) | def test_not_paid_fa_with_course_run_paid_on_edx(self):
'\n Test for has_paid is False for FA programs even in case\n there is a final grade with course_run_paid_on_edx=True\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
final_grade = FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is False)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False)<|docstring|>Test for has_paid is False for FA programs even in case
there is a final grade with course_run_paid_on_edx=True<|endoftext|> |
197179a1ccd3c57e4302786a95d41503a039c0cd17c0e6363a2227d54fd6d9f2 | def test_has_paid_fa_with_course_run_paid_on_mm(self):
'\n Test for has_paid is True for FA programs when the course has been paid on MicroMasters\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True) | Test for has_paid is True for FA programs when the course has been paid on MicroMasters | dashboard/utils_test.py | test_has_paid_fa_with_course_run_paid_on_mm | mitodl/micromasters | 32 | python | def test_has_paid_fa_with_course_run_paid_on_mm(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True) | def test_has_paid_fa_with_course_run_paid_on_mm(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_paid(key) is False)
self.pay_for_fa_course(key)
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid(key) is True)<|docstring|>Test for has_paid is True for FA programs when the course has been paid on MicroMasters<|endoftext|> |
37e934b85ba2f87b453677cde5267b8cb8558269597f29aea1a439b219923134 | def test_has_paid_not_fa_no_final_grade(self):
'\n Assert that has_paid works for non-FA programs in case there is no final grade\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:edX+DemoX+Demo_Course'
assert (mmtrack.has_paid(key) is True) | Assert that has_paid works for non-FA programs in case there is no final grade | dashboard/utils_test.py | test_has_paid_not_fa_no_final_grade | mitodl/micromasters | 32 | python | def test_has_paid_not_fa_no_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:edX+DemoX+Demo_Course'
assert (mmtrack.has_paid(key) is True) | def test_has_paid_not_fa_no_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:edX+DemoX+Demo_Course'
assert (mmtrack.has_paid(key) is True)<|docstring|>Assert that has_paid works for non-FA programs in case there is no final grade<|endoftext|> |
c660108bec5a974ff90770fb776d6872e2d84e4646560c5130c7d8168fb87527 | def test_has_paid_not_fa_with_final_grade(self):
'\n Assert that has_paid works for non-FA programs in case there is a final grade\n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:odl+FOO102+CR-FALL16'
assert (mmtrack.has_paid(key) is False)
course_run = self.cruns[(- 1)]
final_grade = FinalGradeFactory.create(user=self.user, course_run=course_run, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is True)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False) | Assert that has_paid works for non-FA programs in case there is a final grade | dashboard/utils_test.py | test_has_paid_not_fa_with_final_grade | mitodl/micromasters | 32 | python | def test_has_paid_not_fa_with_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:odl+FOO102+CR-FALL16'
assert (mmtrack.has_paid(key) is False)
course_run = self.cruns[(- 1)]
final_grade = FinalGradeFactory.create(user=self.user, course_run=course_run, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is True)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False) | def test_has_paid_not_fa_with_final_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
key = 'course-v1:odl+FOO102+CR-FALL16'
assert (mmtrack.has_paid(key) is False)
course_run = self.cruns[(- 1)]
final_grade = FinalGradeFactory.create(user=self.user, course_run=course_run, course_run_paid_on_edx=True)
assert (mmtrack.has_paid(key) is True)
final_grade.course_run_paid_on_edx = False
final_grade.save()
assert (mmtrack.has_paid(key) is False)<|docstring|>Assert that has_paid works for non-FA programs in case there is a final grade<|endoftext|> |
a6a8c42bc4f7b9ed99d06ef765ccdcd33b9cd258f3d7ae46aa45d66f9590608a | def test_has_paid_for_any_in_program(self):
'\n Assert that has_paid_for_any_in_program returns True if any CourseRun associated with a Program has been\n paid for.\n '
new_program = ProgramFactory.create()
new_course_runs = CourseRunFactory.create_batch(2, course__program=new_program)
mmtrack = MMTrack(user=self.user, program=new_program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_for_any_in_program() is False)
fg = FinalGradeFactory.create(user=self.user, course_run=new_course_runs[0], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True)
fg.delete()
FinalGradeFactory.create(user=self.user, course_run=new_course_runs[1], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True) | Assert that has_paid_for_any_in_program returns True if any CourseRun associated with a Program has been
paid for. | dashboard/utils_test.py | test_has_paid_for_any_in_program | mitodl/micromasters | 32 | python | def test_has_paid_for_any_in_program(self):
'\n Assert that has_paid_for_any_in_program returns True if any CourseRun associated with a Program has been\n paid for.\n '
new_program = ProgramFactory.create()
new_course_runs = CourseRunFactory.create_batch(2, course__program=new_program)
mmtrack = MMTrack(user=self.user, program=new_program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_for_any_in_program() is False)
fg = FinalGradeFactory.create(user=self.user, course_run=new_course_runs[0], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True)
fg.delete()
FinalGradeFactory.create(user=self.user, course_run=new_course_runs[1], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True) | def test_has_paid_for_any_in_program(self):
'\n Assert that has_paid_for_any_in_program returns True if any CourseRun associated with a Program has been\n paid for.\n '
new_program = ProgramFactory.create()
new_course_runs = CourseRunFactory.create_batch(2, course__program=new_program)
mmtrack = MMTrack(user=self.user, program=new_program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.has_paid_for_any_in_program() is False)
fg = FinalGradeFactory.create(user=self.user, course_run=new_course_runs[0], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True)
fg.delete()
FinalGradeFactory.create(user=self.user, course_run=new_course_runs[1], course_run_paid_on_edx=True)
assert (mmtrack.has_paid_for_any_in_program() is True)<|docstring|>Assert that has_paid_for_any_in_program returns True if any CourseRun associated with a Program has been
paid for.<|endoftext|> |
d37b9c2012716b1793d4bb916c0f1b69c2832cb40e6b3916da77c532e39deaf5 | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False))
@ddt.unpack
def test_has_passing_certificate(self, certificate_type, is_passing, expected_result):
'\n Test for has_passing_certificate method with different type of certificates\n '
course_key = self.crun_fa.edx_course_key
cert_json = {'username': 'staff', 'course_id': course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(course_key) is expected_result) | Test for has_passing_certificate method with different type of certificates | dashboard/utils_test.py | test_has_passing_certificate | mitodl/micromasters | 32 | python | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False))
@ddt.unpack
def test_has_passing_certificate(self, certificate_type, is_passing, expected_result):
'\n \n '
course_key = self.crun_fa.edx_course_key
cert_json = {'username': 'staff', 'course_id': course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(course_key) is expected_result) | @ddt.data(('verified', True, True), ('audit', False, False), ('verified', False, False))
@ddt.unpack
def test_has_passing_certificate(self, certificate_type, is_passing, expected_result):
'\n \n '
course_key = self.crun_fa.edx_course_key
cert_json = {'username': 'staff', 'course_id': course_key, 'certificate_type': certificate_type, 'is_passing': is_passing, 'status': 'downloadable', 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(course_key) is expected_result)<|docstring|>Test for has_passing_certificate method with different type of certificates<|endoftext|> |
380ed7f0555bbef10c3ea75adef67e95270d18758845ee7f978492338ac77d32 | def test_has_passing_certificate_fa(self):
'\n Assert that has_passing_certificate is true if user has a cert even if has_paid is false for FA programs\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_passing_certificate(key) is False)
assert (mmtrack.has_paid(key) is False)
cert_json = {'username': 'staff', 'course_id': self.crun_fa.edx_course_key, 'certificate_type': 'verified', 'status': 'downloadable', 'is_passing': True, 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(key) is True)
assert (mmtrack.has_paid(key) is False) | Assert that has_passing_certificate is true if user has a cert even if has_paid is false for FA programs | dashboard/utils_test.py | test_has_passing_certificate_fa | mitodl/micromasters | 32 | python | def test_has_passing_certificate_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_passing_certificate(key) is False)
assert (mmtrack.has_paid(key) is False)
cert_json = {'username': 'staff', 'course_id': self.crun_fa.edx_course_key, 'certificate_type': 'verified', 'status': 'downloadable', 'is_passing': True, 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(key) is True)
assert (mmtrack.has_paid(key) is False) | def test_has_passing_certificate_fa(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
key = self.crun_fa.edx_course_key
assert (mmtrack.has_passing_certificate(key) is False)
assert (mmtrack.has_paid(key) is False)
cert_json = {'username': 'staff', 'course_id': self.crun_fa.edx_course_key, 'certificate_type': 'verified', 'status': 'downloadable', 'is_passing': True, 'download_url': 'http://www.example.com/demo.pdf', 'grade': '0.98'}
cached_edx_user_data = MagicMock(spec=CachedEdxUserData, enrollments=CachedEnrollment.deserialize_edx_data(self.enrollments_json), certificates=CachedCertificate.deserialize_edx_data((self.certificates_json + [cert_json])), current_grades=CachedCurrentGrade.deserialize_edx_data(self.current_grades_json))
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=cached_edx_user_data)
assert (mmtrack.has_passing_certificate(key) is True)
assert (mmtrack.has_paid(key) is False)<|docstring|>Assert that has_passing_certificate is true if user has a cert even if has_paid is false for FA programs<|endoftext|> |
b9479722d576c1b3d8b6ff00447936e0635479d0af799ceb6cb9c619ee290730 | def test_get_program_certificate_url(self):
'\n Test get_program_certificate_url\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_certificate_url() == '')
certificate = MicromastersProgramCertificate.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_certificate_url() == '')
ProgramCertificateSignatoriesFactory.create(program_page__program=certificate.program)
assert (mmtrack.get_program_certificate_url() == reverse('program-certificate', args=[certificate.hash])) | Test get_program_certificate_url | dashboard/utils_test.py | test_get_program_certificate_url | mitodl/micromasters | 32 | python | def test_get_program_certificate_url(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_certificate_url() == )
certificate = MicromastersProgramCertificate.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_certificate_url() == )
ProgramCertificateSignatoriesFactory.create(program_page__program=certificate.program)
assert (mmtrack.get_program_certificate_url() == reverse('program-certificate', args=[certificate.hash])) | def test_get_program_certificate_url(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_certificate_url() == )
certificate = MicromastersProgramCertificate.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_certificate_url() == )
ProgramCertificateSignatoriesFactory.create(program_page__program=certificate.program)
assert (mmtrack.get_program_certificate_url() == reverse('program-certificate', args=[certificate.hash]))<|docstring|>Test get_program_certificate_url<|endoftext|> |
cbcf2a1dad811b2a72619073fb55848e6d102df207880b29d8528d4257628581 | def test_get_program_letter_url(self):
'\n Test get_program_letter_url\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_letter_url() == '')
letter = MicromastersProgramCommendation.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_letter_url() == '')
signatory = ProgramLetterSignatoryFactory.create(program_page__program=letter.program)
assert (mmtrack.get_program_letter_url() == '')
program_page = signatory.program_page
program_page.program_letter_text = '<p> Some example test </p>'
program_page.save()
assert (mmtrack.get_program_letter_url() == '')
program_page.program_letter_logo = ImageFactory()
program_page.save()
assert (mmtrack.get_program_letter_url() == reverse('program_letter', args=[letter.uuid])) | Test get_program_letter_url | dashboard/utils_test.py | test_get_program_letter_url | mitodl/micromasters | 32 | python | def test_get_program_letter_url(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_letter_url() == )
letter = MicromastersProgramCommendation.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_letter_url() == )
signatory = ProgramLetterSignatoryFactory.create(program_page__program=letter.program)
assert (mmtrack.get_program_letter_url() == )
program_page = signatory.program_page
program_page.program_letter_text = '<p> Some example test </p>'
program_page.save()
assert (mmtrack.get_program_letter_url() == )
program_page.program_letter_logo = ImageFactory()
program_page.save()
assert (mmtrack.get_program_letter_url() == reverse('program_letter', args=[letter.uuid])) | def test_get_program_letter_url(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_program_letter_url() == )
letter = MicromastersProgramCommendation.objects.create(user=self.user, program=self.program_financial_aid)
assert (mmtrack.get_program_letter_url() == )
signatory = ProgramLetterSignatoryFactory.create(program_page__program=letter.program)
assert (mmtrack.get_program_letter_url() == )
program_page = signatory.program_page
program_page.program_letter_text = '<p> Some example test </p>'
program_page.save()
assert (mmtrack.get_program_letter_url() == )
program_page.program_letter_logo = ImageFactory()
program_page.save()
assert (mmtrack.get_program_letter_url() == reverse('program_letter', args=[letter.uuid]))<|docstring|>Test get_program_letter_url<|endoftext|> |
856e950f27c009060638386032b8815990d8360e90340e0ea60676e53d23fbf8 | def test_get_best_final_grade_for_course(self):
'\n Test for get_best_final_grade_for_course to return the highest grade over all course runs\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, grade=0.3, passed=False)
assert (mmtrack.get_best_final_grade_for_course(finaid_course) is None)
for grade in [0.3, 0.5, 0.8]:
course_run = CourseRunFactory.create(course=finaid_course)
FinalGradeFactory.create(user=self.user, course_run=course_run, grade=grade, passed=True)
assert (mmtrack.get_best_final_grade_for_course(finaid_course).grade == 0.8) | Test for get_best_final_grade_for_course to return the highest grade over all course runs | dashboard/utils_test.py | test_get_best_final_grade_for_course | mitodl/micromasters | 32 | python | def test_get_best_final_grade_for_course(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, grade=0.3, passed=False)
assert (mmtrack.get_best_final_grade_for_course(finaid_course) is None)
for grade in [0.3, 0.5, 0.8]:
course_run = CourseRunFactory.create(course=finaid_course)
FinalGradeFactory.create(user=self.user, course_run=course_run, grade=grade, passed=True)
assert (mmtrack.get_best_final_grade_for_course(finaid_course).grade == 0.8) | def test_get_best_final_grade_for_course(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, grade=0.3, passed=False)
assert (mmtrack.get_best_final_grade_for_course(finaid_course) is None)
for grade in [0.3, 0.5, 0.8]:
course_run = CourseRunFactory.create(course=finaid_course)
FinalGradeFactory.create(user=self.user, course_run=course_run, grade=grade, passed=True)
assert (mmtrack.get_best_final_grade_for_course(finaid_course).grade == 0.8)<|docstring|>Test for get_best_final_grade_for_course to return the highest grade over all course runs<|endoftext|> |
db87ee58fc53f0d28695ce32404346287ba53ed26a5b15a9270fd1f0120d8e74 | def test_get_overall_final_grade_for_course(self):
'\n Test for get_overall_final_grade_for_course to return CombinedFinalGrade for course\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '')
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True, grade=0.8)
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '80')
ExamRunFactory.create(course=finaid_course)
CombinedFinalGrade.objects.create(user=self.user, course=finaid_course, grade='74')
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '74') | Test for get_overall_final_grade_for_course to return CombinedFinalGrade for course | dashboard/utils_test.py | test_get_overall_final_grade_for_course | mitodl/micromasters | 32 | python | def test_get_overall_final_grade_for_course(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == )
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True, grade=0.8)
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '80')
ExamRunFactory.create(course=finaid_course)
CombinedFinalGrade.objects.create(user=self.user, course=finaid_course, grade='74')
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '74') | def test_get_overall_final_grade_for_course(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == )
FinalGradeFactory.create(user=self.user, course_run=self.crun_fa, passed=True, grade=0.8)
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '80')
ExamRunFactory.create(course=finaid_course)
CombinedFinalGrade.objects.create(user=self.user, course=finaid_course, grade='74')
assert (mmtrack.get_overall_final_grade_for_course(finaid_course) == '74')<|docstring|>Test for get_overall_final_grade_for_course to return CombinedFinalGrade for course<|endoftext|> |
3bd2da0fa74f367cc107089c14fc3ed3fce213e3dd9c10575f7741d78b458dc9 | def test_get_best_proctored_exam_grade(self):
'\n Test get_best_proctorate_exam_grade to return a passed exam with the highest score\n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
last_week = (now_in_utc() - timedelta(weeks=1))
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=False, percentage_grade=0.6)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) is None)
best_exam = ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.9, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam)
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.8, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam) | Test get_best_proctorate_exam_grade to return a passed exam with the highest score | dashboard/utils_test.py | test_get_best_proctored_exam_grade | mitodl/micromasters | 32 | python | def test_get_best_proctored_exam_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
last_week = (now_in_utc() - timedelta(weeks=1))
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=False, percentage_grade=0.6)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) is None)
best_exam = ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.9, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam)
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.8, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam) | def test_get_best_proctored_exam_grade(self):
'\n \n '
mmtrack = MMTrack(user=self.user, program=self.program_financial_aid, edx_user_data=self.cached_edx_user_data)
finaid_course = self.crun_fa.course
last_week = (now_in_utc() - timedelta(weeks=1))
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=False, percentage_grade=0.6)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) is None)
best_exam = ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.9, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam)
ProctoredExamGradeFactory.create(user=self.user, course=finaid_course, passed=True, percentage_grade=0.8, exam_run__date_grades_available=last_week)
assert (mmtrack.get_best_proctored_exam_grade(finaid_course) == best_exam)<|docstring|>Test get_best_proctorate_exam_grade to return a passed exam with the highest score<|endoftext|> |
5e523d16df59a7748f68a2d1cd343cac0284d89d335bf00676c208afa4168857 | def test_get_mmtrack(self):
'\n test creation of mmtrack(dashboard.utils.MMTrack) object.\n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = get_mmtrack(self.user, self.program_financial_aid)
key = self.crun_fa.edx_course_key
assert (mmtrack.user == self.user)
assert (mmtrack.has_paid(key) is True) | test creation of mmtrack(dashboard.utils.MMTrack) object. | dashboard/utils_test.py | test_get_mmtrack | mitodl/micromasters | 32 | python | def test_get_mmtrack(self):
'\n \n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = get_mmtrack(self.user, self.program_financial_aid)
key = self.crun_fa.edx_course_key
assert (mmtrack.user == self.user)
assert (mmtrack.has_paid(key) is True) | def test_get_mmtrack(self):
'\n \n '
self.pay_for_fa_course(self.crun_fa.edx_course_key)
mmtrack = get_mmtrack(self.user, self.program_financial_aid)
key = self.crun_fa.edx_course_key
assert (mmtrack.user == self.user)
assert (mmtrack.has_paid(key) is True)<|docstring|>test creation of mmtrack(dashboard.utils.MMTrack) object.<|endoftext|> |
2ce077c515e0285da3792e73986246b4869d6778d25d76659622f4c27c5467e4 | @ddt.data(['', '', False, False, False], ['', ExamProfile.PROFILE_ABSENT, True, False, False], [ExamProfile.PROFILE_INVALID, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_FAILED, ExamProfile.PROFILE_SUCCESS, True, True, False], ['', ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_IN_PROGRESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SCHEDULABLE, True, True, True])
@ddt.unpack
def test_get_exam_card_status_for_edx_exams(self, profile_status, expected_status, make_exam_run, make_profile, make_auth):
'\n test get_exam_card_status\n '
now = now_in_utc()
exam_run = None
if make_exam_run:
exam_run = ExamRunFactory.create(course=self.course, date_first_eligible=(now - timedelta(weeks=1)), date_last_eligible=(now + timedelta(weeks=1)))
if make_profile:
ExamProfileFactory.create(profile=self.user.profile, status=profile_status)
if make_auth:
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run=exam_run)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_exam_card_status() == expected_status) | test get_exam_card_status | dashboard/utils_test.py | test_get_exam_card_status_for_edx_exams | mitodl/micromasters | 32 | python | @ddt.data([, , False, False, False], [, ExamProfile.PROFILE_ABSENT, True, False, False], [ExamProfile.PROFILE_INVALID, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_FAILED, ExamProfile.PROFILE_SUCCESS, True, True, False], [, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_IN_PROGRESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SCHEDULABLE, True, True, True])
@ddt.unpack
def test_get_exam_card_status_for_edx_exams(self, profile_status, expected_status, make_exam_run, make_profile, make_auth):
'\n \n '
now = now_in_utc()
exam_run = None
if make_exam_run:
exam_run = ExamRunFactory.create(course=self.course, date_first_eligible=(now - timedelta(weeks=1)), date_last_eligible=(now + timedelta(weeks=1)))
if make_profile:
ExamProfileFactory.create(profile=self.user.profile, status=profile_status)
if make_auth:
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run=exam_run)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_exam_card_status() == expected_status) | @ddt.data([, , False, False, False], [, ExamProfile.PROFILE_ABSENT, True, False, False], [ExamProfile.PROFILE_INVALID, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_FAILED, ExamProfile.PROFILE_SUCCESS, True, True, False], [, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_IN_PROGRESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SUCCESS, True, True, False], [ExamProfile.PROFILE_SUCCESS, ExamProfile.PROFILE_SCHEDULABLE, True, True, True])
@ddt.unpack
def test_get_exam_card_status_for_edx_exams(self, profile_status, expected_status, make_exam_run, make_profile, make_auth):
'\n \n '
now = now_in_utc()
exam_run = None
if make_exam_run:
exam_run = ExamRunFactory.create(course=self.course, date_first_eligible=(now - timedelta(weeks=1)), date_last_eligible=(now + timedelta(weeks=1)))
if make_profile:
ExamProfileFactory.create(profile=self.user.profile, status=profile_status)
if make_auth:
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run=exam_run)
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
assert (mmtrack.get_exam_card_status() == expected_status)<|docstring|>test get_exam_card_status<|endoftext|> |
6dc21bb83b4ffec8d2075490fcb4c9f14730d2aa201ce154617a4f25eb150f4d | def test_get_exam_card_status_eligible(self):
'\n test get_exam_card_status against valid eligibility dates\n '
ExamProfileFactory.create(profile=self.user.profile, status=ExamProfile.PROFILE_SUCCESS)
now = datetime(2016, 3, 15, tzinfo=pytz.UTC)
past = datetime(2016, 3, 10, tzinfo=pytz.UTC)
future = datetime(2016, 3, 20, tzinfo=pytz.UTC)
valid_dates = [(past - timedelta(days=1)), past, now, future]
invalid_dates = [(future + timedelta(days=1))]
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run__course=self.course, exam_run__date_first_eligible=past.date(), exam_run__date_last_eligible=future.date())
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for now_value in valid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SCHEDULABLE)
for now_value in invalid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SUCCESS) | test get_exam_card_status against valid eligibility dates | dashboard/utils_test.py | test_get_exam_card_status_eligible | mitodl/micromasters | 32 | python | def test_get_exam_card_status_eligible(self):
'\n \n '
ExamProfileFactory.create(profile=self.user.profile, status=ExamProfile.PROFILE_SUCCESS)
now = datetime(2016, 3, 15, tzinfo=pytz.UTC)
past = datetime(2016, 3, 10, tzinfo=pytz.UTC)
future = datetime(2016, 3, 20, tzinfo=pytz.UTC)
valid_dates = [(past - timedelta(days=1)), past, now, future]
invalid_dates = [(future + timedelta(days=1))]
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run__course=self.course, exam_run__date_first_eligible=past.date(), exam_run__date_last_eligible=future.date())
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for now_value in valid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SCHEDULABLE)
for now_value in invalid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SUCCESS) | def test_get_exam_card_status_eligible(self):
'\n \n '
ExamProfileFactory.create(profile=self.user.profile, status=ExamProfile.PROFILE_SUCCESS)
now = datetime(2016, 3, 15, tzinfo=pytz.UTC)
past = datetime(2016, 3, 10, tzinfo=pytz.UTC)
future = datetime(2016, 3, 20, tzinfo=pytz.UTC)
valid_dates = [(past - timedelta(days=1)), past, now, future]
invalid_dates = [(future + timedelta(days=1))]
ExamAuthorizationFactory.create(user=self.user, status=ExamAuthorization.STATUS_SUCCESS, exam_run__course=self.course, exam_run__date_first_eligible=past.date(), exam_run__date_last_eligible=future.date())
mmtrack = MMTrack(user=self.user, program=self.program, edx_user_data=self.cached_edx_user_data)
for now_value in valid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SCHEDULABLE)
for now_value in invalid_dates:
mmtrack.now = now_value
assert (mmtrack.get_exam_card_status() == ExamProfile.PROFILE_SUCCESS)<|docstring|>test get_exam_card_status against valid eligibility dates<|endoftext|> |
ea0446a9fc4915b71d6d782dad1733d5465381e75e73223a7edc3fa28d740c49 | @ddt.data((82.5, 'A'), (82.0, 'B'), (64.9, 'C'), (55, 'C'), (54.5, 'D'), (49.5, 'F'))
@ddt.unpack
def test_convert_to_letter(self, grade, letter):
'Test that convert_to_letter is correct'
assert (convert_to_letter(grade) == letter) | Test that convert_to_letter is correct | dashboard/utils_test.py | test_convert_to_letter | mitodl/micromasters | 32 | python | @ddt.data((82.5, 'A'), (82.0, 'B'), (64.9, 'C'), (55, 'C'), (54.5, 'D'), (49.5, 'F'))
@ddt.unpack
def test_convert_to_letter(self, grade, letter):
assert (convert_to_letter(grade) == letter) | @ddt.data((82.5, 'A'), (82.0, 'B'), (64.9, 'C'), (55, 'C'), (54.5, 'D'), (49.5, 'F'))
@ddt.unpack
def test_convert_to_letter(self, grade, letter):
assert (convert_to_letter(grade) == letter)<|docstring|>Test that convert_to_letter is correct<|endoftext|> |
e2e52719a35ff4056dd595c2c715d40ae190bd5914c78a750b10ced26541aeba | def __init__(self, cat: Catalog, work_space: str):
'\n\n @param cat: cat 实例\n @param work_space: 工作区名称\n '
self.cat = cat
self.work_space = Workspace(cat, work_space) | @param cat: cat 实例
@param work_space: 工作区名称 | src/geoserver/customer_base.py | __init__ | evaseemefly/gsconfig | 4 | python | def __init__(self, cat: Catalog, work_space: str):
'\n\n @param cat: cat 实例\n @param work_space: 工作区名称\n '
self.cat = cat
self.work_space = Workspace(cat, work_space) | def __init__(self, cat: Catalog, work_space: str):
'\n\n @param cat: cat 实例\n @param work_space: 工作区名称\n '
self.cat = cat
self.work_space = Workspace(cat, work_space)<|docstring|>@param cat: cat 实例
@param work_space: 工作区名称<|endoftext|> |
def get(self):
    """Health endpoint: report liveness with an empty 200 response."""
    return None, 200
'\n \n '
return (None, 200) | def get(self):
'\n \n '
return (None, 200)<|docstring|>Health endpoint<|endoftext|> |
def get(self):
    """Readiness endpoint: report readiness with an empty 200 response."""
    return None, 200
'\n \n '
return (None, 200) | def get(self):
'\n \n '
return (None, 200)<|docstring|>Readiness endpoint<|endoftext|> |
def PositionIsValid(self, x, y):
    """Find out if position (x, y) is valid.

    Validates the row, the column, and the region of cells whose matPos
    value matches matPos[x, y] (presumably the 3x3 box -- matPos is
    defined elsewhere; confirm).
    """
    # Fix: the original wrote `matList[(x, :)]` / `matList[(:, y)]`, which
    # is a syntax error -- a slice may not appear inside a parenthesized
    # tuple, only directly in a subscript.
    ret = self.GroupIsValid(self.matList[x, :])
    ret &= self.GroupIsValid(self.matList[:, y])
    ret &= self.GroupIsValid(self.matList[self.matPos == self.matPos[x, y]])
    return ret
ret = self.GroupIsValid(self.matList[(x, :)])
ret &= self.GroupIsValid(self.matList[(:, y)])
ret &= self.GroupIsValid(self.matList[(self.matPos == self.matPos[(x, y)])])
return ret | def PositionIsValid(self, x, y):
ret = self.GroupIsValid(self.matList[(x, :)])
ret &= self.GroupIsValid(self.matList[(:, y)])
ret &= self.GroupIsValid(self.matList[(self.matPos == self.matPos[(x, y)])])
return ret<|docstring|>find out if position is valid<|endoftext|> |
def _genPos(self, loc):
    """Recursively fill board cells from position `loc` by backtracking.

    Returns 'end' once all positions up to MAX_POS hold valid values;
    otherwise returns None so the caller can try its next candidate.
    """
    if loc >= MAX_POS:
        return 'end'
    row, col = divmod(loc, 9)
    for candidate in self.FindValidVal(row, col):
        self.matList[row, col] = candidate
        if self.PositionIsValid(row, col):
            if self._genPos(loc + 1) == 'end':
                return 'end'
    # No candidate worked: clear the cell before reporting failure.
    # (Assumes the reset sits after the loop, per the flattened original
    # line order -- TODO confirm against the upstream file.)
    self.matList[row, col] = None
    return
if (loc >= MAX_POS):
return 'end'
(x, y) = ((loc // 9), (loc % 9))
validVal = self.FindValidVal(x, y)
for val in validVal:
self.matList[(x, y)] = val
if self.PositionIsValid(x, y):
flag = self._genPos((loc + 1))
if (flag == 'end'):
return 'end'
self.matList[(x, y)] = None
return | def _genPos(self, loc):
if (loc >= MAX_POS):
return 'end'
(x, y) = ((loc // 9), (loc % 9))
validVal = self.FindValidVal(x, y)
for val in validVal:
self.matList[(x, y)] = val
if self.PositionIsValid(x, y):
flag = self._genPos((loc + 1))
if (flag == 'end'):
return 'end'
self.matList[(x, y)] = None
return<|docstring|>recursion function cal each position<|endoftext|> |
def construct_optimizer(model):
    """Constructs the optimizer.

    Note that the momentum update in PyTorch differs from the one in Caffe2:

        Caffe2:  V := mu * V + lr * g ; p := p - V
        PyTorch: V := mu * V + g      ; p := p - lr * V

    where V is the velocity, mu is the momentum factor, lr is the learning
    rate, g is the gradient and p are the parameters. Since V is defined
    independently of the learning rate in PyTorch, no momentum correction
    (rescaling of V) is needed when the learning rate changes, unlike in
    the Caffe2 case.
    """
    # Partition parameters so batch-norm weights can use their own decay.
    named = list(model.named_parameters())
    bn_params = [p for name, p in named if 'bn' in name]
    non_bn_parameters = [p for name, p in named if 'bn' not in name]
    bn_weight_decay = (
        cfg.BN.CUSTOM_WEIGHT_DECAY
        if cfg.BN.USE_CUSTOM_WEIGHT_DECAY
        else cfg.OPTIM.WEIGHT_DECAY
    )
    optim_params = [
        {'params': bn_params, 'weight_decay': bn_weight_decay},
        {'params': non_bn_parameters, 'weight_decay': cfg.OPTIM.WEIGHT_DECAY},
    ]
    # Sanity check: every parameter landed in exactly one group.
    assert len(list(model.parameters())) == (len(non_bn_parameters) + len(bn_params)), \
        'parameter size does not match: {} + {} != {}'.format(
            len(non_bn_parameters), len(bn_params), len(list(model.parameters())))
    return torch.optim.SGD(
        optim_params,
        lr=cfg.OPTIM.BASE_LR,
        momentum=cfg.OPTIM.MOMENTUM,
        weight_decay=cfg.OPTIM.WEIGHT_DECAY,
        dampening=cfg.OPTIM.DAMPENING,
        nesterov=cfg.OPTIM.NESTEROV,
    )
Note that the momentum update in PyTorch differs from the one in Caffe2.
In particular,
Caffe2:
V := mu * V + lr * g
p := p - V
PyTorch:
V := mu * V + g
p := p - lr * V
where V is the velocity, mu is the momentum factor, lr is the learning rate,
g is the gradient and p are the parameters.
Since V is defined independently of the learning rate in PyTorch,
when the learning rate is changed there is no need to perform the
momentum correction by scaling V (unlike in the Caffe2 case). | pycls/core/optimizer.py | construct_optimizer | blackfeather-wang/GFNet-Pytorch | 164 | python | def construct_optimizer(model):
'Constructs the optimizer.\n\n Note that the momentum update in PyTorch differs from the one in Caffe2.\n In particular,\n\n Caffe2:\n V := mu * V + lr * g\n p := p - V\n\n PyTorch:\n V := mu * V + g\n p := p - lr * V\n\n where V is the velocity, mu is the momentum factor, lr is the learning rate,\n g is the gradient and p are the parameters.\n\n Since V is defined independently of the learning rate in PyTorch,\n when the learning rate is changed there is no need to perform the\n momentum correction by scaling V (unlike in the Caffe2 case).\n '
bn_params = []
non_bn_parameters = []
for (name, p) in model.named_parameters():
if ('bn' in name):
bn_params.append(p)
else:
non_bn_parameters.append(p)
bn_weight_decay = (cfg.BN.CUSTOM_WEIGHT_DECAY if cfg.BN.USE_CUSTOM_WEIGHT_DECAY else cfg.OPTIM.WEIGHT_DECAY)
optim_params = [{'params': bn_params, 'weight_decay': bn_weight_decay}, {'params': non_bn_parameters, 'weight_decay': cfg.OPTIM.WEIGHT_DECAY}]
assert (len(list(model.parameters())) == (len(non_bn_parameters) + len(bn_params))), 'parameter size does not match: {} + {} != {}'.format(len(non_bn_parameters), len(bn_params), len(list(model.parameters())))
return torch.optim.SGD(optim_params, lr=cfg.OPTIM.BASE_LR, momentum=cfg.OPTIM.MOMENTUM, weight_decay=cfg.OPTIM.WEIGHT_DECAY, dampening=cfg.OPTIM.DAMPENING, nesterov=cfg.OPTIM.NESTEROV) | def construct_optimizer(model):
'Constructs the optimizer.\n\n Note that the momentum update in PyTorch differs from the one in Caffe2.\n In particular,\n\n Caffe2:\n V := mu * V + lr * g\n p := p - V\n\n PyTorch:\n V := mu * V + g\n p := p - lr * V\n\n where V is the velocity, mu is the momentum factor, lr is the learning rate,\n g is the gradient and p are the parameters.\n\n Since V is defined independently of the learning rate in PyTorch,\n when the learning rate is changed there is no need to perform the\n momentum correction by scaling V (unlike in the Caffe2 case).\n '
bn_params = []
non_bn_parameters = []
for (name, p) in model.named_parameters():
if ('bn' in name):
bn_params.append(p)
else:
non_bn_parameters.append(p)
bn_weight_decay = (cfg.BN.CUSTOM_WEIGHT_DECAY if cfg.BN.USE_CUSTOM_WEIGHT_DECAY else cfg.OPTIM.WEIGHT_DECAY)
optim_params = [{'params': bn_params, 'weight_decay': bn_weight_decay}, {'params': non_bn_parameters, 'weight_decay': cfg.OPTIM.WEIGHT_DECAY}]
assert (len(list(model.parameters())) == (len(non_bn_parameters) + len(bn_params))), 'parameter size does not match: {} + {} != {}'.format(len(non_bn_parameters), len(bn_params), len(list(model.parameters())))
return torch.optim.SGD(optim_params, lr=cfg.OPTIM.BASE_LR, momentum=cfg.OPTIM.MOMENTUM, weight_decay=cfg.OPTIM.WEIGHT_DECAY, dampening=cfg.OPTIM.DAMPENING, nesterov=cfg.OPTIM.NESTEROV)<|docstring|>Constructs the optimizer.
Note that the momentum update in PyTorch differs from the one in Caffe2.
In particular,
Caffe2:
V := mu * V + lr * g
p := p - V
PyTorch:
V := mu * V + g
p := p - lr * V
where V is the velocity, mu is the momentum factor, lr is the learning rate,
g is the gradient and p are the parameters.
Since V is defined independently of the learning rate in PyTorch,
when the learning rate is changed there is no need to perform the
momentum correction by scaling V (unlike in the Caffe2 case).<|endoftext|> |
def get_epoch_lr(cur_epoch):
    """Retrieves the lr for the given epoch (as specified by the lr policy).

    Thin wrapper that delegates to the lr_policy module.

    :param cur_epoch: epoch index to look up.
    :return: the learning rate for that epoch.
    """
    return lr_policy.get_epoch_lr(cur_epoch)
return lr_policy.get_epoch_lr(cur_epoch) | def get_epoch_lr(cur_epoch):
return lr_policy.get_epoch_lr(cur_epoch)<|docstring|>Retrieves the lr for the given epoch (as specified by the lr policy).<|endoftext|> |
def set_lr(optimizer, new_lr):
    """Sets the optimizer lr to the specified value.

    Mutates the 'lr' entry of every parameter group in place.
    """
    for group in optimizer.param_groups:
        group['lr'] = new_lr
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr | def set_lr(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr<|docstring|>Sets the optimizer lr to the specified value.<|endoftext|> |
def draw_sin():
    """Draw a window of a sine wave at the current global frequency."""
    # Sample indices 1..Number-1; `frequency` full cycles fit in the window.
    samples = np.arange(1, Number, 1)
    wave = np.sin(2 * np.pi * frequency * samples / Number)
    plt.plot(samples, wave)
    plt.grid(True)
    plt.text(900, 0.75, 'Frequency is ' + str(frequency))
    plt.show()
t = np.arange(1, Number, 1)
y = np.sin(((((2 * np.pi) * frequency) * t) / Number))
plt.plot(t, y)
plt.grid(True)
plt.text(900, 0.75, ('Frequency is ' + str(frequency)))
plt.show() | def draw_sin():
t = np.arange(1, Number, 1)
y = np.sin(((((2 * np.pi) * frequency) * t) / Number))
plt.plot(t, y)
plt.grid(True)
plt.text(900, 0.75, ('Frequency is ' + str(frequency)))
plt.show()<|docstring|>raw a circle of sin<|endoftext|> |
def frequency_plus():
    """Increment the global frequency, clear the figure, and redraw the plot."""
    global frequency
    frequency += 1
    plt.clf()
    draw_sin()
global frequency
frequency = (frequency + 1)
plt.clf()
draw_sin() | def frequency_plus():
global frequency
frequency = (frequency + 1)
plt.clf()
draw_sin()<|docstring|>function of add the frequency and plot the signal<|endoftext|> |
def my_button(root, label_text, button_text, button_func):
    """Create and pack a Label plus a Button (wired to button_func) under root."""
    # Configure via constructor options instead of post-creation item
    # assignment; Tk treats both identically.
    Label(root, text=label_text).pack()
    Button(root, text=button_text, command=button_func).pack()
label = Label(root)
label['text'] = label_text
label.pack()
button = Button(root)
button['text'] = button_text
button['command'] = button_func
button.pack() | def my_button(root, label_text, button_text, button_func):
label = Label(root)
label['text'] = label_text
label.pack()
button = Button(root)
button['text'] = button_text
button['command'] = button_func
button.pack()<|docstring|>function of creat label and button<|endoftext|> |
def main():
    """Build the Tk window, add the draw/frequency controls, and run the loop."""
    window = Tk(className='DrawSin')
    for label_text, button_text, handler in (
        ('Draw sin', 'click to Draw', draw_sin),
        ('Freq Plus', 'click to Plus', frequency_plus),
    ):
        my_button(window, label_text, button_text, handler)
    window.mainloop()
root = Tk(className='DrawSin')
my_button(root, 'Draw sin', 'click to Draw', draw_sin)
my_button(root, 'Freq Plus', 'click to Plus', frequency_plus)
root.mainloop() | def main():
root = Tk(className='DrawSin')
my_button(root, 'Draw sin', 'click to Draw', draw_sin)
my_button(root, 'Freq Plus', 'click to Plus', frequency_plus)
root.mainloop()<|docstring|>main function<|endoftext|> |
def run_classifier(vec_model_name, classifier, classifier_details):
    """Given the name of the word vector model, a sklearn classifier instance,
    and a dictionary of the classifier details, load the dataset and run the
    classification algorithm.

    Metrics are recorded via calculate_classifier_metrics and a confusion
    matrix is displayed.
    """
    training_data, training_labels = get_training_info(vec_model_name)
    test_data, test_labels = get_test_info(vec_model_name)
    # ComplementNB requires non-negative features, so rescale into [0, 1].
    if type(classifier) is ComplementNB:
        training_data = minmax_scale(training_data, feature_range=(0, 1))
        test_data = minmax_scale(test_data, feature_range=(0, 1))
    classifier.fit(training_data, training_labels)
    predictions = classifier.predict(test_data)
    calculate_classifier_metrics(test_labels, predictions, classifier_details)
    # NOTE(review): the original built a plot title here
    # ('{Model} w/{vec_model_name} Confusion Matrix') but never passed it to
    # show_confusion_matrix; removed as dead code. TODO: confirm whether
    # show_confusion_matrix should accept a title argument.
    show_confusion_matrix(test_labels, predictions)
load the dataset and run the classification algorithm. The results are then saved to a JSON file. | Scripts/run_classification.py | run_classifier | MathewTWilliams/News-Category-Classifiers | 0 | python | def run_classifier(vec_model_name, classifier, classifier_details):
'Given the name of the word vector model, a sklearn classifier instance, and a dictionary of the classifier details,\n load the dataset and run the classification algorithm. The results are then saved to a JSON file. '
(training_data, training_labels) = get_training_info(vec_model_name)
(test_data, test_labels) = get_test_info(vec_model_name)
if (type(classifier) is ComplementNB):
training_data = minmax_scale(training_data, feature_range=(0, 1))
test_data = minmax_scale(test_data, feature_range=(0, 1))
classifier.fit(training_data, training_labels)
predictions = classifier.predict(test_data)
calculate_classifier_metrics(test_labels, predictions, classifier_details)
title = '{} w/{} Confusion Matrix'.format(classifier_details['Model'], vec_model_name)
show_confusion_matrix(test_labels, predictions) | def run_classifier(vec_model_name, classifier, classifier_details):
'Given the name of the word vector model, a sklearn classifier instance, and a dictionary of the classifier details,\n load the dataset and run the classification algorithm. The results are then saved to a JSON file. '
(training_data, training_labels) = get_training_info(vec_model_name)
(test_data, test_labels) = get_test_info(vec_model_name)
if (type(classifier) is ComplementNB):
training_data = minmax_scale(training_data, feature_range=(0, 1))
test_data = minmax_scale(test_data, feature_range=(0, 1))
classifier.fit(training_data, training_labels)
predictions = classifier.predict(test_data)
calculate_classifier_metrics(test_labels, predictions, classifier_details)
title = '{} w/{} Confusion Matrix'.format(classifier_details['Model'], vec_model_name)
show_confusion_matrix(test_labels, predictions)<|docstring|>Given the name of the word vector model, a sklearn classifier instance, and a dictionary of the classifier details,
load the dataset and run the classification algorithm. The results are then saved to a JSON file.<|endoftext|> |
@staticmethod
def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in
        order to capture some information, but behaves like an
        ArgumentParser.
    """
    help_text = 'The build to describe. The ID of the build is printed at the end of the build submission process, or in the ID column when listing builds.'
    parser.add_argument('build', help=help_text)
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser. | google-cloud-sdk/lib/surface/container/builds/describe.py | Args | KaranToor/MA450 | 1 | python | @staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n '
parser.add_argument('build', help='The build to describe. The ID of the build is printed at the end of the build submission process, or in the ID column when listing builds.') | @staticmethod
def Args(parser):
'Register flags for this command.\n\n Args:\n parser: An argparse.ArgumentParser-like object. It is mocked out in order\n to capture some information, but behaves like an ArgumentParser.\n '
parser.add_argument('build', help='The build to describe. The ID of the build is printed at the end of the build submission process, or in the ID column when listing builds.')<|docstring|>Register flags for this command.
Args:
parser: An argparse.ArgumentParser-like object. It is mocked out in order
to capture some information, but behaves like an ArgumentParser.<|endoftext|> |
def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to
        this command invocation.

    Returns:
      The build resource fetched from the Cloud Build API, for later display.
    """
    client = cloudbuild_util.GetClientInstance()
    build_ref = self.context['registry'].Parse(
        args.build, collection='cloudbuild.projects.builds')
    request = client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(
        projectId=build_ref.projectId, id=build_ref.id)
    return client.projects_builds.Get(request)
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later. | google-cloud-sdk/lib/surface/container/builds/describe.py | Run | KaranToor/MA450 | 1 | python | def Run(self, args):
'This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n Some value that we want to have printed later.\n '
client = cloudbuild_util.GetClientInstance()
resources = self.context['registry']
build_ref = resources.Parse(args.build, collection='cloudbuild.projects.builds')
return client.projects_builds.Get(client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(projectId=build_ref.projectId, id=build_ref.id)) | def Run(self, args):
'This is what gets called when the user runs this command.\n\n Args:\n args: an argparse namespace. All the arguments that were provided to this\n command invocation.\n\n Returns:\n Some value that we want to have printed later.\n '
client = cloudbuild_util.GetClientInstance()
resources = self.context['registry']
build_ref = resources.Parse(args.build, collection='cloudbuild.projects.builds')
return client.projects_builds.Get(client.MESSAGES_MODULE.CloudbuildProjectsBuildsGetRequest(projectId=build_ref.projectId, id=build_ref.id))<|docstring|>This is what gets called when the user runs this command.
Args:
args: an argparse namespace. All the arguments that were provided to this
command invocation.
Returns:
Some value that we want to have printed later.<|endoftext|> |
def start(self):
    """Starts the watch.

    Records the current monotonic timestamp; returns self so calls can
    be chained.
    """
    self._started_at = compat.monotonic()
    return self
self._started_at = compat.monotonic()
return self | def start(self):
self._started_at = compat.monotonic()
return self<|docstring|>Starts the watch.<|endoftext|> |
def elapsed(self):
    """Get how many seconds have elapsed.

    :return: Number of seconds elapsed
    :rtype: float
    """
    if self._started_at is None:
        raise RuntimeError('Can not get the elapsed time of a stopwatch if it has not been started/stopped')
    # Use the stop timestamp when stopped, otherwise measure up to "now".
    end = self._stopped_at if self._stopped_at is not None else compat.monotonic()
    return end - self._started_at
:return: Number of seconds elapsed
:rtype: float | ddtrace/internal/utils/time.py | elapsed | mastizada/dd-trace-py | 308 | python | def elapsed(self):
'Get how many seconds have elapsed.\n\n :return: Number of seconds elapsed\n :rtype: float\n '
if (self._started_at is None):
raise RuntimeError('Can not get the elapsed time of a stopwatch if it has not been started/stopped')
if (self._stopped_at is None):
now = compat.monotonic()
else:
now = self._stopped_at
return (now - self._started_at) | def elapsed(self):
'Get how many seconds have elapsed.\n\n :return: Number of seconds elapsed\n :rtype: float\n '
if (self._started_at is None):
raise RuntimeError('Can not get the elapsed time of a stopwatch if it has not been started/stopped')
if (self._stopped_at is None):
now = compat.monotonic()
else:
now = self._stopped_at
return (now - self._started_at)<|docstring|>Get how many seconds have elapsed.
:return: Number of seconds elapsed
:rtype: float<|endoftext|> |
def __enter__(self):
    """Starts the watch.

    Entering a ``with`` block begins timing; the watch itself is bound
    to the ``as`` target.
    """
    self.start()
    return self
self.start()
return self | def __enter__(self):
self.start()
return self<|docstring|>Starts the watch.<|endoftext|> |
def __exit__(self, tp, value, traceback):
    """Stops the watch.

    Implicitly returns None, so exceptions raised inside the ``with``
    block are not suppressed.
    """
    self.stop()
self.stop() | def __exit__(self, tp, value, traceback):
self.stop()<|docstring|>Stops the watch.<|endoftext|> |
def stop(self):
    """Stops the watch.

    :return: this stopwatch, so calls can be chained.
    """
    started = self._started_at
    if started is None:
        raise RuntimeError('Can not stop a stopwatch that has not been started')
    self._stopped_at = compat.monotonic()
    return self
if (self._started_at is None):
raise RuntimeError('Can not stop a stopwatch that has not been started')
self._stopped_at = compat.monotonic()
return self | def stop(self):
if (self._started_at is None):
raise RuntimeError('Can not stop a stopwatch that has not been started')
self._stopped_at = compat.monotonic()
return self<|docstring|>Stops the watch.<|endoftext|> |
@pytest.mark.parametrize('field', EXPECTED_FIELDS)
def test_read(self, field, mock_bower_environment, snapshot):
    """Each default config value (via its property getter) matches the snapshot."""
    value = getattr(CONFIG.BOWER, field)
    snapshot.assert_match(value)
def test_read(self, field, mock_bower_environment, snapshot):
'\n \n '
snapshot.assert_match(getattr(CONFIG.BOWER, field)) | @pytest.mark.parametrize('field', EXPECTED_FIELDS)
def test_read(self, field, mock_bower_environment, snapshot):
'\n \n '
snapshot.assert_match(getattr(CONFIG.BOWER, field))<|docstring|>Test reading default config values and testing property getter functions.<|endoftext|> |
def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
    """V1beta1ReplicaSetCondition - a model defined in Swagger.

    :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
    :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
    """
    # Attribute name -> Swagger type, used by the generic (de)serializer.
    self.swagger_types = dict(
        last_transition_time='datetime',
        message='str',
        reason='str',
        status='str',
        type='str',
    )
    # Attribute name -> JSON key in the API definition.
    self.attribute_map = dict(
        last_transition_time='lastTransitionTime',
        message='message',
        reason='reason',
        status='status',
        type='type',
    )
    # Backing fields read/written by the generated property accessors.
    self._last_transition_time = last_transition_time
    self._message = message
    self._reason = reason
    self._status = status
    self._type = type
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition. | src/kubernetes/client/models/v1beta1_replica_set_condition.py | __init__ | MarletteFunding/aws-kube-codesuite | 184 | python | def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
'\n V1beta1ReplicaSetCondition - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'last_transition_time': 'datetime', 'message': 'str', 'reason': 'str', 'status': 'str', 'type': 'str'}
self.attribute_map = {'last_transition_time': 'lastTransitionTime', 'message': 'message', 'reason': 'reason', 'status': 'status', 'type': 'type'}
self._last_transition_time = last_transition_time
self._message = message
self._reason = reason
self._status = status
self._type = type | def __init__(self, last_transition_time=None, message=None, reason=None, status=None, type=None):
'\n V1beta1ReplicaSetCondition - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n '
self.swagger_types = {'last_transition_time': 'datetime', 'message': 'str', 'reason': 'str', 'status': 'str', 'type': 'str'}
self.attribute_map = {'last_transition_time': 'lastTransitionTime', 'message': 'message', 'reason': 'reason', 'status': 'status', 'type': 'type'}
self._last_transition_time = last_transition_time
self._message = message
self._reason = reason
self._status = status
self._type = type<|docstring|>V1beta1ReplicaSetCondition - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.<|endoftext|> |
@property
def last_transition_time(self):
    """Gets the last_transition_time of this V1beta1ReplicaSetCondition.

    The last time the condition transitioned from one status to another.

    :return: The last_transition_time of this V1beta1ReplicaSetCondition.
    :rtype: datetime
    """
    return self._last_transition_time
The last time the condition transitioned from one status to another.
:return: The last_transition_time of this V1beta1ReplicaSetCondition.
:rtype: datetime | src/kubernetes/client/models/v1beta1_replica_set_condition.py | last_transition_time | MarletteFunding/aws-kube-codesuite | 184 | python | @property
def last_transition_time(self):
'\n Gets the last_transition_time of this V1beta1ReplicaSetCondition.\n The last time the condition transitioned from one status to another.\n\n :return: The last_transition_time of this V1beta1ReplicaSetCondition.\n :rtype: datetime\n '
return self._last_transition_time | @property
def last_transition_time(self):
'\n Gets the last_transition_time of this V1beta1ReplicaSetCondition.\n The last time the condition transitioned from one status to another.\n\n :return: The last_transition_time of this V1beta1ReplicaSetCondition.\n :rtype: datetime\n '
return self._last_transition_time<|docstring|>Gets the last_transition_time of this V1beta1ReplicaSetCondition.
The last time the condition transitioned from one status to another.
:return: The last_transition_time of this V1beta1ReplicaSetCondition.
:rtype: datetime<|endoftext|> |
@last_transition_time.setter
def last_transition_time(self, last_transition_time):
    """Sets the last_transition_time of this V1beta1ReplicaSetCondition.

    The last time the condition transitioned from one status to another.

    :param last_transition_time: The last_transition_time of this
        V1beta1ReplicaSetCondition.
    :type: datetime
    """
    self._last_transition_time = last_transition_time
The last time the condition transitioned from one status to another.
:param last_transition_time: The last_transition_time of this V1beta1ReplicaSetCondition.
:type: datetime | src/kubernetes/client/models/v1beta1_replica_set_condition.py | last_transition_time | MarletteFunding/aws-kube-codesuite | 184 | python | @last_transition_time.setter
def last_transition_time(self, last_transition_time):
'\n Sets the last_transition_time of this V1beta1ReplicaSetCondition.\n The last time the condition transitioned from one status to another.\n\n :param last_transition_time: The last_transition_time of this V1beta1ReplicaSetCondition.\n :type: datetime\n '
self._last_transition_time = last_transition_time | @last_transition_time.setter
def last_transition_time(self, last_transition_time):
'\n Sets the last_transition_time of this V1beta1ReplicaSetCondition.\n The last time the condition transitioned from one status to another.\n\n :param last_transition_time: The last_transition_time of this V1beta1ReplicaSetCondition.\n :type: datetime\n '
self._last_transition_time = last_transition_time<|docstring|>Sets the last_transition_time of this V1beta1ReplicaSetCondition.
The last time the condition transitioned from one status to another.
:param last_transition_time: The last_transition_time of this V1beta1ReplicaSetCondition.
:type: datetime<|endoftext|> |
@property
def message(self):
    """Gets the message of this V1beta1ReplicaSetCondition.

    A human readable message indicating details about the transition.

    :return: The message of this V1beta1ReplicaSetCondition.
    :rtype: str
    """
    return self._message
A human readable message indicating details about the transition.
:return: The message of this V1beta1ReplicaSetCondition.
:rtype: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | message | MarletteFunding/aws-kube-codesuite | 184 | python | @property
def message(self):
'\n Gets the message of this V1beta1ReplicaSetCondition.\n A human readable message indicating details about the transition.\n\n :return: The message of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._message | @property
def message(self):
'\n Gets the message of this V1beta1ReplicaSetCondition.\n A human readable message indicating details about the transition.\n\n :return: The message of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._message<|docstring|>Gets the message of this V1beta1ReplicaSetCondition.
A human readable message indicating details about the transition.
:return: The message of this V1beta1ReplicaSetCondition.
:rtype: str<|endoftext|> |
@message.setter
def message(self, message):
    """Sets the message of this V1beta1ReplicaSetCondition.

    A human readable message indicating details about the transition.

    :param message: The message of this V1beta1ReplicaSetCondition.
    :type: str
    """
    self._message = message
A human readable message indicating details about the transition.
:param message: The message of this V1beta1ReplicaSetCondition.
:type: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | message | MarletteFunding/aws-kube-codesuite | 184 | python | @message.setter
def message(self, message):
'\n Sets the message of this V1beta1ReplicaSetCondition.\n A human readable message indicating details about the transition.\n\n :param message: The message of this V1beta1ReplicaSetCondition.\n :type: str\n '
self._message = message | @message.setter
def message(self, message):
'\n Sets the message of this V1beta1ReplicaSetCondition.\n A human readable message indicating details about the transition.\n\n :param message: The message of this V1beta1ReplicaSetCondition.\n :type: str\n '
self._message = message<|docstring|>Sets the message of this V1beta1ReplicaSetCondition.
A human readable message indicating details about the transition.
:param message: The message of this V1beta1ReplicaSetCondition.
:type: str<|endoftext|> |
b40f27fe66b23b9dc88ba945a1523720d287c4d8affdce429f314cacf10d21e3 | @property
def reason(self):
"\n Gets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :return: The reason of this V1beta1ReplicaSetCondition.\n :rtype: str\n "
return self._reason | Gets the reason of this V1beta1ReplicaSetCondition.
The reason for the condition's last transition.
:return: The reason of this V1beta1ReplicaSetCondition.
:rtype: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | reason | MarletteFunding/aws-kube-codesuite | 184 | python | @property
def reason(self):
"\n Gets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :return: The reason of this V1beta1ReplicaSetCondition.\n :rtype: str\n "
return self._reason | @property
def reason(self):
"\n Gets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :return: The reason of this V1beta1ReplicaSetCondition.\n :rtype: str\n "
return self._reason<|docstring|>Gets the reason of this V1beta1ReplicaSetCondition.
The reason for the condition's last transition.
:return: The reason of this V1beta1ReplicaSetCondition.
:rtype: str<|endoftext|> |
9737fb7cedcca870ed457381fa7f78d07025a71044cef35321ed6cfe9e6343a5 | @reason.setter
def reason(self, reason):
"\n Sets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :param reason: The reason of this V1beta1ReplicaSetCondition.\n :type: str\n "
self._reason = reason | Sets the reason of this V1beta1ReplicaSetCondition.
The reason for the condition's last transition.
:param reason: The reason of this V1beta1ReplicaSetCondition.
:type: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | reason | MarletteFunding/aws-kube-codesuite | 184 | python | @reason.setter
def reason(self, reason):
"\n Sets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :param reason: The reason of this V1beta1ReplicaSetCondition.\n :type: str\n "
self._reason = reason | @reason.setter
def reason(self, reason):
"\n Sets the reason of this V1beta1ReplicaSetCondition.\n The reason for the condition's last transition.\n\n :param reason: The reason of this V1beta1ReplicaSetCondition.\n :type: str\n "
self._reason = reason<|docstring|>Sets the reason of this V1beta1ReplicaSetCondition.
The reason for the condition's last transition.
:param reason: The reason of this V1beta1ReplicaSetCondition.
:type: str<|endoftext|> |
6d4a64fe18b4560a60e6c4f5a94a92f2776afb609d509e54a501143947994946 | @property
def status(self):
'\n Gets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :return: The status of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._status | Gets the status of this V1beta1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:return: The status of this V1beta1ReplicaSetCondition.
:rtype: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | status | MarletteFunding/aws-kube-codesuite | 184 | python | @property
def status(self):
'\n Gets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :return: The status of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._status | @property
def status(self):
'\n Gets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :return: The status of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._status<|docstring|>Gets the status of this V1beta1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:return: The status of this V1beta1ReplicaSetCondition.
:rtype: str<|endoftext|> |
7044ad81335a0f1eed1da58de4f41daa75b325dbf317f8ef68e2b5735b5773c8 | @status.setter
def status(self, status):
'\n Sets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :param status: The status of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
self._status = status | Sets the status of this V1beta1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:param status: The status of this V1beta1ReplicaSetCondition.
:type: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | status | MarletteFunding/aws-kube-codesuite | 184 | python | @status.setter
def status(self, status):
'\n Sets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :param status: The status of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
self._status = status | @status.setter
def status(self, status):
'\n Sets the status of this V1beta1ReplicaSetCondition.\n Status of the condition, one of True, False, Unknown.\n\n :param status: The status of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (status is None):
raise ValueError('Invalid value for `status`, must not be `None`')
self._status = status<|docstring|>Sets the status of this V1beta1ReplicaSetCondition.
Status of the condition, one of True, False, Unknown.
:param status: The status of this V1beta1ReplicaSetCondition.
:type: str<|endoftext|> |
523552f95ce3ec90033296caba18eba17fa1a5d44501532ce6a9d07ddb2f581c | @property
def type(self):
'\n Gets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :return: The type of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._type | Gets the type of this V1beta1ReplicaSetCondition.
Type of replica set condition.
:return: The type of this V1beta1ReplicaSetCondition.
:rtype: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | type | MarletteFunding/aws-kube-codesuite | 184 | python | @property
def type(self):
'\n Gets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :return: The type of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._type | @property
def type(self):
'\n Gets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :return: The type of this V1beta1ReplicaSetCondition.\n :rtype: str\n '
return self._type<|docstring|>Gets the type of this V1beta1ReplicaSetCondition.
Type of replica set condition.
:return: The type of this V1beta1ReplicaSetCondition.
:rtype: str<|endoftext|> |
0a0b7b4ede7ace54003b8e0d6267d01c3dbd656be54a71a0ec93211fed9758ce | @type.setter
def type(self, type):
'\n Sets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :param type: The type of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (type is None):
raise ValueError('Invalid value for `type`, must not be `None`')
self._type = type | Sets the type of this V1beta1ReplicaSetCondition.
Type of replica set condition.
:param type: The type of this V1beta1ReplicaSetCondition.
:type: str | src/kubernetes/client/models/v1beta1_replica_set_condition.py | type | MarletteFunding/aws-kube-codesuite | 184 | python | @type.setter
def type(self, type):
'\n Sets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :param type: The type of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (type is None):
raise ValueError('Invalid value for `type`, must not be `None`')
self._type = type | @type.setter
def type(self, type):
'\n Sets the type of this V1beta1ReplicaSetCondition.\n Type of replica set condition.\n\n :param type: The type of this V1beta1ReplicaSetCondition.\n :type: str\n '
if (type is None):
raise ValueError('Invalid value for `type`, must not be `None`')
self._type = type<|docstring|>Sets the type of this V1beta1ReplicaSetCondition.
Type of replica set condition.
:param type: The type of this V1beta1ReplicaSetCondition.
:type: str<|endoftext|> |
f92515cd38effc7eee4069f2288d78a0f0836df932fb36a84e3b4f7e14233415 | def to_dict(self):
'\n Returns the model properties as a dict\n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | Returns the model properties as a dict | src/kubernetes/client/models/v1beta1_replica_set_condition.py | to_dict | MarletteFunding/aws-kube-codesuite | 184 | python | def to_dict(self):
'\n \n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result | def to_dict(self):
'\n \n '
result = {}
for (attr, _) in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map((lambda x: (x.to_dict() if hasattr(x, 'to_dict') else x)), value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map((lambda item: ((item[0], item[1].to_dict()) if hasattr(item[1], 'to_dict') else item)), value.items()))
else:
result[attr] = value
return result<|docstring|>Returns the model properties as a dict<|endoftext|> |
c373d87dd29c1e96dce460ab571bff86e58edb298ba83c85d8cc7603a6505de4 | def to_str(self):
'\n Returns the string representation of the model\n '
return pformat(self.to_dict()) | Returns the string representation of the model | src/kubernetes/client/models/v1beta1_replica_set_condition.py | to_str | MarletteFunding/aws-kube-codesuite | 184 | python | def to_str(self):
'\n \n '
return pformat(self.to_dict()) | def to_str(self):
'\n \n '
return pformat(self.to_dict())<|docstring|>Returns the string representation of the model<|endoftext|> |
1034ff7dd2eef24d21e3c2fa7409b793ab5cbb8cd75a2eb0ab3e62604b26264d | def __repr__(self):
'\n For `print` and `pprint`\n '
return self.to_str() | For `print` and `pprint` | src/kubernetes/client/models/v1beta1_replica_set_condition.py | __repr__ | MarletteFunding/aws-kube-codesuite | 184 | python | def __repr__(self):
'\n \n '
return self.to_str() | def __repr__(self):
'\n \n '
return self.to_str()<|docstring|>For `print` and `pprint`<|endoftext|> |
a34fd6eea58293fbb4758996c0ae1f7ee3122428bbdb3b4371e15aa5926723a7 | def __eq__(self, other):
'\n Returns true if both objects are equal\n '
if (not isinstance(other, V1beta1ReplicaSetCondition)):
return False
return (self.__dict__ == other.__dict__) | Returns true if both objects are equal | src/kubernetes/client/models/v1beta1_replica_set_condition.py | __eq__ | MarletteFunding/aws-kube-codesuite | 184 | python | def __eq__(self, other):
'\n \n '
if (not isinstance(other, V1beta1ReplicaSetCondition)):
return False
return (self.__dict__ == other.__dict__) | def __eq__(self, other):
'\n \n '
if (not isinstance(other, V1beta1ReplicaSetCondition)):
return False
return (self.__dict__ == other.__dict__)<|docstring|>Returns true if both objects are equal<|endoftext|> |
e5050f8e1402e3a4c90d6c6e229c4c9e2b8ec61e0be457915ea9d976f7e6b0b4 | def __ne__(self, other):
'\n Returns true if both objects are not equal\n '
return (not (self == other)) | Returns true if both objects are not equal | src/kubernetes/client/models/v1beta1_replica_set_condition.py | __ne__ | MarletteFunding/aws-kube-codesuite | 184 | python | def __ne__(self, other):
'\n \n '
return (not (self == other)) | def __ne__(self, other):
'\n \n '
return (not (self == other))<|docstring|>Returns true if both objects are not equal<|endoftext|> |
d26ec986fb0991aea04ff3d732b3022d547fc5f74261e820b953f13ed473fe04 | def get_homedir():
'determine home directory of current user'
global _HOME
if (_HOME is not None):
return _HOME
home = None
try:
from pathlib import Path
home = str(Path.home())
except:
pass
if (home is None):
home = os.path.expanduser('~')
if (home is None):
home = os.environ.get('HOME', os.path.abspath('..'))
_HOME = home
return home | determine home directory of current user | pycrosskit/platforms/linux.py | get_homedir | jiri-otoupal/py-cross-kit | 4 | python | def get_homedir():
global _HOME
if (_HOME is not None):
return _HOME
home = None
try:
from pathlib import Path
home = str(Path.home())
except:
pass
if (home is None):
home = os.path.expanduser('~')
if (home is None):
home = os.environ.get('HOME', os.path.abspath('..'))
_HOME = home
return home | def get_homedir():
global _HOME
if (_HOME is not None):
return _HOME
home = None
try:
from pathlib import Path
home = str(Path.home())
except:
pass
if (home is None):
home = os.path.expanduser('~')
if (home is None):
home = os.environ.get('HOME', os.path.abspath('..'))
_HOME = home
return home<|docstring|>determine home directory of current user<|endoftext|> |
f525ba7f02961c78e2c44c11962672570d4fd0a68d0c6ace97391c023c4b2607 | def get_desktop():
'get desktop location'
homedir = get_homedir()
desktop = os.path.join(homedir, 'Desktop')
ud_file = os.path.join(homedir, '.config', 'user-dirs.dirs')
if os.path.exists(ud_file):
val = desktop
with open(ud_file, 'r') as fh:
text = fh.readlines()
for line in text:
if ('DESKTOP' in line):
line = line.replace('$HOME', homedir)[:(- 1)]
(key, val) = line.split('=')
val = val.replace('"', '').replace("'", '')
desktop = val
return desktop | get desktop location | pycrosskit/platforms/linux.py | get_desktop | jiri-otoupal/py-cross-kit | 4 | python | def get_desktop():
homedir = get_homedir()
desktop = os.path.join(homedir, 'Desktop')
ud_file = os.path.join(homedir, '.config', 'user-dirs.dirs')
if os.path.exists(ud_file):
val = desktop
with open(ud_file, 'r') as fh:
text = fh.readlines()
for line in text:
if ('DESKTOP' in line):
line = line.replace('$HOME', homedir)[:(- 1)]
(key, val) = line.split('=')
val = val.replace('"', ).replace("'", )
desktop = val
return desktop | def get_desktop():
homedir = get_homedir()
desktop = os.path.join(homedir, 'Desktop')
ud_file = os.path.join(homedir, '.config', 'user-dirs.dirs')
if os.path.exists(ud_file):
val = desktop
with open(ud_file, 'r') as fh:
text = fh.readlines()
for line in text:
if ('DESKTOP' in line):
line = line.replace('$HOME', homedir)[:(- 1)]
(key, val) = line.split('=')
val = val.replace('"', ).replace("'", )
desktop = val
return desktop<|docstring|>get desktop location<|endoftext|> |
47440977781543b8dc1bc35c1689e0214a3fd69bf9a5c0127bf96b742168f1de | def get_startmenu():
'get start menu location'
homedir = get_homedir()
return os.path.join(homedir, '.local', 'share', 'applications') | get start menu location | pycrosskit/platforms/linux.py | get_startmenu | jiri-otoupal/py-cross-kit | 4 | python | def get_startmenu():
homedir = get_homedir()
return os.path.join(homedir, '.local', 'share', 'applications') | def get_startmenu():
homedir = get_homedir()
return os.path.join(homedir, '.local', 'share', 'applications')<|docstring|>get start menu location<|endoftext|> |
bb31632040c54badf1766b36cb8a1509b87bd8cf7cca42eab50179aa457f634e | def create_shortcut(shortcut_instance, desktop=False, startmenu=False):
'\n Create Shortcut\n :param shortcut_instance: Shortcut Instance\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
text = DESKTOP_FORM.format(name=shortcut_instance.shortcut_name, desc=shortcut_instance.description, exe=shortcut_instance.exec_path, icon=shortcut_instance.icon_path, args=shortcut_instance.arguments)
user_folders = get_folders()
for (create, folder) in ((desktop, user_folders.desktop), (startmenu, user_folders.startmenu)):
if create:
if (not os.path.exists(folder)):
os.makedirs(folder)
dest = os.path.join(folder, shortcut_instance.exec_path)
with open(dest, 'w') as fout:
fout.write(text)
os.chmod(dest, stat.S_IWRITE)
return (user_folders.desktop, user_folders.startmenu) | Create Shortcut
:param shortcut_instance: Shortcut Instance
:param startmenu: True to create Start Menu Shortcut
:param desktop: True to create Desktop Shortcut
:return: desktop icon path, start menu path
:rtype: str, str | pycrosskit/platforms/linux.py | create_shortcut | jiri-otoupal/py-cross-kit | 4 | python | def create_shortcut(shortcut_instance, desktop=False, startmenu=False):
'\n Create Shortcut\n :param shortcut_instance: Shortcut Instance\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
text = DESKTOP_FORM.format(name=shortcut_instance.shortcut_name, desc=shortcut_instance.description, exe=shortcut_instance.exec_path, icon=shortcut_instance.icon_path, args=shortcut_instance.arguments)
user_folders = get_folders()
for (create, folder) in ((desktop, user_folders.desktop), (startmenu, user_folders.startmenu)):
if create:
if (not os.path.exists(folder)):
os.makedirs(folder)
dest = os.path.join(folder, shortcut_instance.exec_path)
with open(dest, 'w') as fout:
fout.write(text)
os.chmod(dest, stat.S_IWRITE)
return (user_folders.desktop, user_folders.startmenu) | def create_shortcut(shortcut_instance, desktop=False, startmenu=False):
'\n Create Shortcut\n :param shortcut_instance: Shortcut Instance\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
text = DESKTOP_FORM.format(name=shortcut_instance.shortcut_name, desc=shortcut_instance.description, exe=shortcut_instance.exec_path, icon=shortcut_instance.icon_path, args=shortcut_instance.arguments)
user_folders = get_folders()
for (create, folder) in ((desktop, user_folders.desktop), (startmenu, user_folders.startmenu)):
if create:
if (not os.path.exists(folder)):
os.makedirs(folder)
dest = os.path.join(folder, shortcut_instance.exec_path)
with open(dest, 'w') as fout:
fout.write(text)
os.chmod(dest, stat.S_IWRITE)
return (user_folders.desktop, user_folders.startmenu)<|docstring|>Create Shortcut
:param shortcut_instance: Shortcut Instance
:param startmenu: True to create Start Menu Shortcut
:param desktop: True to create Desktop Shortcut
:return: desktop icon path, start menu path
:rtype: str, str<|endoftext|> |
099a4694a9c3f44d4acfdc297760bfbdfc3f48e908bda739cec461a64db06ccd | def delete_shortcut(shortcut_name, desktop=False, startmenu=False):
'\n Delete Shortcut\n :param shortcut_name: Name of Shortcut\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
user_folders = get_folders()
(desktop_path, startmenu_path) = ('', '')
if startmenu:
startmenu_path = (((user_folders.startmenu + '/') + shortcut_name) + scut_ext)
if os.path.exists(startmenu_path):
os.chmod(startmenu_path, stat.S_IWRITE)
os.remove(startmenu_path)
if desktop:
desktop_path = (((user_folders.desktop + '/') + shortcut_name) + scut_ext)
if os.path.exists(desktop_path):
os.chmod(desktop_path, stat.S_IWRITE)
os.remove(desktop_path)
return (desktop_path, startmenu_path) | Delete Shortcut
:param shortcut_name: Name of Shortcut
:param startmenu: True to create Start Menu Shortcut
:param desktop: True to create Desktop Shortcut
:return: desktop icon path, start menu path
:rtype: str, str | pycrosskit/platforms/linux.py | delete_shortcut | jiri-otoupal/py-cross-kit | 4 | python | def delete_shortcut(shortcut_name, desktop=False, startmenu=False):
'\n Delete Shortcut\n :param shortcut_name: Name of Shortcut\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
user_folders = get_folders()
(desktop_path, startmenu_path) = (, )
if startmenu:
startmenu_path = (((user_folders.startmenu + '/') + shortcut_name) + scut_ext)
if os.path.exists(startmenu_path):
os.chmod(startmenu_path, stat.S_IWRITE)
os.remove(startmenu_path)
if desktop:
desktop_path = (((user_folders.desktop + '/') + shortcut_name) + scut_ext)
if os.path.exists(desktop_path):
os.chmod(desktop_path, stat.S_IWRITE)
os.remove(desktop_path)
return (desktop_path, startmenu_path) | def delete_shortcut(shortcut_name, desktop=False, startmenu=False):
'\n Delete Shortcut\n :param shortcut_name: Name of Shortcut\n :param startmenu: True to create Start Menu Shortcut\n :param desktop: True to create Desktop Shortcut\n :return: desktop icon path, start menu path\n :rtype: str, str\n '
user_folders = get_folders()
(desktop_path, startmenu_path) = (, )
if startmenu:
startmenu_path = (((user_folders.startmenu + '/') + shortcut_name) + scut_ext)
if os.path.exists(startmenu_path):
os.chmod(startmenu_path, stat.S_IWRITE)
os.remove(startmenu_path)
if desktop:
desktop_path = (((user_folders.desktop + '/') + shortcut_name) + scut_ext)
if os.path.exists(desktop_path):
os.chmod(desktop_path, stat.S_IWRITE)
os.remove(desktop_path)
return (desktop_path, startmenu_path)<|docstring|>Delete Shortcut
:param shortcut_name: Name of Shortcut
:param startmenu: True to create Start Menu Shortcut
:param desktop: True to create Desktop Shortcut
:return: desktop icon path, start menu path
:rtype: str, str<|endoftext|> |
55461661e900d35613d29fd9a99e84011c9119d92d4ecfcb08a1487311074a10 | def __init__(self, app, parent=None):
'\n\n :param app: The application this tool will run in.\n :type app: App\n :param parent: Qt Parent\n :return: FlatCAMTool\n '
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout()
self.setLayout(self.layout)
self.app = app
self.menuAction = None | :param app: The application this tool will run in.
:type app: App
:param parent: Qt Parent
:return: FlatCAMTool | FlatCAMTool.py | __init__ | JuanoVenegas/flatcam | 1 | python | def __init__(self, app, parent=None):
'\n\n :param app: The application this tool will run in.\n :type app: App\n :param parent: Qt Parent\n :return: FlatCAMTool\n '
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout()
self.setLayout(self.layout)
self.app = app
self.menuAction = None | def __init__(self, app, parent=None):
'\n\n :param app: The application this tool will run in.\n :type app: App\n :param parent: Qt Parent\n :return: FlatCAMTool\n '
QtWidgets.QWidget.__init__(self, parent)
self.layout = QtWidgets.QVBoxLayout()
self.setLayout(self.layout)
self.app = app
self.menuAction = None<|docstring|>:param app: The application this tool will run in.
:type app: App
:param parent: Qt Parent
:return: FlatCAMTool<|endoftext|> |
27efab4cbcc1c857d2eed2a394c0f076d1601267c0c12f6d0fe5c3a458cb7682 | def test_ieee_grids():
'\n Checks the .RAW files of IEEE grids against the PSS/e results\n This test checks 2 things:\n - PSS/e import fidelity\n - PSS/e vs GridCal results\n :return: Nothing, fails if not ok\n '
files = [('IEEE 14 bus.raw', 'IEEE 14 bus.sav.xlsx'), ('IEEE 30 bus.raw', 'IEEE 30 bus.sav.xlsx'), ('IEEE 118 Bus v2.raw', 'IEEE 118 Bus.sav.xlsx')]
options = PowerFlowOptions(SolverType.NR, verbose=False, initialize_with_existing_solution=False, multi_core=False, dispatch_storage=True, control_q=ReactivePowerControlMode.NoControl, control_p=True)
for (f1, f2) in files:
print(f1, end=' ')
fname = os.path.join('data', f1)
main_circuit = FileOpen(fname).open()
power_flow = PowerFlowDriver(main_circuit, options)
power_flow.run()
df_v = pd.read_excel(os.path.join('data', f2), sheet_name='Vabs', index_col=0)
df_p = pd.read_excel(os.path.join('data', f2), sheet_name='Pbranch', index_col=0)
v_gc = np.abs(power_flow.results.voltage)
v_psse = df_v.values[(:, 0)]
p_gc = power_flow.results.Sbranch.real
p_psse = df_p.values[(:, 0)]
assert np.allclose(v_gc, v_psse, atol=0.001)
assert np.allclose(p_gc, p_psse, atol=0.1)
print('ok') | Checks the .RAW files of IEEE grids against the PSS/e results
This test checks 2 things:
- PSS/e import fidelity
- PSS/e vs GridCal results
:return: Nothing, fails if not ok | src/tests/test_power_flow.py | test_ieee_grids | vineetjnair9/GridCal | 0 | python | def test_ieee_grids():
'\n Checks the .RAW files of IEEE grids against the PSS/e results\n This test checks 2 things:\n - PSS/e import fidelity\n - PSS/e vs GridCal results\n :return: Nothing, fails if not ok\n '
files = [('IEEE 14 bus.raw', 'IEEE 14 bus.sav.xlsx'), ('IEEE 30 bus.raw', 'IEEE 30 bus.sav.xlsx'), ('IEEE 118 Bus v2.raw', 'IEEE 118 Bus.sav.xlsx')]
options = PowerFlowOptions(SolverType.NR, verbose=False, initialize_with_existing_solution=False, multi_core=False, dispatch_storage=True, control_q=ReactivePowerControlMode.NoControl, control_p=True)
for (f1, f2) in files:
print(f1, end=' ')
fname = os.path.join('data', f1)
main_circuit = FileOpen(fname).open()
power_flow = PowerFlowDriver(main_circuit, options)
power_flow.run()
df_v = pd.read_excel(os.path.join('data', f2), sheet_name='Vabs', index_col=0)
df_p = pd.read_excel(os.path.join('data', f2), sheet_name='Pbranch', index_col=0)
v_gc = np.abs(power_flow.results.voltage)
v_psse = df_v.values[(:, 0)]
p_gc = power_flow.results.Sbranch.real
p_psse = df_p.values[(:, 0)]
assert np.allclose(v_gc, v_psse, atol=0.001)
assert np.allclose(p_gc, p_psse, atol=0.1)
print('ok') | def test_ieee_grids():
'\n Checks the .RAW files of IEEE grids against the PSS/e results\n This test checks 2 things:\n - PSS/e import fidelity\n - PSS/e vs GridCal results\n :return: Nothing, fails if not ok\n '
files = [('IEEE 14 bus.raw', 'IEEE 14 bus.sav.xlsx'), ('IEEE 30 bus.raw', 'IEEE 30 bus.sav.xlsx'), ('IEEE 118 Bus v2.raw', 'IEEE 118 Bus.sav.xlsx')]
options = PowerFlowOptions(SolverType.NR, verbose=False, initialize_with_existing_solution=False, multi_core=False, dispatch_storage=True, control_q=ReactivePowerControlMode.NoControl, control_p=True)
for (f1, f2) in files:
print(f1, end=' ')
fname = os.path.join('data', f1)
main_circuit = FileOpen(fname).open()
power_flow = PowerFlowDriver(main_circuit, options)
power_flow.run()
df_v = pd.read_excel(os.path.join('data', f2), sheet_name='Vabs', index_col=0)
df_p = pd.read_excel(os.path.join('data', f2), sheet_name='Pbranch', index_col=0)
v_gc = np.abs(power_flow.results.voltage)
v_psse = df_v.values[(:, 0)]
p_gc = power_flow.results.Sbranch.real
p_psse = df_p.values[(:, 0)]
assert np.allclose(v_gc, v_psse, atol=0.001)
assert np.allclose(p_gc, p_psse, atol=0.1)
print('ok')<|docstring|>Checks the .RAW files of IEEE grids against the PSS/e results
This test checks 2 things:
- PSS/e import fidelity
- PSS/e vs GridCal results
:return: Nothing, fails if not ok<|endoftext|> |
baeb86e191f16db257ab0e24af274d81b22f8441da5c1d99eae267b4ab065f0a | def add_payment_info(body=None):
'Processes Credit Card Payments\n\n Adds an item to the system # noqa: E501\n\n :param body: Payment item to add\n :type body: dict | bytes\n\n :rtype: None\n '
if connexion.request.is_json:
body = PaymentItem.from_dict(connexion.request.get_json())
return 'do some magic!' | Processes Credit Card Payments
Adds an item to the system # noqa: E501
:param body: Payment item to add
:type body: dict | bytes
:rtype: None | swagger_server/controllers/admins_controller.py | add_payment_info | sbalasa/ProcessPayment | 0 | python | def add_payment_info(body=None):
'Processes Credit Card Payments\n\n Adds an item to the system # noqa: E501\n\n :param body: Payment item to add\n :type body: dict | bytes\n\n :rtype: None\n '
if connexion.request.is_json:
body = PaymentItem.from_dict(connexion.request.get_json())
return 'do some magic!' | def add_payment_info(body=None):
'Processes Credit Card Payments\n\n Adds an item to the system # noqa: E501\n\n :param body: Payment item to add\n :type body: dict | bytes\n\n :rtype: None\n '
if connexion.request.is_json:
body = PaymentItem.from_dict(connexion.request.get_json())
return 'do some magic!'<|docstring|>Processes Credit Card Payments
Adds an item to the system # noqa: E501
:param body: Payment item to add
:type body: dict | bytes
:rtype: None<|endoftext|> |
5e1ecce345a8040b54584cb27a0d69a79ce26fa78e6eca2b7a803609a368aac9 | def convert_estimator(est, min_version=None):
' Convert scikit-learn estimator to its pure_sklearn counterpart '
est_name = est.__class__.__name__
pure_est_name = MAPPING.get(est_name)
if (pure_est_name is None):
raise ValueError("Cannot find 'pure_sklearn' counterpart for {}".format(est_name))
module = '.'.join(pure_est_name.split('.')[:(- 1)])
name = pure_est_name.split('.')[(- 1)]
return _instantiate_class(module, name)(est) | Convert scikit-learn estimator to its pure_sklearn counterpart | pure_sklearn/map.py | convert_estimator | ashetty1-m/pure-predict | 62 | python | def convert_estimator(est, min_version=None):
' '
est_name = est.__class__.__name__
pure_est_name = MAPPING.get(est_name)
if (pure_est_name is None):
raise ValueError("Cannot find 'pure_sklearn' counterpart for {}".format(est_name))
module = '.'.join(pure_est_name.split('.')[:(- 1)])
name = pure_est_name.split('.')[(- 1)]
return _instantiate_class(module, name)(est) | def convert_estimator(est, min_version=None):
' '
est_name = est.__class__.__name__
pure_est_name = MAPPING.get(est_name)
if (pure_est_name is None):
raise ValueError("Cannot find 'pure_sklearn' counterpart for {}".format(est_name))
module = '.'.join(pure_est_name.split('.')[:(- 1)])
name = pure_est_name.split('.')[(- 1)]
return _instantiate_class(module, name)(est)<|docstring|>Convert scikit-learn estimator to its pure_sklearn counterpart<|endoftext|> |
3783a59500ba0de94e622a669a55263285d7d7873fc20a093deca4d0c809f876 | def show():
'Shows the sidebar components for the template and returns\n user inputs as dict.'
with st.sidebar:
pass
(col1, col2) = st.columns((2, 3))
st.write('\n #####\n Analyze the results with most widely used metrics such as\n AUC ROC curve, precision-recall curve and confusion matrix.\n ')
result_df = utils.load_df('Choose a CSV file with predictions:')
st.write(result_df)
result_df_colnames = result_df.columns.tolist()
label = st.selectbox('Select the label', result_df_colnames)
evaluate = st.button('Evaluate!')
if evaluate:
evaluator = Evaluator(result_df=result_df, target=label, result_dir=config.RESULT_DIR)
evaluator.evaluate()
st.write(evaluator.plot_roc_curve_all())
st.write(evaluator.plot_confusion_matrix_all())
st.write(f'''
The best performing model in terms of AUC ROC in 5-fold
cross-validation is ***{evaluator.best_model_name}**.
This model is evaluated on the test set:
''')
st.write(evaluator.plot_test()) | Shows the sidebar components for the template and returns
user inputs as dict. | webapp/templates/Binary classification/3. Evaluation.py | show | piotrekwoznicki/Radiomics | 1 | python | def show():
'Shows the sidebar components for the template and returns\n user inputs as dict.'
with st.sidebar:
pass
(col1, col2) = st.columns((2, 3))
st.write('\n #####\n Analyze the results with most widely used metrics such as\n AUC ROC curve, precision-recall curve and confusion matrix.\n ')
result_df = utils.load_df('Choose a CSV file with predictions:')
st.write(result_df)
result_df_colnames = result_df.columns.tolist()
label = st.selectbox('Select the label', result_df_colnames)
evaluate = st.button('Evaluate!')
if evaluate:
evaluator = Evaluator(result_df=result_df, target=label, result_dir=config.RESULT_DIR)
evaluator.evaluate()
st.write(evaluator.plot_roc_curve_all())
st.write(evaluator.plot_confusion_matrix_all())
st.write(f'
The best performing model in terms of AUC ROC in 5-fold
cross-validation is ***{evaluator.best_model_name}**.
This model is evaluated on the test set:
')
st.write(evaluator.plot_test()) | def show():
'Shows the sidebar components for the template and returns\n user inputs as dict.'
with st.sidebar:
pass
(col1, col2) = st.columns((2, 3))
st.write('\n #####\n Analyze the results with most widely used metrics such as\n AUC ROC curve, precision-recall curve and confusion matrix.\n ')
result_df = utils.load_df('Choose a CSV file with predictions:')
st.write(result_df)
result_df_colnames = result_df.columns.tolist()
label = st.selectbox('Select the label', result_df_colnames)
evaluate = st.button('Evaluate!')
if evaluate:
evaluator = Evaluator(result_df=result_df, target=label, result_dir=config.RESULT_DIR)
evaluator.evaluate()
st.write(evaluator.plot_roc_curve_all())
st.write(evaluator.plot_confusion_matrix_all())
st.write(f'
The best performing model in terms of AUC ROC in 5-fold
cross-validation is ***{evaluator.best_model_name}**.
This model is evaluated on the test set:
')
st.write(evaluator.plot_test())<|docstring|>Shows the sidebar components for the template and returns
user inputs as dict.<|endoftext|> |
5b9b87816442b7caf4c53c6da66c07274ff787273707c58bf6982be167917d00 | def create_mlflow_experiment_by_name(name: str) -> Any:
'Create mlflow experiment by specified name.\n Returns experiment ID (existing or created).'
if (str is None):
raise Exception('Experiment name is empty!')
experiment = mlflow.get_experiment_by_name(name)
if (experiment is not None):
return experiment.experiment_id
experiment_id = mlflow.create_experiment(name)
print(f'created experiment ID: {experiment_id}')
return experiment_id | Create mlflow experiment by specified name.
Returns experiment ID (existing or created). | 9_evaluation_selection/src/forest_ml/utils/mlflow_utils.py | create_mlflow_experiment_by_name | aslamovyura/ml-python | 0 | python | def create_mlflow_experiment_by_name(name: str) -> Any:
'Create mlflow experiment by specified name.\n Returns experiment ID (existing or created).'
if (str is None):
raise Exception('Experiment name is empty!')
experiment = mlflow.get_experiment_by_name(name)
if (experiment is not None):
return experiment.experiment_id
experiment_id = mlflow.create_experiment(name)
print(f'created experiment ID: {experiment_id}')
return experiment_id | def create_mlflow_experiment_by_name(name: str) -> Any:
'Create mlflow experiment by specified name.\n Returns experiment ID (existing or created).'
if (str is None):
raise Exception('Experiment name is empty!')
experiment = mlflow.get_experiment_by_name(name)
if (experiment is not None):
return experiment.experiment_id
experiment_id = mlflow.create_experiment(name)
print(f'created experiment ID: {experiment_id}')
return experiment_id<|docstring|>Create mlflow experiment by specified name.
Returns experiment ID (existing or created).<|endoftext|> |
008914a86a828de7519cf3582c1325346ede9dc095dd1d398f787cf3898df538 | def run(metadata_file_path: str) -> str:
'\n Transforms a metadatafile from the input model to the SIKT\n metadata model that is stored in the datastore.\n Returns the path of the transformed metadata file.\n '
try:
logger.info(f'Transforming metadata {metadata_file_path}')
transformed_metadata_file_path = _transform_metadata(metadata_file_path)
logger.info(f'Transformed metadata and wrote to {transformed_metadata_file_path}')
return transformed_metadata_file_path
except Exception as e:
logger.error(f'Error during transformation: {str(e)}')
raise BuilderStepError('Failed to transform dataset') | Transforms a metadatafile from the input model to the SIKT
metadata model that is stored in the datastore.
Returns the path of the transformed metadata file. | dataset_builder/steps/dataset_transformer.py | run | statisticsnorway/microdata-dataset-builder | 0 | python | def run(metadata_file_path: str) -> str:
'\n Transforms a metadatafile from the input model to the SIKT\n metadata model that is stored in the datastore.\n Returns the path of the transformed metadata file.\n '
try:
logger.info(f'Transforming metadata {metadata_file_path}')
transformed_metadata_file_path = _transform_metadata(metadata_file_path)
logger.info(f'Transformed metadata and wrote to {transformed_metadata_file_path}')
return transformed_metadata_file_path
except Exception as e:
logger.error(f'Error during transformation: {str(e)}')
raise BuilderStepError('Failed to transform dataset') | def run(metadata_file_path: str) -> str:
'\n Transforms a metadatafile from the input model to the SIKT\n metadata model that is stored in the datastore.\n Returns the path of the transformed metadata file.\n '
try:
logger.info(f'Transforming metadata {metadata_file_path}')
transformed_metadata_file_path = _transform_metadata(metadata_file_path)
logger.info(f'Transformed metadata and wrote to {transformed_metadata_file_path}')
return transformed_metadata_file_path
except Exception as e:
logger.error(f'Error during transformation: {str(e)}')
raise BuilderStepError('Failed to transform dataset')<|docstring|>Transforms a metadatafile from the input model to the SIKT
metadata model that is stored in the datastore.
Returns the path of the transformed metadata file.<|endoftext|> |
53dc36b9cc0d8c480bc3550071b9bb991e61f3fca3c4000b7ecc8b56466644fa | def manual_args(args: Namespace) -> Namespace:
'function only called if no arguments have been passed to the script - mostly used for dev/debugging'
args.trials = 20
args.split_seed_init = 0
args.experiment_name = 'hiv_tn_4096_test4'
args.tracking_uri = os.getenv('TRACKING_URI', default='http://localhost:5000')
args.gradient_clip_val = 1.0
args.max_steps = 1000
args.seed = 0
args.patience = 50
args.data_name = 'hiv'
args.batch_size = 2048
args.split_type = 'random'
args.split_seed = 0
args.n_bits = 4096
args.radius = 4
args.chirality = True
args.features = True
args.featurizer_name = 'ecfp'
args.num_workers = 4
args.cache_dir = ('../../../' + 'data/molnet/hiv/')
args.decision_size = 128
args.feature_size = (args.decision_size * 2)
args.nr_layers = 2
args.nr_shared_layers = 2
args.nr_steps = 3
args.gamma = 1.2
args.lambda_sparse = 0.0
args.virtual_batch_size = (- 1)
args.normalize_input = False
args.lr = 0.00044816616909224065
args.optimizer = 'adam'
args.scheduler = 'linear_with_warmup'
args.scheduler_params = {'warmup_steps': 10}
args.log_sparsity = True
args.log_parameters = False
return args | function only called if no arguments have been passed to the script - mostly used for dev/debugging | src/experiments/molnet/tn_splits.py | manual_args | clemens33/thesis | 0 | python | def manual_args(args: Namespace) -> Namespace:
args.trials = 20
args.split_seed_init = 0
args.experiment_name = 'hiv_tn_4096_test4'
args.tracking_uri = os.getenv('TRACKING_URI', default='http://localhost:5000')
args.gradient_clip_val = 1.0
args.max_steps = 1000
args.seed = 0
args.patience = 50
args.data_name = 'hiv'
args.batch_size = 2048
args.split_type = 'random'
args.split_seed = 0
args.n_bits = 4096
args.radius = 4
args.chirality = True
args.features = True
args.featurizer_name = 'ecfp'
args.num_workers = 4
args.cache_dir = ('../../../' + 'data/molnet/hiv/')
args.decision_size = 128
args.feature_size = (args.decision_size * 2)
args.nr_layers = 2
args.nr_shared_layers = 2
args.nr_steps = 3
args.gamma = 1.2
args.lambda_sparse = 0.0
args.virtual_batch_size = (- 1)
args.normalize_input = False
args.lr = 0.00044816616909224065
args.optimizer = 'adam'
args.scheduler = 'linear_with_warmup'
args.scheduler_params = {'warmup_steps': 10}
args.log_sparsity = True
args.log_parameters = False
return args | def manual_args(args: Namespace) -> Namespace:
args.trials = 20
args.split_seed_init = 0
args.experiment_name = 'hiv_tn_4096_test4'
args.tracking_uri = os.getenv('TRACKING_URI', default='http://localhost:5000')
args.gradient_clip_val = 1.0
args.max_steps = 1000
args.seed = 0
args.patience = 50
args.data_name = 'hiv'
args.batch_size = 2048
args.split_type = 'random'
args.split_seed = 0
args.n_bits = 4096
args.radius = 4
args.chirality = True
args.features = True
args.featurizer_name = 'ecfp'
args.num_workers = 4
args.cache_dir = ('../../../' + 'data/molnet/hiv/')
args.decision_size = 128
args.feature_size = (args.decision_size * 2)
args.nr_layers = 2
args.nr_shared_layers = 2
args.nr_steps = 3
args.gamma = 1.2
args.lambda_sparse = 0.0
args.virtual_batch_size = (- 1)
args.normalize_input = False
args.lr = 0.00044816616909224065
args.optimizer = 'adam'
args.scheduler = 'linear_with_warmup'
args.scheduler_params = {'warmup_steps': 10}
args.log_sparsity = True
args.log_parameters = False
return args<|docstring|>function only called if no arguments have been passed to the script - mostly used for dev/debugging<|endoftext|> |
96bf6b38e3a7d0ee39a39820cae93bb13f7341dfcfdf7a855b44d21c2d3f5b56 | def to_dict(self):
'Returns the serialized form of the :class:`GraphObjectBase`\n as a dict. All sub-objects that are based off of :class:`GraphObjectBase`\n are also serialized and inserted into the dict\n \n Returns:\n dict: The serialized form of the :class:`GraphObjectBase`\n '
serialized = {}
for prop in self._prop_dict:
if isinstance(self._prop_dict[prop], GraphObjectBase):
serialized[prop] = self._prop_dict[prop].to_dict()
else:
serialized[prop] = self._prop_dict[prop]
return serialized | Returns the serialized form of the :class:`GraphObjectBase`
as a dict. All sub-objects that are based off of :class:`GraphObjectBase`
are also serialized and inserted into the dict
Returns:
dict: The serialized form of the :class:`GraphObjectBase` | src/msgraph/graph_object_base.py | to_dict | microsoftarchive/msgraph-sdk-python | 7 | python | def to_dict(self):
'Returns the serialized form of the :class:`GraphObjectBase`\n as a dict. All sub-objects that are based off of :class:`GraphObjectBase`\n are also serialized and inserted into the dict\n \n Returns:\n dict: The serialized form of the :class:`GraphObjectBase`\n '
serialized = {}
for prop in self._prop_dict:
if isinstance(self._prop_dict[prop], GraphObjectBase):
serialized[prop] = self._prop_dict[prop].to_dict()
else:
serialized[prop] = self._prop_dict[prop]
return serialized | def to_dict(self):
'Returns the serialized form of the :class:`GraphObjectBase`\n as a dict. All sub-objects that are based off of :class:`GraphObjectBase`\n are also serialized and inserted into the dict\n \n Returns:\n dict: The serialized form of the :class:`GraphObjectBase`\n '
serialized = {}
for prop in self._prop_dict:
if isinstance(self._prop_dict[prop], GraphObjectBase):
serialized[prop] = self._prop_dict[prop].to_dict()
else:
serialized[prop] = self._prop_dict[prop]
return serialized<|docstring|>Returns the serialized form of the :class:`GraphObjectBase`
as a dict. All sub-objects that are based off of :class:`GraphObjectBase`
are also serialized and inserted into the dict
Returns:
dict: The serialized form of the :class:`GraphObjectBase`<|endoftext|> |
1a9900a3cc1da156304f2c9eb05700152505e510cef344874464efe81ee1de16 | def parse_organism(x):
"\n The taxonomy ID is returned based on the provided 'Taxid interactor' value eg. taxid:10090(mouse)|taxid:10090(Mus musculus)\n "
if (x == '-'):
return None
x = (x.split('|')[1] if (len(x.split('|')) > 1) else x)
organism_id = re.search('taxid:(-*\\d+)', x).group(1)
return organism_id | The taxonomy ID is returned based on the provided 'Taxid interactor' value eg. taxid:10090(mouse)|taxid:10090(Mus musculus) | src/parsers/intact_parser.py | parse_organism | thehyve/ot_covid19 | 1 | python | def parse_organism(x):
"\n \n "
if (x == '-'):
return None
x = (x.split('|')[1] if (len(x.split('|')) > 1) else x)
organism_id = re.search('taxid:(-*\\d+)', x).group(1)
return organism_id | def parse_organism(x):
"\n \n "
if (x == '-'):
return None
x = (x.split('|')[1] if (len(x.split('|')) > 1) else x)
organism_id = re.search('taxid:(-*\\d+)', x).group(1)
return organism_id<|docstring|>The taxonomy ID is returned based on the provided 'Taxid interactor' value eg. taxid:10090(mouse)|taxid:10090(Mus musculus)<|endoftext|> |
82ef8905399b9bf972c888f40377daced1f8633eeeb50dbbd39bef0fe61a035d | def get_direct_interactors(input_df):
'\n This function reads the covid19 intact release\n\n Output: pd.DataFrame\n id: uniprot identifiers\n Covid_direct_interactions: Intact network identifiers\n '
taxonomy_ids = ['2697049', '694009', '9606']
filtered_interact = network_df.loc[(network_df['taxid_b'].isin(taxonomy_ids) & network_df['taxid_a'].isin(taxonomy_ids))]
filtered_interact = filtered_interact.loc[(~ ((filtered_interact['taxid_b'] == '9606') & (filtered_interact['taxid_a'] == '9606')))]
filtered_interact = filtered_interact.drop_duplicates()
interactors = filtered_interact.id_a.append(filtered_interact.id_b).unique()
aggregated_interactions = []
for interactor in interactors:
interaction_ids = filtered_interact.loc[(((filtered_interact.id_a == interactor) | (filtered_interact.id_b == interactor)), 'interaction_id')].unique().tolist()
try:
tax_id = filtered_interact.loc[(filtered_interact.id_a == interactor)].taxid_a.tolist()[0]
except:
tax_id = filtered_interact.loc[(filtered_interact.id_b == interactor)].taxid_b.tolist()[0]
aggregated_interactions.append({'uniprot_id': interactor.split('-')[0], 'Covid_direct_interactions': interaction_ids, 'tax_id': tax_id})
return pd.DataFrame(aggregated_interactions) | This function reads the covid19 intact release
Output: pd.DataFrame
id: uniprot identifiers
Covid_direct_interactions: Intact network identifiers | src/parsers/intact_parser.py | get_direct_interactors | thehyve/ot_covid19 | 1 | python | def get_direct_interactors(input_df):
'\n This function reads the covid19 intact release\n\n Output: pd.DataFrame\n id: uniprot identifiers\n Covid_direct_interactions: Intact network identifiers\n '
taxonomy_ids = ['2697049', '694009', '9606']
filtered_interact = network_df.loc[(network_df['taxid_b'].isin(taxonomy_ids) & network_df['taxid_a'].isin(taxonomy_ids))]
filtered_interact = filtered_interact.loc[(~ ((filtered_interact['taxid_b'] == '9606') & (filtered_interact['taxid_a'] == '9606')))]
filtered_interact = filtered_interact.drop_duplicates()
interactors = filtered_interact.id_a.append(filtered_interact.id_b).unique()
aggregated_interactions = []
for interactor in interactors:
interaction_ids = filtered_interact.loc[(((filtered_interact.id_a == interactor) | (filtered_interact.id_b == interactor)), 'interaction_id')].unique().tolist()
try:
tax_id = filtered_interact.loc[(filtered_interact.id_a == interactor)].taxid_a.tolist()[0]
except:
tax_id = filtered_interact.loc[(filtered_interact.id_b == interactor)].taxid_b.tolist()[0]
aggregated_interactions.append({'uniprot_id': interactor.split('-')[0], 'Covid_direct_interactions': interaction_ids, 'tax_id': tax_id})
return pd.DataFrame(aggregated_interactions) | def get_direct_interactors(input_df):
'\n This function reads the covid19 intact release\n\n Output: pd.DataFrame\n id: uniprot identifiers\n Covid_direct_interactions: Intact network identifiers\n '
taxonomy_ids = ['2697049', '694009', '9606']
filtered_interact = network_df.loc[(network_df['taxid_b'].isin(taxonomy_ids) & network_df['taxid_a'].isin(taxonomy_ids))]
filtered_interact = filtered_interact.loc[(~ ((filtered_interact['taxid_b'] == '9606') & (filtered_interact['taxid_a'] == '9606')))]
filtered_interact = filtered_interact.drop_duplicates()
interactors = filtered_interact.id_a.append(filtered_interact.id_b).unique()
aggregated_interactions = []
for interactor in interactors:
interaction_ids = filtered_interact.loc[(((filtered_interact.id_a == interactor) | (filtered_interact.id_b == interactor)), 'interaction_id')].unique().tolist()
try:
tax_id = filtered_interact.loc[(filtered_interact.id_a == interactor)].taxid_a.tolist()[0]
except:
tax_id = filtered_interact.loc[(filtered_interact.id_b == interactor)].taxid_b.tolist()[0]
aggregated_interactions.append({'uniprot_id': interactor.split('-')[0], 'Covid_direct_interactions': interaction_ids, 'tax_id': tax_id})
return pd.DataFrame(aggregated_interactions)<|docstring|>This function reads the covid19 intact release
Output: pd.DataFrame
id: uniprot identifiers
Covid_direct_interactions: Intact network identifiers<|endoftext|> |
817740f3c8b614ed2ad852acca30c88a2650fba382308c96699bf4444c24f186 | def pool_arrays(s):
'\n pd.Series -> serialized json\n '
x = set()
for e in s:
try:
for a in e:
x.add(a)
except TypeError:
continue
if (len(x) > 0):
return json.dumps(list(x))
else:
return np.nan | pd.Series -> serialized json | src/parsers/intact_parser.py | pool_arrays | thehyve/ot_covid19 | 1 | python | def pool_arrays(s):
'\n \n '
x = set()
for e in s:
try:
for a in e:
x.add(a)
except TypeError:
continue
if (len(x) > 0):
return json.dumps(list(x))
else:
return np.nan | def pool_arrays(s):
'\n \n '
x = set()
for e in s:
try:
for a in e:
x.add(a)
except TypeError:
continue
if (len(x) > 0):
return json.dumps(list(x))
else:
return np.nan<|docstring|>pd.Series -> serialized json<|endoftext|> |
3bb33e1db07b9630b9f7929c9c739c490bef042a3f97755edb52f0d277424b54 | def read_human_interactions(human_interactions_file):
'\n Based on the Intact JSON dump file, a dataframe is built with all \n human protein-protein interactions.\n\n Columns:\n interactor_a str uniprot id\n interactor_b str uniprot id\n interaction_identifier str intact id\n '
all_human_interactions = []
with open(human_interactions_file, 'r') as f:
for line in f:
interaction = json.loads(line)
if ((not interaction['interactorB']) or (not interaction['interactorA'])):
continue
if (interaction['interactorA']['id_source'] != 'uniprotkb'):
continue
if (interaction['interactorB']['id_source'] != 'uniprotkb'):
continue
if (not interaction['interaction']['interaction_score']):
continue
if (interaction['interaction']['interaction_score'] < 0.45):
continue
for evidence in interaction['interaction']['evidence']:
all_human_interactions.append({'interactor_a': interaction['interactorA']['id'].split('-')[0], 'interactor_b': interaction['interactorB']['id'].split('-')[0], 'interaction_identifier': evidence['interaction_identifier']})
return pd.DataFrame(all_human_interactions) | Based on the Intact JSON dump file, a dataframe is built with all
human protein-protein interactions.
Columns:
interactor_a str uniprot id
interactor_b str uniprot id
interaction_identifier str intact id | src/parsers/intact_parser.py | read_human_interactions | thehyve/ot_covid19 | 1 | python | def read_human_interactions(human_interactions_file):
'\n Based on the Intact JSON dump file, a dataframe is built with all \n human protein-protein interactions.\n\n Columns:\n interactor_a str uniprot id\n interactor_b str uniprot id\n interaction_identifier str intact id\n '
all_human_interactions = []
with open(human_interactions_file, 'r') as f:
for line in f:
interaction = json.loads(line)
if ((not interaction['interactorB']) or (not interaction['interactorA'])):
continue
if (interaction['interactorA']['id_source'] != 'uniprotkb'):
continue
if (interaction['interactorB']['id_source'] != 'uniprotkb'):
continue
if (not interaction['interaction']['interaction_score']):
continue
if (interaction['interaction']['interaction_score'] < 0.45):
continue
for evidence in interaction['interaction']['evidence']:
all_human_interactions.append({'interactor_a': interaction['interactorA']['id'].split('-')[0], 'interactor_b': interaction['interactorB']['id'].split('-')[0], 'interaction_identifier': evidence['interaction_identifier']})
return pd.DataFrame(all_human_interactions) | def read_human_interactions(human_interactions_file):
'\n Based on the Intact JSON dump file, a dataframe is built with all \n human protein-protein interactions.\n\n Columns:\n interactor_a str uniprot id\n interactor_b str uniprot id\n interaction_identifier str intact id\n '
all_human_interactions = []
with open(human_interactions_file, 'r') as f:
for line in f:
interaction = json.loads(line)
if ((not interaction['interactorB']) or (not interaction['interactorA'])):
continue
if (interaction['interactorA']['id_source'] != 'uniprotkb'):
continue
if (interaction['interactorB']['id_source'] != 'uniprotkb'):
continue
if (not interaction['interaction']['interaction_score']):
continue
if (interaction['interaction']['interaction_score'] < 0.45):
continue
for evidence in interaction['interaction']['evidence']:
all_human_interactions.append({'interactor_a': interaction['interactorA']['id'].split('-')[0], 'interactor_b': interaction['interactorB']['id'].split('-')[0], 'interaction_identifier': evidence['interaction_identifier']})
return pd.DataFrame(all_human_interactions)<|docstring|>Based on the Intact JSON dump file, a dataframe is built with all
human protein-protein interactions.
Columns:
interactor_a str uniprot id
interactor_b str uniprot id
interaction_identifier str intact id<|endoftext|> |
5b000ae5c264ac4a138e4877ac35c80e3bf260afc878434a62c9083c554e7afa | def __init__(self, layers, num_classes, obj_name, exp_name='default', tb_log=True):
'\n\n :param layers:\n :param num_classes:\n :param obj_name: like \'C\', \'A\'\n :param tb_log: boolean; if true then log to tensorboard\n :param exp_name: for example "v5_per_class" as in training on yeast_v5 with per class normalization\n '
self.arch = f'ResNet{len(layers)}'
date = datetime.now().strftime('%m-%d_%H-%M')
self.tag = f'{self.arch}_{exp_name}_{date}'
if tb_log:
base_dir = ('./results/' + 'tensorboardx/')
if (not os.path.isdir(base_dir)):
os.makedirs(base_dir)
self.writer = SummaryWriter((((base_dir + obj_name) + '/') + self.tag))
super().__init__()
self.conv1 = nn.Conv2d(2, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[(i + 1)]) for i in range((len(layers) - 1))])
self.layers2 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1) for i in range((len(layers) - 1))])
self.layers3 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1, log=(True if (i == (len(layers) - 2)) else False)) for i in range((len(layers) - 1))])
self.out = nn.Linear(layers[(- 1)], num_classes) | :param layers:
:param num_classes:
:param obj_name: like 'C', 'A'
:param tb_log: boolean; if true then log to tensorboard
:param exp_name: for example "v5_per_class" as in training on yeast_v5 with per class normalization | YNet_dev/models/ResNet.py | __init__ | OBA9k/Test_dev | 4 | python | def __init__(self, layers, num_classes, obj_name, exp_name='default', tb_log=True):
'\n\n :param layers:\n :param num_classes:\n :param obj_name: like \'C\', \'A\'\n :param tb_log: boolean; if true then log to tensorboard\n :param exp_name: for example "v5_per_class" as in training on yeast_v5 with per class normalization\n '
self.arch = f'ResNet{len(layers)}'
date = datetime.now().strftime('%m-%d_%H-%M')
self.tag = f'{self.arch}_{exp_name}_{date}'
if tb_log:
base_dir = ('./results/' + 'tensorboardx/')
if (not os.path.isdir(base_dir)):
os.makedirs(base_dir)
self.writer = SummaryWriter((((base_dir + obj_name) + '/') + self.tag))
super().__init__()
self.conv1 = nn.Conv2d(2, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[(i + 1)]) for i in range((len(layers) - 1))])
self.layers2 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1) for i in range((len(layers) - 1))])
self.layers3 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1, log=(True if (i == (len(layers) - 2)) else False)) for i in range((len(layers) - 1))])
self.out = nn.Linear(layers[(- 1)], num_classes) | def __init__(self, layers, num_classes, obj_name, exp_name='default', tb_log=True):
'\n\n :param layers:\n :param num_classes:\n :param obj_name: like \'C\', \'A\'\n :param tb_log: boolean; if true then log to tensorboard\n :param exp_name: for example "v5_per_class" as in training on yeast_v5 with per class normalization\n '
self.arch = f'ResNet{len(layers)}'
date = datetime.now().strftime('%m-%d_%H-%M')
self.tag = f'{self.arch}_{exp_name}_{date}'
if tb_log:
base_dir = ('./results/' + 'tensorboardx/')
if (not os.path.isdir(base_dir)):
os.makedirs(base_dir)
self.writer = SummaryWriter((((base_dir + obj_name) + '/') + self.tag))
super().__init__()
self.conv1 = nn.Conv2d(2, 10, kernel_size=5, stride=1, padding=2)
self.layers = nn.ModuleList([BnLayer(layers[i], layers[(i + 1)]) for i in range((len(layers) - 1))])
self.layers2 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1) for i in range((len(layers) - 1))])
self.layers3 = nn.ModuleList([ResnetLayer(layers[(i + 1)], layers[(i + 1)], 1, log=(True if (i == (len(layers) - 2)) else False)) for i in range((len(layers) - 1))])
self.out = nn.Linear(layers[(- 1)], num_classes)<|docstring|>:param layers:
:param num_classes:
:param obj_name: like 'C', 'A'
:param tb_log: boolean; if true then log to tensorboard
:param exp_name: for example "v5_per_class" as in training on yeast_v5 with per class normalization<|endoftext|> |
bf56cd0eb53dee2697d398e4732af9661894e2845e50d3e459cda9d6af434a5a | def is_white(pixel_val):
'tuple with (r, g, b) values'
return (pixel_val > 250) | tuple with (r, g, b) values | Backend Code/blank_init.py | is_white | kristian-georgiev/EasyFill | 1 | python | def is_white(pixel_val):
return (pixel_val > 250) | def is_white(pixel_val):
return (pixel_val > 250)<|docstring|>tuple with (r, g, b) values<|endoftext|> |
23f8451a568bb7467f82d33ed99a15d059ce6f666af50bfc958b75d3bb42fb3e | def __init__(self, kash_model: BaseModel, valid_x, valid_y, step=5, batch_size=256, average='weighted'):
'\n Evaluate callback, calculate precision, recall and f1\n Args:\n kash_model: the kashgari model to evaluate\n valid_x: feature data\n valid_y: label data\n step: step, default 5\n batch_size: batch size, default 256\n '
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = [] | Evaluate callback, calculate precision, recall and f1
Args:
kash_model: the kashgari model to evaluate
valid_x: feature data
valid_y: label data
step: step, default 5
batch_size: batch size, default 256 | kashgari/callbacks.py | __init__ | SunYanCN/Kashgari | 0 | python | def __init__(self, kash_model: BaseModel, valid_x, valid_y, step=5, batch_size=256, average='weighted'):
'\n Evaluate callback, calculate precision, recall and f1\n Args:\n kash_model: the kashgari model to evaluate\n valid_x: feature data\n valid_y: label data\n step: step, default 5\n batch_size: batch size, default 256\n '
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = [] | def __init__(self, kash_model: BaseModel, valid_x, valid_y, step=5, batch_size=256, average='weighted'):
'\n Evaluate callback, calculate precision, recall and f1\n Args:\n kash_model: the kashgari model to evaluate\n valid_x: feature data\n valid_y: label data\n step: step, default 5\n batch_size: batch size, default 256\n '
super(EvalCallBack, self).__init__()
self.kash_model = kash_model
self.valid_x = valid_x
self.valid_y = valid_y
self.step = step
self.batch_size = batch_size
self.average = average
self.logs = []<|docstring|>Evaluate callback, calculate precision, recall and f1
Args:
kash_model: the kashgari model to evaluate
valid_x: feature data
valid_y: label data
step: step, default 5
batch_size: batch size, default 256<|endoftext|> |
aa41519e2c65f07dfe74a4e28ff12bed9014a01fc2062c13d596b202b2c54573 | def _save_model(self, epoch, logs):
'Saves the model.\n\n Arguments:\n epoch: the epoch this iteration is in.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n '
logs = (logs or {})
if (isinstance(self.save_freq, int) or (self.epochs_since_last_save >= self.period)):
self.epochs_since_last_save = 0
(file_handle, filepath) = self._get_file_handle_and_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if (current is None):
logging.warning('Can save best model only with %s available, skipping.', self.monitor)
elif self.monitor_op(current, self.best):
if (self.verbose > 0):
print(('\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % ((epoch + 1), self.monitor, self.best, current, filepath)))
self.best = current
if self.save_weights_only:
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
elif (self.verbose > 0):
print(('\nEpoch %05d: %s did not improve from %0.5f' % ((epoch + 1), self.monitor, self.best)))
else:
if (self.verbose > 0):
print(('\nEpoch %05d: saving model to %s' % ((epoch + 1), filepath)))
if self.save_weights_only:
if K.in_multi_worker_mode():
self.model._ckpt_saved_epoch = epoch
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
self._maybe_remove_file(file_handle, filepath) | Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`. | kashgari/callbacks.py | _save_model | SunYanCN/Kashgari | 0 | python | def _save_model(self, epoch, logs):
'Saves the model.\n\n Arguments:\n epoch: the epoch this iteration is in.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n '
logs = (logs or {})
if (isinstance(self.save_freq, int) or (self.epochs_since_last_save >= self.period)):
self.epochs_since_last_save = 0
(file_handle, filepath) = self._get_file_handle_and_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if (current is None):
logging.warning('Can save best model only with %s available, skipping.', self.monitor)
elif self.monitor_op(current, self.best):
if (self.verbose > 0):
print(('\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % ((epoch + 1), self.monitor, self.best, current, filepath)))
self.best = current
if self.save_weights_only:
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
elif (self.verbose > 0):
print(('\nEpoch %05d: %s did not improve from %0.5f' % ((epoch + 1), self.monitor, self.best)))
else:
if (self.verbose > 0):
print(('\nEpoch %05d: saving model to %s' % ((epoch + 1), filepath)))
if self.save_weights_only:
if K.in_multi_worker_mode():
self.model._ckpt_saved_epoch = epoch
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
self._maybe_remove_file(file_handle, filepath) | def _save_model(self, epoch, logs):
'Saves the model.\n\n Arguments:\n epoch: the epoch this iteration is in.\n logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.\n '
logs = (logs or {})
if (isinstance(self.save_freq, int) or (self.epochs_since_last_save >= self.period)):
self.epochs_since_last_save = 0
(file_handle, filepath) = self._get_file_handle_and_path(epoch, logs)
if self.save_best_only:
current = logs.get(self.monitor)
if (current is None):
logging.warning('Can save best model only with %s available, skipping.', self.monitor)
elif self.monitor_op(current, self.best):
if (self.verbose > 0):
print(('\nEpoch %05d: %s improved from %0.5f to %0.5f, saving model to %s' % ((epoch + 1), self.monitor, self.best, current, filepath)))
self.best = current
if self.save_weights_only:
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
elif (self.verbose > 0):
print(('\nEpoch %05d: %s did not improve from %0.5f' % ((epoch + 1), self.monitor, self.best)))
else:
if (self.verbose > 0):
print(('\nEpoch %05d: saving model to %s' % ((epoch + 1), filepath)))
if self.save_weights_only:
if K.in_multi_worker_mode():
self.model._ckpt_saved_epoch = epoch
filepath = os.path.join(filepath, 'cp')
self.model.save_weights(filepath, overwrite=True)
else:
self.kash_model.save(filepath)
self._maybe_remove_file(file_handle, filepath)<|docstring|>Saves the model.
Arguments:
epoch: the epoch this iteration is in.
logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.<|endoftext|> |
e8df5146bf21ee175a57f155d8727fc095061f86dd489c313733f2acfefacda9 | @transformation
def get_subgraph_by_annotations(graph, annotations, or_=None):
'Induce a sub-graph given an annotations filter.\n\n :param graph: pybel.BELGraph graph: A BEL graph\n :param dict[str,iter[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`)\n :param boolean or_: if True any annotation should be present, if False all annotations should be present in the\n edge. Defaults to True.\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
edge_filter_builder = (build_annotation_dict_any_filter if ((or_ is None) or or_) else build_annotation_dict_all_filter)
return get_subgraph_by_edge_filter(graph, edge_filter_builder(annotations)) | Induce a sub-graph given an annotations filter.
:param graph: pybel.BELGraph graph: A BEL graph
:param dict[str,iter[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`)
:param boolean or_: if True any annotation should be present, if False all annotations should be present in the
edge. Defaults to True.
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph | src/pybel/struct/mutation/induction/annotations.py | get_subgraph_by_annotations | djinnome/pybel | 0 | python | @transformation
def get_subgraph_by_annotations(graph, annotations, or_=None):
'Induce a sub-graph given an annotations filter.\n\n :param graph: pybel.BELGraph graph: A BEL graph\n :param dict[str,iter[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`)\n :param boolean or_: if True any annotation should be present, if False all annotations should be present in the\n edge. Defaults to True.\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
edge_filter_builder = (build_annotation_dict_any_filter if ((or_ is None) or or_) else build_annotation_dict_all_filter)
return get_subgraph_by_edge_filter(graph, edge_filter_builder(annotations)) | @transformation
def get_subgraph_by_annotations(graph, annotations, or_=None):
'Induce a sub-graph given an annotations filter.\n\n :param graph: pybel.BELGraph graph: A BEL graph\n :param dict[str,iter[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`)\n :param boolean or_: if True any annotation should be present, if False all annotations should be present in the\n edge. Defaults to True.\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
edge_filter_builder = (build_annotation_dict_any_filter if ((or_ is None) or or_) else build_annotation_dict_all_filter)
return get_subgraph_by_edge_filter(graph, edge_filter_builder(annotations))<|docstring|>Induce a sub-graph given an annotations filter.
:param graph: pybel.BELGraph graph: A BEL graph
:param dict[str,iter[str]] annotations: Annotation filters (match all with :func:`pybel.utils.subdict_matches`)
:param boolean or_: if True any annotation should be present, if False all annotations should be present in the
edge. Defaults to True.
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph<|endoftext|> |
850f8799dd3aefd3b8baca9133bf94f8891dcd6d749db0998dd2ff913c4bed89 | @transformation
def get_subgraph_by_annotation_value(graph, annotation, values):
'Induce a sub-graph over all edges whose annotations match the given key and value.\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to group by\n :param values: The value(s) for the annotation\n :type values: str or iter[str]\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
if isinstance(values, str):
values = {values}
return get_subgraph_by_annotations(graph, {annotation: values}) | Induce a sub-graph over all edges whose annotations match the given key and value.
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to group by
:param values: The value(s) for the annotation
:type values: str or iter[str]
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph | src/pybel/struct/mutation/induction/annotations.py | get_subgraph_by_annotation_value | djinnome/pybel | 0 | python | @transformation
def get_subgraph_by_annotation_value(graph, annotation, values):
'Induce a sub-graph over all edges whose annotations match the given key and value.\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to group by\n :param values: The value(s) for the annotation\n :type values: str or iter[str]\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
if isinstance(values, str):
values = {values}
return get_subgraph_by_annotations(graph, {annotation: values}) | @transformation
def get_subgraph_by_annotation_value(graph, annotation, values):
'Induce a sub-graph over all edges whose annotations match the given key and value.\n\n :param pybel.BELGraph graph: A BEL graph\n :param str annotation: The annotation to group by\n :param values: The value(s) for the annotation\n :type values: str or iter[str]\n :return: A subgraph of the original BEL graph\n :rtype: pybel.BELGraph\n '
if isinstance(values, str):
values = {values}
return get_subgraph_by_annotations(graph, {annotation: values})<|docstring|>Induce a sub-graph over all edges whose annotations match the given key and value.
:param pybel.BELGraph graph: A BEL graph
:param str annotation: The annotation to group by
:param values: The value(s) for the annotation
:type values: str or iter[str]
:return: A subgraph of the original BEL graph
:rtype: pybel.BELGraph<|endoftext|> |
59556a9f188ec39a59858222b73429dcac8628a66e71dcc4b1701e7d67253fc7 | @task
def build(c):
'\n Build the infrastructure\n '
command = 'build'
command += (' --build-arg PROJECT_NAME=%s' % c.project_name)
command += (' --build-arg USER_ID=%s' % c.user_id)
with Builder(c):
for service in c.services_to_build_first:
docker_compose(c, ('%s %s' % (command, service)))
docker_compose(c, command) | Build the infrastructure | tasks.py | build | jolicode/starfleet | 19 | python | @task
def build(c):
'\n \n '
command = 'build'
command += (' --build-arg PROJECT_NAME=%s' % c.project_name)
command += (' --build-arg USER_ID=%s' % c.user_id)
with Builder(c):
for service in c.services_to_build_first:
docker_compose(c, ('%s %s' % (command, service)))
docker_compose(c, command) | @task
def build(c):
'\n \n '
command = 'build'
command += (' --build-arg PROJECT_NAME=%s' % c.project_name)
command += (' --build-arg USER_ID=%s' % c.user_id)
with Builder(c):
for service in c.services_to_build_first:
docker_compose(c, ('%s %s' % (command, service)))
docker_compose(c, command)<|docstring|>Build the infrastructure<|endoftext|> |
6fa11905b2d5a26a9dcc88f89db6b9cf063e2656567186f00a78f6bfe15b945c | @task
def up(c):
'\n Build and start the infrastructure\n '
build(c)
docker_compose(c, 'up --remove-orphans --detach') | Build and start the infrastructure | tasks.py | up | jolicode/starfleet | 19 | python | @task
def up(c):
'\n \n '
build(c)
docker_compose(c, 'up --remove-orphans --detach') | @task
def up(c):
'\n \n '
build(c)
docker_compose(c, 'up --remove-orphans --detach')<|docstring|>Build and start the infrastructure<|endoftext|> |
8748df99d9e671c4aaf68f1e25c77501c56477fa5a21ac643799211552501b66 | @task
def start(c):
'\n Build and start the infrastructure, then install the application (composer, yarn, ...)\n '
if c.dinghy:
machine_running = c.run('dinghy status', hide=True).stdout
if (machine_running.splitlines()[0].strip() != 'VM: running'):
c.run('dinghy up --no-proxy')
c.run('docker-machine ssh dinghy "echo \'nameserver 8.8.8.8\' | sudo tee -a /etc/resolv.conf && sudo /etc/init.d/docker restart"')
stop_workers(c)
up(c)
cache_clear(c)
install(c)
migrate(c)
start_workers(c)
print((Fore.GREEN + 'The stack is now up and running.'))
help(c) | Build and start the infrastructure, then install the application (composer, yarn, ...) | tasks.py | start | jolicode/starfleet | 19 | python | @task
def start(c):
'\n \n '
if c.dinghy:
machine_running = c.run('dinghy status', hide=True).stdout
if (machine_running.splitlines()[0].strip() != 'VM: running'):
c.run('dinghy up --no-proxy')
c.run('docker-machine ssh dinghy "echo \'nameserver 8.8.8.8\' | sudo tee -a /etc/resolv.conf && sudo /etc/init.d/docker restart"')
stop_workers(c)
up(c)
cache_clear(c)
install(c)
migrate(c)
start_workers(c)
print((Fore.GREEN + 'The stack is now up and running.'))
help(c) | @task
def start(c):
'\n \n '
if c.dinghy:
machine_running = c.run('dinghy status', hide=True).stdout
if (machine_running.splitlines()[0].strip() != 'VM: running'):
c.run('dinghy up --no-proxy')
c.run('docker-machine ssh dinghy "echo \'nameserver 8.8.8.8\' | sudo tee -a /etc/resolv.conf && sudo /etc/init.d/docker restart"')
stop_workers(c)
up(c)
cache_clear(c)
install(c)
migrate(c)
start_workers(c)
print((Fore.GREEN + 'The stack is now up and running.'))
help(c)<|docstring|>Build and start the infrastructure, then install the application (composer, yarn, ...)<|endoftext|> |
2d8a3ad02163a56db175afff646abc9c11e3220623ec0863c7a3a0a75f6badb3 | @task
def install(c):
'\n Install the application (composer, yarn, ...)\n '
with Builder(c):
docker_compose_run(c, 'composer install -n --prefer-dist --optimize-autoloader', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn run dev', no_deps=True) | Install the application (composer, yarn, ...) | tasks.py | install | jolicode/starfleet | 19 | python | @task
def install(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'composer install -n --prefer-dist --optimize-autoloader', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn run dev', no_deps=True) | @task
def install(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'composer install -n --prefer-dist --optimize-autoloader', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn', no_deps=True)
run_in_docker_or_locally_for_dinghy(c, 'yarn run dev', no_deps=True)<|docstring|>Install the application (composer, yarn, ...)<|endoftext|> |
5c5a5dfebbcae48aadc6a97b0b0279d619c86f86040ff0ea6c3304e326e6ea25 | @task
def cache_clear(c):
'\n Clear the application cache\n '
with Builder(c):
docker_compose_run(c, 'rm -rf var/cache/ && php bin/console cache:warmup', no_deps=True) | Clear the application cache | tasks.py | cache_clear | jolicode/starfleet | 19 | python | @task
def cache_clear(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'rm -rf var/cache/ && php bin/console cache:warmup', no_deps=True) | @task
def cache_clear(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'rm -rf var/cache/ && php bin/console cache:warmup', no_deps=True)<|docstring|>Clear the application cache<|endoftext|> |
2fc718cd82318960911880ab3d7b0f286dc74a197870e4c967fa408793401f18 | @task
def migrate(c):
'\n Migrate database schema\n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
docker_compose_run(c, 'php bin/console doctrine:migration:migrate -n --allow-no-migration') | Migrate database schema | tasks.py | migrate | jolicode/starfleet | 19 | python | @task
def migrate(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
docker_compose_run(c, 'php bin/console doctrine:migration:migrate -n --allow-no-migration') | @task
def migrate(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
docker_compose_run(c, 'php bin/console doctrine:migration:migrate -n --allow-no-migration')<|docstring|>Migrate database schema<|endoftext|> |
7c00ec36b6621620c102fba180d10d39250584a428715e90859137497a93f365 | @task
def generate_migration(c):
'\n Generate database migration\n '
with Builder(c):
docker_compose_run(c, 'php bin/console make:migration -n') | Generate database migration | tasks.py | generate_migration | jolicode/starfleet | 19 | python | @task
def generate_migration(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console make:migration -n') | @task
def generate_migration(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console make:migration -n')<|docstring|>Generate database migration<|endoftext|> |
7476279896117e11ae24bf5dfdcdf2a78d74d97a3f1bbd7eaad56d194bfbdffc | @task
def fixtures(c):
'\n Load fixtures into database\n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --force --if-exists')
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
migrate(c)
docker_compose_run(c, 'php bin/console doctrine:fixtures:load -n') | Load fixtures into database | tasks.py | fixtures | jolicode/starfleet | 19 | python | @task
def fixtures(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --force --if-exists')
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
migrate(c)
docker_compose_run(c, 'php bin/console doctrine:fixtures:load -n') | @task
def fixtures(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --force --if-exists')
docker_compose_run(c, 'php bin/console doctrine:database:create --if-not-exists')
migrate(c)
docker_compose_run(c, 'php bin/console doctrine:fixtures:load -n')<|docstring|>Load fixtures into database<|endoftext|> |
f2a709eef36ce47037512191cbed6473ef1c5f6aedc94b33d9ce338db89dc28d | @task
def fetch_conferences(c):
'\n Fetch conferences from all sources\n '
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:fetch -vv') | Fetch conferences from all sources | tasks.py | fetch_conferences | jolicode/starfleet | 19 | python | @task
def fetch_conferences(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:fetch -vv') | @task
def fetch_conferences(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:fetch -vv')<|docstring|>Fetch conferences from all sources<|endoftext|> |
9dd1a7664acde2adc1e11d7f26c95e4a4746f08348a5cc1756196d6487d8e054 | @task
def remind_cfp_ending(c):
"\n Remind CFP's ending\n "
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:remind-cfp-ending-soon') | Remind CFP's ending | tasks.py | remind_cfp_ending | jolicode/starfleet | 19 | python | @task
def remind_cfp_ending(c):
"\n \n "
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:remind-cfp-ending-soon') | @task
def remind_cfp_ending(c):
"\n \n "
with Builder(c):
docker_compose_run(c, 'php bin/console starfleet:conferences:remind-cfp-ending-soon')<|docstring|>Remind CFP's ending<|endoftext|> |
6cd962362fffee1c3a25ffc65862c1c8edfa8b59b7631c590035857f4e2ec4c5 | @task
def reset(c):
'\n Reset database\n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --if-exists --force')
migrate(c) | Reset database | tasks.py | reset | jolicode/starfleet | 19 | python | @task
def reset(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --if-exists --force')
migrate(c) | @task
def reset(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console doctrine:database:drop --if-exists --force')
migrate(c)<|docstring|>Reset database<|endoftext|> |
d7cc08c406cfa96c1b499cf065bd70badfc974b4f2cec9091f7d5c811486f05e | @task
def phpcs(c, dry_run=False):
'\n Fix coding standards in code\n '
with Builder(c):
docker_compose_run(c, 'php bin/console lint:yaml --parse-tags config/')
docker_compose_run(c, 'php bin/console lint:twig templates/')
if dry_run:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php --dry-run --diff')
else:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php') | Fix coding standards in code | tasks.py | phpcs | jolicode/starfleet | 19 | python | @task
def phpcs(c, dry_run=False):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console lint:yaml --parse-tags config/')
docker_compose_run(c, 'php bin/console lint:twig templates/')
if dry_run:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php --dry-run --diff')
else:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php') | @task
def phpcs(c, dry_run=False):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php bin/console lint:yaml --parse-tags config/')
docker_compose_run(c, 'php bin/console lint:twig templates/')
if dry_run:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php --dry-run --diff')
else:
docker_compose_run(c, 'php ./vendor/bin/php-cs-fixer fix --config=.php-cs-fixer.php')<|docstring|>Fix coding standards in code<|endoftext|> |
84afb4887b67aaade7ab030880bdf0d72ff086982c91c459b196c5ee0593eae5 | @task
def tests(c):
'\n Launch unit and functional tests\n '
with Builder(c):
reset(c)
docker_compose_run(c, 'php ./vendor/bin/simple-phpunit') | Launch unit and functional tests | tasks.py | tests | jolicode/starfleet | 19 | python | @task
def tests(c):
'\n \n '
with Builder(c):
reset(c)
docker_compose_run(c, 'php ./vendor/bin/simple-phpunit') | @task
def tests(c):
'\n \n '
with Builder(c):
reset(c)
docker_compose_run(c, 'php ./vendor/bin/simple-phpunit')<|docstring|>Launch unit and functional tests<|endoftext|> |
360f6f510d3312a36a8f7bc4a9dcb66f0650bfa9c1c67c5726eaef355eb39fa6 | @task
def phpstan(c):
'\n Runs PHPStan\n '
with Builder(c):
docker_compose_run(c, 'php ./vendor/bin/phpstan analyse') | Runs PHPStan | tasks.py | phpstan | jolicode/starfleet | 19 | python | @task
def phpstan(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php ./vendor/bin/phpstan analyse') | @task
def phpstan(c):
'\n \n '
with Builder(c):
docker_compose_run(c, 'php ./vendor/bin/phpstan analyse')<|docstring|>Runs PHPStan<|endoftext|> |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.