Neurolingua committed
Commit caa9340 · 1 Parent(s): 4a8d12c
Update teacher_function.py

teacher_function.py  CHANGED  (+158 −158)
@@ -1,158 +1,158 @@
 from ai71 import AI71
 from PyPDF2 import PdfReader
 from pdf2image import convert_from_path
 import cv2
 import numpy as np
 import pytesseract
-AI71_API_KEY = "api71-api-
+AI71_API_KEY = "api71-api-652e5c6c-8edf-41d0-9c34-28522b07bef9"
 
 def extract_text_from_pdf(pdf_file):
     text = ""
     reader = PdfReader(pdf_file)
     for page in reader.pages:
         text += page.extract_text()
     return text
 
 def generate_questions_from_text(text, no_of_questions, marks_per_part, no_parts):
     ai71 = AI71(AI71_API_KEY)
     messages = [
         {"role": "system", "content": "You are a teaching assistant"},
         {"role": "user",
          "content": f"Give your own {no_of_questions} questions under each part for {no_parts} parts with {marks_per_part} marks for each part. Note that all questions must be from the topics of {text}"}
     ]
 
     questions = []
     for chunk in ai71.chat.completions.create(
         model="tiiuae/falcon-180b-chat",
         messages=messages,
         stream=True,
     ):
         if chunk.choices[0].delta.content:
             questions.append(chunk.choices[0].delta.content)
 
     return "".join(questions)
 
 def extract_text_from_image(image_path):
     img = cv2.imread(image_path)
     text = pytesseract.image_to_string(img)
     return text
 
 
 def extract_text_from_pdf(pdf_path):
     images = convert_from_path(pdf_path)
     final_text = ""
     for image in images:
         image_cv = np.array(image)
         image_cv = cv2.cvtColor(image_cv, cv2.COLOR_RGB2BGR)
         text = pytesseract.image_to_string(image_cv)
         final_text += text
     return final_text
 
 
 def evaluate(question, answer, max_marks):
     prompt = f"""Questions: {question}
     Answer: {answer}.
 
 
     Evaluate above questions one by one(if there are multiple) by provided answers and assign marks out of {max_marks}. No need overall score. Note that as maximum mark increases, the size of the answer must be large enough to get good marks. Give ouput in format below:
     description:
     assigned marks:
     total marks:
     Note that you should not display total marks"""
 
     messages = [
         {"role": "system", "content": "You are an answer evaluator"},
         {"role": "user", "content": prompt}
     ]
 
     response_content = ""
     for chunk in AI71(AI71_API_KEY).chat.completions.create(
         model="tiiuae/falcon-180b-chat",
         messages=messages,
         stream=True
     ):
         if chunk.choices[0].delta.content:
             response_content += chunk.choices[0].delta.content
 
     return response_content
 
 def generate_student_report(name, age, cgpa, course, assigned_test, ai_test, interests, difficulty, courses_taken):
     prompt = f"""
     Name: {name}
     Age: {age}
     CGPA: {cgpa}
     Course: {course}
     Assigned Test Score: {assigned_test}
     AI generated Test Score: {ai_test}
     Interests: {interests}
     Difficulty in: {difficulty}
     Courses Taken: {courses_taken}
     Use the above student data to generate a neat personalized report and suggested teaching methods."""
 
     client = AI71(AI71_API_KEY)
 
     response = client.chat.completions.create(
         model="tiiuae/falcon-180B-chat",
         messages=[
             {"role": "system", "content": "You are a student report generator."},
             {"role": "user", "content": prompt}
         ]
     )
 
     report = response.choices[0].message.content if response.choices and response.choices[
         0].message else "No report generated."
     print(report)
 
     return report.replace('\n', '<br>')
 
 def generate_timetable_module(data, hours_per_day, days_per_week, semester_end_date, subjects):
     response = AI71(AI71_API_KEY).chat.completions.create(
         model="tiiuae/falcon-180B-chat",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": f"Create a timetable starting from Monday based on the following inputs:\n"
                                         f"- Number of hours per day: {hours_per_day}\n"
                                         f"- Number of days per week: {days_per_week}\n"
                                         f"- Semester end date: {semester_end_date}\n"
                                         f"- Subjects: {', '.join(subjects)}\n"}
         ]
     )
 
     # Access the response content correctly
     return (response.choices[0].message.content if response.choices and response.choices[0].message else "No timetable generated.")
 
 def cluster_topics(academic_topics):
     prompt = (
         "Please cluster the following academic topics into their respective subjects such as Mathematics, Physics, etc.: "
         + ", ".join(academic_topics))
     response = ""
     for chunk in AI71(AI71_API_KEY).chat.completions.create(
         model="tiiuae/falcon-180b-chat",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": prompt},
         ],
         stream=True,
     ):
         if chunk.choices[0].delta.content:
             response += chunk.choices[0].delta.content
     return response
 
 def generate_timetable_weak(clustered_subjects, hours_per_day):
     prompt = (
         f"Using the following subjects and topics:\n{clustered_subjects}\n"
         f"Generate a special class timetable for {hours_per_day} hours per day.\n"
         f"Also provide reference books and methods to teach the slow learners for each subject"
     )
     response = ""
     for chunk in AI71(AI71_API_KEY).chat.completions.create(
         model="tiiuae/falcon-180b-chat",
         messages=[
             {"role": "system", "content": "You are a helpful assistant."},
             {"role": "user", "content": prompt},
         ],
         stream=True,
     ):
         if chunk.choices[0].delta.content:
             response += chunk.choices[0].delta.content
     return response
 
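For reference, a minimal sketch of how these helpers might be called once the module is imported. The PDF path, question counts, and sample answer below are illustrative assumptions, the chat calls require a valid AI71 key, and the OCR path additionally needs Tesseract and Poppler installed; note also that the later, OCR-based extract_text_from_pdf definition is the one that remains bound after import.

# Illustrative usage sketch (hypothetical paths and values).
from teacher_function import extract_text_from_pdf, generate_questions_from_text, evaluate

# OCR the course material; "notes.pdf" is a placeholder path.
syllabus_text = extract_text_from_pdf("notes.pdf")

# Draft a two-part question paper from the extracted topics.
paper = generate_questions_from_text(
    syllabus_text, no_of_questions=5, marks_per_part=10, no_parts=2
)
print(paper)

# Grade one hypothetical answer out of 10 marks.
print(evaluate(
    question="State and explain Newton's second law.",
    answer="Force equals mass times acceleration...",
    max_marks=10,
))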