Commit: Added ProblemGradingPipeline

Files changed:
- backend/app/problem_generator.py (+18 −20): moved prompts to module-level constants
- backend/app/problem_grader.py (new file, +72): grading pipeline
backend/app/problem_generator.py
CHANGED
@@ -8,31 +8,29 @@ from langchain_core.output_parsers import StrOutputParser
|
|
8 |
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
|
9 |
from backend.app.vectorstore import get_vector_db
|
10 |
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
self.system_role_prompt = """
|
15 |
-
You are a helpful assistant that generates questions based on a given context.
|
16 |
-
"""
|
17 |
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
|
22 |
-
|
23 |
|
24 |
-
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
30 |
-
|
31 |
|
32 |
-
|
|
|
33 |
self.chat_prompt = ChatPromptTemplate.from_messages([
|
34 |
-
("system",
|
35 |
-
("user",
|
36 |
])
|
37 |
|
38 |
self.llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
|
|
|
8 |
from langchain.output_parsers.openai_functions import JsonOutputFunctionsParser
|
9 |
from backend.app.vectorstore import get_vector_db
|
10 |
|
11 |
# System message for the question-generation LLM call.
SYSTEM_ROLE_PROMPT = """
You are a helpful assistant that generates questions based on a given context.
"""

# User message template for question generation; the chain fills
# {query} (the topic) and {context} (the retrieved documents).
# NOTE: the doubled braces {{ }} are escapes so ChatPromptTemplate
# emits literal JSON braces rather than treating them as variables.
USER_ROLE_PROMPT = """
Based on the following context about {query}, generate 5 relevant and specific questions.
Make sure the questions can be answered using only the provided context.

Context: {context}

Generate 5 questions that test understanding of the material in the context.

Return only a json object with the following format:
{{
"questions": ["question1", "question2", "question3", "question4", "question5"]
}}
"""
28 |
|
29 |
+
class ProblemGenerationPipeline:
|
30 |
+
def __init__(self):
|
31 |
self.chat_prompt = ChatPromptTemplate.from_messages([
|
32 |
+
("system", SYSTEM_ROLE_PROMPT),
|
33 |
+
("user", USER_ROLE_PROMPT)
|
34 |
])
|
35 |
|
36 |
self.llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.7)
|
backend/app/problem_grader.py
ADDED
@@ -0,0 +1,72 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import Dict
|
2 |
+
import json
|
3 |
+
|
4 |
+
from langchain_core.prompts import ChatPromptTemplate
|
5 |
+
from langchain_openai import ChatOpenAI
|
6 |
+
from langchain_core.runnables import RunnablePassthrough
|
7 |
+
from langchain_core.output_parsers import StrOutputParser
|
8 |
+
from backend.app.vectorstore import get_vector_db
|
9 |
+
|
10 |
# System message for the grading LLM call.
SYSTEM_ROLE_PROMPT = """
You are a knowledgeable grading assistant that evaluates student answers based on provided context.
You should determine if answers are correct and provide constructive feedback.
"""

# User message template for grading; the chain fills {query} (the topic),
# {context} (retrieved documents), {problem} (the question asked) and
# {answer} (the student's response). The required "Correct"/"Incorrect"
# leading word makes the free-text verdict machine-checkable downstream.
USER_ROLE_PROMPT = """
Grade the following student answer based on the provided context about {query}.

Context: {context}

Question: {problem}
Student Answer: {answer}

Evaluate if the answer is correct and provide brief feedback. Start with either "Correct" or "Incorrect"
followed by a brief explanation of why. Focus on the accuracy based on the context provided.

Always begin your response with "Correct" or "Incorrect" and then provide a brief explanation of why.

Your response should be direct and clear, for example:
"Correct. The answer accurately explains [reason]" or
"Incorrect. While [partial understanding], the answer misses [key point]"
"""
32 |
+
|
33 |
class ProblemGradingPipeline:
    """RAG pipeline that grades a student's answer against retrieved context.

    Retrieves the top documents for the topic, then asks the LLM to judge
    the answer, producing a free-text verdict starting with "Correct" or
    "Incorrect".
    """

    def __init__(self):
        # Stdlib import kept local so the pipeline module's import surface
        # is unchanged for existing callers.
        from operator import itemgetter

        self.chat_prompt = ChatPromptTemplate.from_messages([
            ("system", SYSTEM_ROLE_PROMPT),
            ("user", USER_ROLE_PROMPT)
        ])

        # Low temperature (0.3): grading should be consistent, not creative.
        self.llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0.3)
        self.retriever = get_vector_db().as_retriever(search_kwargs={"k": 2})

        # Build the RAG chain.
        # BUG FIX: the original mapped "context": self.retriever and used bare
        # RunnablePassthrough() for the remaining keys. Since grade() invokes
        # the chain with a dict, the retriever received the entire dict instead
        # of the query string (breaking retrieval), and every passthrough slot
        # received the whole dict rather than its own field (corrupting the
        # prompt variables). itemgetter extracts the intended field for each slot.
        self.rag_chain = (
            {
                "context": itemgetter("query") | self.retriever,
                "query": itemgetter("query"),
                "problem": itemgetter("problem"),
                "answer": itemgetter("answer"),
            }
            | self.chat_prompt
            | self.llm
            | StrOutputParser()
        )

    def grade(self, query: str, problem: str, answer: str) -> str:
        """
        Grade a student's answer to a problem using RAG for context-aware evaluation.

        Args:
            query (str): The topic/context to use for grading
            problem (str): The question being answered
            answer (str): The student's answer to evaluate

        Returns:
            str: Grading response indicating if the answer is correct and
                providing feedback (begins with "Correct" or "Incorrect")
        """
        return self.rag_chain.invoke({
            "query": query,
            "problem": problem,
            "answer": answer
        })