import json

from langchain.document_loaders import PyPDFLoader

from models import ExtractionResult, EvaluationResult
from llm import get_llm

# Shared LLM client used by both extraction and evaluation. It is assumed to
# return a chat model that accepts the OpenAI-style response_format kwarg.
llm = get_llm()


def extract_answers_from_pdf(pdf_path: str) -> ExtractionResult:
    """
    Loads a PDF, extracts its text content, and uses the LLM to return the
    answers as a validated ExtractionResult.
    """
    # Load the PDF and concatenate the text of all pages.
    loader = PyPDFLoader(pdf_path)
    pages = loader.load_and_split()
    all_page_content = "\n".join(page.page_content for page in pages)

    # Embed the JSON schema in the prompt so the LLM knows the expected output shape.
    extraction_schema = ExtractionResult.model_json_schema()
    system_message = (
        "You are a document analysis tool that extracts the options and correct answers "
        "from the provided document content. "
        "The output must be a JSON object that strictly follows the schema: "
        + json.dumps(extraction_schema, indent=2)
    )
    user_message = (
        "Please extract the correct answers and options (A, B, C, D, E) "
        "from the following document content:\n\n"
        + all_page_content
    )
    prompt = system_message + "\n\n" + user_message

    # Request a JSON object and validate it against the Pydantic model.
    response = llm.invoke(prompt, response_format={"type": "json_object"})
    result = ExtractionResult.model_validate_json(response.content)
    return result


def evaluate_student(answer_key: ExtractionResult, student: ExtractionResult) -> EvaluationResult:
    """
    Compares the answer key with a student's answers and returns the evaluation result.
    """
    # Embed the JSON schema in the prompt so the LLM knows the expected output shape.
    evaluation_schema = EvaluationResult.model_json_schema()
    system_message = (
        "You are an academic evaluation tool that compares the answer key with a student's answers. "
        "Calculate the total marks, grade, and percentage based on the provided JSON objects. "
        "The output must be a JSON object that strictly follows the schema: "
        + json.dumps(evaluation_schema, indent=2)
    )
    user_message = (
        "Answer Key JSON:\n" + json.dumps(answer_key.model_dump(), indent=2) + "\n\n"
        "Student Answer JSON:\n" + json.dumps(student.model_dump(), indent=2)
    )
    prompt = system_message + "\n\n" + user_message

    # Request a JSON object and validate it against the Pydantic model.
    response = llm.invoke(prompt, response_format={"type": "json_object"})
    evaluation_result = EvaluationResult.model_validate_json(response.content)
    return evaluation_result
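

# Example usage: a minimal sketch of how the two functions compose.
# The file names "answer_key.pdf" and "student.pdf" are placeholders, not files
# shipped with this module, and the printed fields depend on how
# EvaluationResult is defined in models.py.
if __name__ == "__main__":
    answer_key = extract_answers_from_pdf("answer_key.pdf")
    student_answers = extract_answers_from_pdf("student.pdf")
    evaluation = evaluate_student(answer_key, student_answers)
    print(evaluation.model_dump_json(indent=2))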