# aie3-autograder / process_docs.py
from promptsplitembed import create_qamodel, create_prompt
from extractjson import extract_json
from langchain_openai import ChatOpenAI

# Shared chat model used by both the reference and student chains.
qa_chat_model = create_qamodel(model="gpt-4o-mini", temperature=0)
def process_reference(reference_document, ref_prompt):
    """Generate reference Q&A pairs from the reference document."""
    ref_prompt_template = create_prompt(ref_prompt)
    ref_generation_chain = ref_prompt_template | qa_chat_model
    result = ref_generation_chain.invoke({"source": reference_document})
    ref_gen_tokens = result.usage_metadata["total_tokens"]

    # Parse the model output into a dict of "Question #N" / "Answer #N" entries.
    reference = dict(extract_json(result)[0])

    # Map each question key to its corresponding answer.
    answers = {}
    for key in reference:
        if key.startswith('Question'):
            question_number = key.split('#')[1]
            answer_key = f'Answer #{question_number}'
            answers[key] = reference[answer_key]

    print("Processed reference document")
    return reference, answers, ref_gen_tokens
def process_student(documents, reference, student_prompt):
    """Answer the reference questions using the first student document."""
    test_doc = documents[0]
    student_prompt_template = create_prompt(student_prompt)
    student_response_chain = student_prompt_template | qa_chat_model
    student_result = student_response_chain.invoke(
        {"source": reference.keys(), "student": test_doc}
    )
    student_gen_tokens = student_result.usage_metadata["total_tokens"]

    # Parse the model output into a dict of the student's answers.
    student_result = dict(extract_json(student_result)[0])

    print("Processed student document")
    return student_result, student_gen_tokens
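

# Usage sketch (illustrative only): the file names and prompt strings below
# are assumptions for demonstration, not part of the autograder pipeline.
if __name__ == "__main__":
    # Hypothetical inputs: one reference text and one student submission.
    reference_document = open("reference.txt").read()
    student_documents = [open("student_submission.txt").read()]

    # Prompts are assumed to use the {source} / {student} placeholders that
    # the two chains invoke with.
    ref_prompt = (
        "From the following source, produce JSON with numbered "
        "'Question #N' and 'Answer #N' fields: {source}"
    )
    student_prompt = (
        "Answer each of these questions {source} as JSON, "
        "using only this submission: {student}"
    )

    reference, answers, ref_tokens = process_reference(reference_document, ref_prompt)
    student_answers, student_tokens = process_student(
        student_documents, reference, student_prompt
    )

    print(f"Reference tokens: {ref_tokens}, student tokens: {student_tokens}")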