# aie3-autograder/calcscore.py
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
from promptsplitembed import create_prompt, create_qamodel
from extractjson import extract_json


def compute_cosine_similarity(reference_embeddings: dict, student_embeddings: dict) -> float:
    """Return the average per-question cosine similarity between reference and student embeddings."""
    similarity_results = {}
    for key in reference_embeddings.keys():
        # A question the student never answered scores 0.
        if key not in student_embeddings:
            similarity_results[key] = 0.0
            continue
        reference_vector = np.array(reference_embeddings[key]).reshape(1, -1)
        student_vector = np.array(student_embeddings[key]).reshape(1, -1)
        # If the embedding dimensions disagree, truncate both vectors to the shorter length.
        if reference_vector.shape[1] != student_vector.shape[1]:
            min_dim = min(reference_vector.shape[1], student_vector.shape[1])
            reference_vector = reference_vector[:, :min_dim]
            student_vector = student_vector[:, :min_dim]
        similarity = cosine_similarity(reference_vector, student_vector)[0][0]
        similarity_results[key] = similarity
    total_similarity = sum(similarity_results.values())
    num_questions = len(similarity_results)
    average_similarity = total_similarity / num_questions if num_questions else 0
    return float(average_similarity)
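
# Usage sketch: the dicts below are hypothetical placeholders -- in the real
# pipeline both come from the embedding step in promptsplitembed, keyed by
# question id. An unanswered question ("Q2" here) contributes a score of 0.
#
#   reference = {"Q1": [0.12, 0.85, 0.03], "Q2": [0.40, 0.10, 0.55]}
#   student = {"Q1": [0.10, 0.80, 0.05]}
#   compute_cosine_similarity(reference, student)   # ~0.5: Q1 is near 1.0, Q2 is 0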


def llm_similarity(answers, student_result, llm_score_prompt_template):
    """Score a student's answers against the reference answers with an LLM judge.

    Returns the average per-question score and the total tokens used by the scoring call.
    """
    qa_chat_model = create_qamodel(model="gpt-4o-mini", temperature=0)
    score_prompt_template = create_prompt(llm_score_prompt_template)
    student_score_chain = score_prompt_template | qa_chat_model
    student_score = student_score_chain.invoke({"source": answers, "student": student_result})
    llm_score_tokens = student_score.usage_metadata["total_tokens"]
    # The model is expected to return a JSON object mapping question ids to scores.
    student_score = dict(extract_json(student_score)[0])
    total_score = sum(student_score.values())
    num_questions = len(student_score)
    average_score = total_score / num_questions if num_questions else 0
    return average_score, llm_score_tokens
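
# Usage sketch: calling llm_similarity requires an OpenAI API key and a scoring
# prompt that exposes {source} and {student} placeholders. The template below is
# a hypothetical example, not the project's actual prompt.
#
#   score_prompt = (
#       "Score each student answer against the reference answers. "
#       "Return a JSON object mapping question ids to scores from 0 to 1.\n"
#       "Reference: {source}\nStudent: {student}"
#   )
#   avg_score, tokens_used = llm_similarity(answers, student_result, score_prompt)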