# mq-quiz/src/quizzes/models.py
# Author: Teddy Xinyuan Chen
# Date: 2024-10-08T20:23:09Z
# Commit: 9fbb706
import os
import typing

from django.contrib.postgres import fields
from django.db import models
from openai import OpenAI

# Module-level OpenAI client, shared by all LLM-graded answers.
# NOTE: the API key is read from the environment ONCE, at import time —
# OPENAI_API_KEY must be set before this module is imported (loading a
# .env file later, inside a request, cannot affect this client).
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
class Quiz(models.Model):
    """A named quiz; questions attach to it via Question.quiz."""

    # Human-readable title, also used as the string representation.
    name = models.CharField(max_length=100)

    def __str__(self) -> str:
        return self.name
class Question(models.Model):
    """A single question in a quiz, answered by exactly one Answer subtype."""

    quiz = models.ForeignKey(Quiz, on_delete=models.CASCADE)
    prompt = models.CharField(max_length=200)
    # Free-form grading rubric, consumed only by LLMGradedAnswer.grade().
    rubrics = models.TextField(
        blank=True, null=True, verbose_name="Grading Rubrics - For LLM-graded questions only. You can leave this empty."
    )

    def __str__(self):
        return self.prompt

    def get_answer(self) -> typing.Union["Answer", None]:
        """Return the answer attached to this question, or None if there is none.

        Django's reverse one-to-one accessor raises RelatedObjectDoesNotExist
        (an AttributeError subclass) when the relation is absent, so
        getattr(..., None) safely maps "no answer" to None. The previous code
        accessed self.llmgradedanswer directly as the last alternative, which
        raised instead of returning None when the question had no answer at all.
        """
        return (
            getattr(self, "multiplechoiceanswer", None)
            or getattr(self, "freetextanswer", None)
            or getattr(self, "llmgradedanswer", None)
        )
class Answer(models.Model):
    """Abstract base for all answer types, linked one-to-one to a Question."""

    question = models.OneToOneField(Question, on_delete=models.CASCADE)

    class Meta:
        abstract = True

    def __str__(self) -> str:
        # Show whichever of the subclass-provided attributes is present
        # and non-empty, in priority order.
        for attr_name in ("correct_answer", "rubrics"):
            value = getattr(self, attr_name, None)
            if value:
                return value
        return "No answer or rubrics provided"

    def is_correct(self, user_answer) -> bool:
        """Default check: exact equality against the subclass's correct_answer."""
        return user_answer == getattr(self, "correct_answer", None)
class FreeTextAnswer(Answer):
    """Answer matched against typed text, optionally case-insensitively."""

    correct_answer = models.CharField(max_length=200, default="")
    case_sensitive = models.BooleanField(default=False)

    def is_correct(self, user_answer) -> bool:
        """Compare the submission to the expected text, honoring case_sensitive."""
        if self.case_sensitive:
            return user_answer == self.correct_answer
        # Case-insensitive path: normalize both sides before comparing.
        return user_answer.lower() == self.correct_answer.lower()
# NOTE(review): a duplicate `class LLMGradedAnswer(Answer)` definition was
# removed here. The same model is defined again below (with an additional
# `correct_answer` field and an identical `grade()` body). Defining two
# Django models with the same name in one app raises
# "RuntimeError: Conflicting '<name>' models in application ..." at import
# time, and at the Python level this first definition was shadowed by the
# later one anyway. The later, more complete definition is the one kept.
class MultipleChoiceAnswer(Answer):
    """Answer selected from a fixed list of choices."""

    # The single correct option; compared via Answer.is_correct().
    correct_answer = models.CharField(max_length=200, default="")
    # All selectable options, stored as a Postgres array of strings.
    choices = fields.ArrayField(models.CharField(max_length=200, blank=True))

    def __str__(self) -> str:
        return f"{self.correct_answer} from {self.choices}"
class LLMGradedAnswer(Answer):
    """Answer graded by an LLM against the parent question's rubrics."""

    correct_answer = models.CharField(max_length=200, default="")

    def grade(self, user_answer) -> dict:
        """
        Grades the user's answer by calling the grading API.

        Args:
            user_answer (str): The answer provided by the user.

        Returns:
            dict: {"result": "success", "message": <grader's text>} on success,
                  {"result": "error", "message": <error text>} on any failure.
        """
        # NOTE(review): the previous body imported and called
        # `load_dotenv()` here. That was removed: the module-level OpenAI
        # client reads OPENAI_API_KEY when this module is imported, so
        # loading a .env file inside grade() came too late to matter — and
        # if python-dotenv was not installed, the ImportError was swallowed
        # by the except below, making every grade call fail misleadingly.
        prompt = f"Grade the following answer based on the rubric:\nRubric: {self.question.rubrics}\nAnswer: {user_answer}"
        try:
            response = client.chat.completions.create(
                model="gpt-4o",
                messages=[
                    {"role": "system", "content": "You are a helpful assistant."},
                    {"role": "user", "content": prompt},
                ],
            )
            return {"result": "success", "message": response.choices[0].message.content}
        except Exception as e:
            # Boundary handler: report the failure to the caller as a result
            # dict instead of raising into the request cycle.
            print(f"An error occurred: {e}")
            return {"result": "error", "message": str(e)}