import ast
from pymongo import MongoClient
from datetime import datetime, timezone
import openai
import google.generativeai as genai
from dotenv import load_dotenv
import os
from file_upload_vectorize import resources_collection, vectors_collection, courses_collection2, faculty_collection
# Load environment variables
load_dotenv()
MONGO_URI = os.getenv('MONGO_URI')
OPENAI_KEY = os.getenv('OPENAI_KEY')
GEMINI_KEY = os.getenv('GEMINI_KEY')
# Configure APIs
openai.api_key = OPENAI_KEY
genai.configure(api_key=GEMINI_KEY)
model = genai.GenerativeModel('gemini-pro')
# Connect to MongoDB
client = MongoClient(MONGO_URI)
db = client['novascholar_db']
quizzes_collection = db["quizzes"]
def strip_code_markers(response_text):
"""Strip off the markers ``` and python from a LLM model's response"""
if response_text.startswith("```python"):
response_text = response_text[len("```python"):].strip()
if response_text.startswith("```"):
response_text = response_text[len("```"):].strip()
if response_text.endswith("```"):
response_text = response_text[:-len("```")].strip()
return response_text
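# Example of what the stripper handles (hypothetical model output):
#   strip_code_markers('```python\n[{"question": ...}]\n```')
#   returns '[{"question": ...}]'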
# Generate MCQs using Gemini
def generate_mcqs(context, num_questions, session_title, session_description):
"""Generate MCQs either from context or session details"""
try:
        # Build the prompt from uploaded context or from session details
if context:
prompt = f"""
Based on the following content, generate {num_questions} multiple choice questions.
Format each question as a Python dictionary with the following structure:
{{
"question": "Question text here",
"options": ["A) option1", "B) option2", "C) option3", "D) option4"],
"correct_option": "A) option1" or "B) option2" or "C) option3" or "D) option4"
}}
Content:
{context}
Generate challenging but clear questions that test understanding of key concepts.
Return only the Python list of dictionaries.
"""
else:
prompt = f"""
Generate {num_questions} multiple choice questions about the topic:
Title: {session_title}
Description: {session_description}
Format each question as a Python dictionary with the following structure:
{{
"question": "Question text here",
"options": ["A) option1", "B) option2", "C) option3", "D) option4"],
"correct_option": "A" or "B" or "C" or "D"
}}
Generate challenging but clear questions.
            Return only the Python list of dictionaries without any additional formatting or markers.
            Do not write any other text, do not start the response with ```python, and do not end it with backticks (```).
            A sample response should look like this:
            [
                {{
                    "question": "Which of the following is NOT a valid data type in C++?",
                    "options": ["int", "double", "boolean", "char"],
                    "correct_option": "C"
                }}
            ]
            (Notice that there are no backticks around the response.)
            """
response = model.generate_content(prompt)
response_text = response.text.strip()
print("Response Text:", response_text)
modified_response_text = strip_code_markers(response_text)
print("Response Text Modified to:", modified_response_text)
# Extract and parse the response to get the list of MCQs
        mcqs = ast.literal_eval(modified_response_text)  # literal_eval safely parses the list literal (never use bare eval here)
print(mcqs)
if not mcqs:
raise ValueError("No questions generated")
return mcqs
except Exception as e:
print(f"Error generating MCQs: , error: {e}")
return None
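# Usage sketch for generate_mcqs (session values below are hypothetical;
# requires a valid GEMINI_KEY and network access):
#
#   mcqs = generate_mcqs(
#       context=None,
#       num_questions=3,
#       session_title="Introduction to C++",
#       session_description="Data types, variables, and operators",
#   )
#   if mcqs:
#       for q in mcqs:
#           print(q["question"], "->", q["correct_option"])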
# Save quiz to database
def save_quiz(course_id, session_id, title, questions, user_id):
"""Save quiz to database"""
try:
quiz_data = {
"user_id": user_id,
"course_id": course_id,
"session_id": session_id,
"title": title,
"questions": questions,
"created_at": datetime.utcnow(),
"status": "active",
"submissions": []
}
result = quizzes_collection.insert_one(quiz_data)
return result.inserted_id
except Exception as e:
print(f"Error saving quiz: {e}")
return None
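# Usage sketch for save_quiz (IDs below are hypothetical; in the app they
# come from the courses, sessions, and users collections):
#
#   quiz_id = save_quiz(
#       course_id="CS101",
#       session_id="session_01",
#       title="C++ Basics Quiz",
#       questions=mcqs,
#       user_id="faculty_123",
#   )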
def get_student_quiz_score(quiz_id, student_id):
"""Get student's score for a specific quiz"""
quiz = quizzes_collection.find_one(
{
"_id": quiz_id,
"submissions.student_id": student_id
},
{"submissions.$": 1}
)
if quiz and quiz.get('submissions'):
return quiz['submissions'][0].get('score')
return None
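# Note: quiz_id must match the stored _id type. insert_one stores an ObjectId,
# so a quiz_id that arrives as a string needs converting first, e.g.
# (assuming the caller holds the id as a string):
#
#   from bson import ObjectId
#   score = get_student_quiz_score(ObjectId(quiz_id_str), "student_42")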
def submit_quiz_answers(quiz_id, student_id, student_answers):
"""Submit and score student's quiz answers"""
try:
quiz = quizzes_collection.find_one({"_id": quiz_id})
if not quiz:
return None
# Calculate score
correct_answers = 0
total_questions = len(quiz['questions'])
for q_idx, question in enumerate(quiz['questions']):
student_answer = student_answers.get(str(q_idx))
if student_answer: # Only check if answer was provided
# Extract the option letter (A, B, C, D) from the full answer string
answer_letter = student_answer.split(')')[0].strip()
if answer_letter == question['correct_option']:
correct_answers += 1
score = (correct_answers / total_questions) * 100
# Store submission
submission_data = {
"student_id": student_id,
"answers": student_answers,
"score": score,
"submitted_at": datetime.utcnow()
}
# Update quiz with submission
result = quizzes_collection.update_one(
{"_id": quiz_id},
{"$push": {"submissions": submission_data}}
)
return score if result.modified_count > 0 else None
except Exception as e:
print(f"Error submitting quiz: {e}")
        return None
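# Minimal end-to-end sketch, assuming a reachable MongoDB, a valid GEMINI_KEY,
# and hypothetical IDs; the real app drives these calls from its UI.
if __name__ == "__main__":
    questions = generate_mcqs(
        context=None,
        num_questions=2,
        session_title="Introduction to C++",
        session_description="Data types, variables, and operators",
    )
    if questions:
        new_quiz_id = save_quiz("CS101", "session_01", "Demo Quiz", questions, "faculty_123")
        if new_quiz_id:
            # Answer every question with option A (demonstration only)
            demo_answers = {str(i): "A) placeholder" for i in range(len(questions))}
            print("Score:", submit_quiz_answers(new_quiz_id, "student_42", demo_answers))
            print("Stored score:", get_student_quiz_score(new_quiz_id, "student_42"))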