Merge branch 'master' of https://github.com/HarVkr/NovaScholar
Files changed:
- chatbot.py +23 -17
- db.py +0 -51
- gen_mcqs.py +137 -0
- main.py +3 -257
- research_assistant_dashboard.py +284 -36
- session_page.py +271 -167
chatbot.py
CHANGED
@@ -26,20 +26,26 @@ def insert_chat_message(user_id, session_id, role, content):
         upsert=True
     )
 
-    [17 blank lines removed]
+def give_chat_response(user_id, session_id, question, title, description, context):
+    context_prompt = f"""
+    Based on the following session title, description, and context, answer the user's question in 3-4 lines:
+
+    Title: {title}
+    Description: {description}
+    Context: {context}
+
+    Question: {question}
+
+    Please provide a clear and concise answer based on the information provided.
+    """
+
+    response = model.generate_content(context_prompt)
+    if not response or not response.text:
+        return "No response received from the model"
+
+    assistant_response = response.text.strip()
+
+    # Save the chat message
+    insert_chat_message(user_id, session_id, "assistant", assistant_response)
+
+    return assistant_response
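For context, a minimal sketch of how a caller might use the new helper from a Streamlit handler. The wrapper function and its persistence of the user turn are assumptions for illustration (not part of this commit), since give_chat_response itself stores only the assistant message:

# Hypothetical caller, for illustration only
from chatbot import insert_chat_message, give_chat_response

def handle_question(user_id, session, question, context):
    # Persist the student's turn first; give_chat_response records only the assistant turn.
    insert_chat_message(user_id, session["session_id"], "user", question)
    return give_chat_response(
        user_id,
        session["session_id"],
        question,
        session["title"],
        session.get("description", ""),
        context,
    )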
db.py
CHANGED
@@ -391,18 +391,6 @@ course_schema = {
 # }
 courses_collection2 = db["courses_collection2"]
 
-# courses_collection2.insert_one(sample_course)
-# print("Sample course inserted successfully!")
-
-
-# sessions_collection.insert_one(session_data)
-# sessions_collection.delete_one({"session_id": "S101"})
-
-# course_id = "C101"
-# sessions = sessions_collection.find({"course_id": course_id})
-# for session in sessions:
-#     print(session)
-
 
 # Define the users schema
 users_schema = {
@@ -429,15 +417,6 @@ users_schema = {
 # db.create_collection("users", validator={"$jsonSchema": users_schema})
 users_collection = db["users"]
 
-# sample_user = {
-#     "user_id": "U103",
-#     "username": "Yash Desai",
-#     "password": generate_password_hash("yash"),
-#     "role": "Faculty",
-#     "created_at": datetime.utcnow()
-# }
-# users_collection.insert_one(sample_user)
-# print("Sample user inserted successfully!")
 
 # Defining the Student Collection
 student_schema = {
@@ -522,32 +501,6 @@ faculty_schema = {
 students_collection = db["students"]
 faculty_collection = db["faculty"]
 
-# Inserting Sample Student Data
-# sample_student = {
-#     "SID": "S102",
-#     "full_name": "Omkar Surve",
-#     "password": generate_password_hash("omkar"),
-#     "enrolled_courses": [
-#         {"course_id": "CS101", "title": "Introduction to Computer Science"}
-#     ],
-#     "created_at": datetime.utcnow()
-# }
-# # students_collection.insert_one(sample_student)
-# print("Sample student inserted successfully!")
-
-# Inserting Sample Faculty Data
-# sample_faculty = {
-#     "TID": "F101",
-#     "full_name": "Dr. John Doe",
-#     "password": generate_password_hash("john"),
-#     "courses_taught": [
-#         {"course_id": "CS101", "title": "Introduction to Computer Science"}
-#     ],
-#     "created_at": datetime.utcnow()
-# }
-# faculty_collection.insert_one(sample_faculty)
-# print("Sample faculty inserted successfully!")
-
 # Defining the Vector Collection Schema
 vector_schema = {
     "bsonType": "object",
@@ -620,10 +573,6 @@ chat_history_schema = {
 # db.create_collection("chat_history", validator={"$jsonSchema": chat_history_schema})
 chat_history_collection = db["chat_history"]
 
-# Create the collection with the schema
-# db.create_collection("chat_history", validator={"$jsonSchema": chat_history_schema})
-# chat_history_collection = db['chat_history']
-
 
 # Database setup for Research Assistant
 # Research Assistant Schema
gen_mcqs.py
ADDED
@@ -0,0 +1,137 @@
from pymongo import MongoClient
from datetime import datetime
import openai
import google.generativeai as genai
from google.generativeai import GenerativeModel
from dotenv import load_dotenv
import os
from file_upload_vectorize import resources_collection, vectors_collection, courses_collection2, faculty_collection

# Load environment variables
load_dotenv()
MONGO_URI = os.getenv('MONGO_URI')
OPENAI_KEY = os.getenv('OPENAI_KEY')
GEMINI_KEY = os.getenv('GEMINI_KEY')

# Configure APIs
openai.api_key = OPENAI_KEY
genai.configure(api_key=GEMINI_KEY)
model = genai.GenerativeModel('gemini-pro')

# Connect to MongoDB
client = MongoClient(MONGO_URI)
db = client['novascholar_db']
quizzes_collection = db["quizzes"]

# New function to generate MCQs using Gemini
def generate_mcqs(context, num_questions, session_title, session_description):
    """Generate MCQs either from context or session details"""
    try:
        # Initialize Gemini model
        if context:
            prompt = f"""
            Based on the following content, generate {num_questions} multiple choice questions.
            Format each question as a Python dictionary with the following structure:
            {{
                "question": "Question text here",
                "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
                "correct_option": "A) option1" or "B) option2" or "C) option3" or "D) option4"
            }}

            Content:
            {context}

            Generate challenging but clear questions that test understanding of key concepts.
            Return only the Python list of dictionaries.
            """
        else:
            prompt = f"""
            Generate {num_questions} multiple choice questions about the topic:
            Title: {session_title}
            Description: {session_description}

            Format each question as a Python dictionary with the following structure:
            {{
                "question": "Question text here",
                "options": ["A) option1", "B) option2", "C) option3", "D) option4"],
                "correct_option": "A" or "B" or "C" or "D"
            }}

            Generate challenging but clear questions.
            Return only the Python list of dictionaries.
            """

        response = model.generate_content(prompt)
        # Extract and parse the response to get the list of MCQs
        mcqs = eval(response.text)  # Be careful with eval; consider using ast.literal_eval for production
        print(mcqs)
        return mcqs
    except Exception as e:
        print(f"Error generating MCQs: {e}")
        return None

# New function to save quiz to database
def save_quiz(course_id, session_id, title, questions):
    """Save quiz to database"""
    try:
        quiz_data = {
            "course_id": course_id,
            "session_id": session_id,
            "title": title,
            "questions": questions,
            "created_at": datetime.utcnow(),
            "status": "active"
        }
        result = quizzes_collection.insert_one(quiz_data)
        return result.inserted_id
    except Exception as e:
        print(f"Error saving quiz: {e}")
        return None


def get_student_quiz_score(quiz_id, student_id):
    """Get student's score for a specific quiz"""
    quiz = quizzes_collection.find_one(
        {
            "_id": quiz_id,
            "submissions.student_id": student_id
        },
        {"submissions.$": 1}
    )
    if quiz and quiz.get('submissions'):
        return quiz['submissions'][0].get('score')
    return None

def submit_quiz_answers(quiz_id, student_id, student_answers):
    """Submit and score student's quiz answers"""
    quiz = quizzes_collection.find_one({"_id": quiz_id})
    if not quiz:
        return None

    # Calculate score
    correct_answers = 0
    total_questions = len(quiz['questions'])

    for q_idx, question in enumerate(quiz['questions']):
        if student_answers.get(str(q_idx)) == question['correct_option']:
            correct_answers += 1

    score = (correct_answers / total_questions) * 100

    # Store submission
    submission_data = {
        "student_id": student_id,
        "answers": student_answers,
        "score": score,
        "submitted_at": datetime.utcnow()
    }

    # Update quiz with submission
    quizzes_collection.update_one(
        {"_id": quiz_id},
        {
            "$push": {"submissions": submission_data}
        }
    )

    return score
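The inline comment already flags the eval() call as unsafe on model output. A safer parsing step is sketched below; it assumes the reply is a Python-literal list of dicts, optionally wrapped in a markdown code fence, and is a suggestion rather than part of this commit:

import ast

def parse_mcqs(raw_text):
    """Parse the model reply into a list of question dicts without eval() (assumed helper)."""
    cleaned = raw_text.strip()
    # Drop an optional ``` fence around the list
    if cleaned.startswith("```"):
        cleaned = cleaned.split("\n", 1)[-1]
        cleaned = cleaned.rstrip("`").strip()
    try:
        mcqs = ast.literal_eval(cleaned)
    except (ValueError, SyntaxError):
        return None
    if not isinstance(mcqs, list):
        return None
    # Keep only well-formed entries
    return [
        q for q in mcqs
        if isinstance(q, dict) and {"question", "options", "correct_option"} <= q.keys()
    ]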
main.py
CHANGED
@@ -10,12 +10,8 @@ from db import (
     research_assistants_collection,
 )
 from werkzeug.security import generate_password_hash, check_password_hash
-import hashlib
 import os
-import openai
 from openai import OpenAI
-from bson import ObjectId
-from pymongo import MongoClient
 from dotenv import load_dotenv
 
 client = OpenAI(api_key=os.getenv("OPENAI_KEY"))
@@ -476,10 +472,9 @@ def main_dashboard():
             st.session_state.username, st.session_state.user_type
         )
 
-        if st.button(
-            "Create New Course", key="create_course", use_container_width=True
-        ):
-            st.session_state.show_create_course_form = True
+        if st.session_state.user_type == "faculty":
+            if st.button("Create New Course", key="create_course", use_container_width=True):
+                st.session_state.show_create_course_form = True
 
     if not enrolled_courses:
         st.warning("No courses found")
@@ -543,255 +538,6 @@ def main_dashboard():
     # if create_session:
     #     create_session_form(selected_course_id)
 
-
-load_dotenv()
-MONGO_URI = os.getenv("MONGO_URI")
-
-
-def modify_courses_collection_schema():
-    """Modify the schema of courses_collection2 to include start_date and end_date"""
-    client = MongoClient(MONGO_URI)
-    db = client["novascholar_db"]
-    courses_collection2 = db["courses_collection2"]
-
-    # Define the updated schema
-    updated_course_schema = {
-        "bsonType": "object",
-        "required": [
-            "course_id", "title", "description", "faculty", "faculty_id",
-            "duration", "created_at", "start_date", "end_date",
-        ],
-        "properties": {
-            # [~200 removed lines: the course fields plus nested "sessions" items with
-            #  pre_class resources, in_class topics/quiz/polls, and post_class
-            #  assignments/submissions sub-schemas]
-        },
-    }
-
-    # Update the schema using the collMod command
-    db.command(
-        {
-            "collMod": "courses_collection2",
-            "validator": {"$jsonSchema": updated_course_schema},
-        }
-    )
-
-    print("Schema updated successfully!")
-
-
 def main():
     st.set_page_config(page_title="NOVAScholar", page_icon="📚", layout="wide")
     init_session_state()
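The deleted modify_courses_collection_schema() helper was a one-off migration. If it is still needed, a standalone sketch of the same collMod pattern (assuming the same database name, with the schema dict built separately) might look like the following; this is not part of the commit:

# Hypothetical standalone migration script; mirrors the collMod call removed from main.py.
import os
from dotenv import load_dotenv
from pymongo import MongoClient

load_dotenv()

def apply_course_schema(updated_course_schema: dict) -> None:
    """Attach or refresh the JSON-schema validator on courses_collection2."""
    client = MongoClient(os.getenv("MONGO_URI"))
    db = client["novascholar_db"]
    db.command(
        {
            "collMod": "courses_collection2",
            "validator": {"$jsonSchema": updated_course_schema},
        }
    )
    print("Schema updated successfully!")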
research_assistant_dashboard.py
CHANGED
@@ -2,52 +2,244 @@ import streamlit as st
 from openai import OpenAI
 import os
 from dotenv import load_dotenv
+from llama_index.core import (
+    VectorStoreIndex,
+    SimpleDirectoryReader,
+    Document,
+    GPTVectorStoreIndex,
+)
+from bson import ObjectId
+import requests
+import openai
+import numpy as np
+from pymongo import MongoClient
+from bson import ObjectId
+from datetime import datetime
+from llama_index.embeddings.openai import OpenAIEmbedding
+from typing import List, Dict
 
-# Initialize OpenAI
+# Initialize Perplexity API and OpenAI API
 load_dotenv()
-[removed: the previous OpenAI-only setup and the old bodies of get_research_papers() and
- analyze_research_gaps(), which called client.chat.completions.create() with a "helpful research
- assistant" system prompt asking for 10 papers (titles, authors, brief descriptions, DOI/URL links)
- and a research-gap analysis of those papers]
+perplexity_api_key = os.getenv("PERPLEXITY_KEY")
+openai.api_key = os.getenv("OPENAI_KEY")
+
+# MongoDB setup
+MONGO_URI = os.getenv("MONGO_URI")
+client = MongoClient(MONGO_URI)
+db = client["novascholar_db"]
+research_papers_collection = db["research_papers"]
+
+
+def fetch_perplexity_data(api_key, topic):
+    """
+    Fetch research papers data from Perplexity API with proper formatting
+    """
+    headers = {
+        "accept": "application/json",
+        "content-type": "application/json",
+        "authorization": f"Bearer {api_key}",
+    }
+
+    # Structured prompt to get properly formatted response
+    messages = [
+        {
+            "role": "system",
+            "content": """You are a research paper retrieval expert. For the given topic, return exactly 10 research papers in the following format:
+            Title: Paper Title
+            Authors: Author 1, Author 2
+            Year: YYYY
+            Content: Detailed paper content with abstract and key findings
+            URL: DOI or paper URL
+            """,
+        },
+        {"role": "user", "content": f"Find 10 research papers about: {topic}"},
+    ]
+
+    try:
+        client = OpenAI(api_key=api_key, base_url="https://api.perplexity.ai")
+        response = client.chat.completions.create(
+            model="llama-3.1-sonar-small-128k-chat",  # Use the best Perplexity model
+            messages=messages,
+        )
+
+        # Extract and validate response
+        content = response.choices[0].message.content
+        st.write("Fetched Data:", content)  # Debugging line to check the fetched data
+
+        return content
+
+    except Exception as e:
+        st.error(f"Failed to fetch data from Perplexity API: {str(e)}")
+        return ""
+
+
+def split_and_vectorize_papers(content: str) -> List[Dict]:
+    """Split and vectorize papers using OpenAI embeddings"""
+    papers = content.split("\n\n")
+
+    # Initialize OpenAI client
+    # client = OpenAI()  # Uses api_key from environment variable
+    vectors = []
+
+    for paper in papers:
+        try:
+            # Get embedding using OpenAI's API directly
+            response = openai.embeddings.create(
+                model="text-embedding-ada-002", input=paper, encoding_format="float"
+            )
+
+            # Extract embedding from response
+            embedding = response.data[0].embedding
+
+            vectors.append(
+                {"content": paper, "vector": embedding, "timestamp": datetime.utcnow()}
+            )
+
+        except Exception as e:
+            st.error(f"Error vectorizing paper: {str(e)}")
+            continue
+
+    return vectors
+
+
+def store_papers_in_mongodb(papers):
+    """Store papers with vectors in MongoDB"""
+    try:
+        for paper in papers:
+            # Prepare MongoDB document
+            mongo_doc = {
+                "content": paper["content"],
+                "vector": paper["vector"],
+                "created_at": datetime.utcnow(),
+            }
+
+            # Insert into MongoDB
+            db.papers.update_one(
+                {"content": paper["content"]}, {"$set": mongo_doc}, upsert=True
+            )
+
+        st.success(f"Stored {len(papers)} papers in database")
+        return True
+    except Exception as e:
+        st.error(f"Error storing papers: {str(e)}")
 
 
 def get_research_papers(query):
+    """
+    Get and store research papers with improved error handling
+    """
+    # Fetch papers from Perplexity
+    content = fetch_perplexity_data(perplexity_api_key, query)
+
+    if not content:
+        return []
+
+    # Split and vectorize papers
+    papers = split_and_vectorize_papers(content)
+
+    # Store papers in MongoDB
+    if store_papers_in_mongodb(papers):
+        return papers
+    else:
+        st.warning("Failed to store papers in database, but returning fetched results")
+        return papers
+
+
+def analyze_research_gaps(papers):
+    """
+    Analyze research gaps with improved prompt and error handling
+    """
+    if not papers:
+        return "No papers provided for analysis"
+
+    # Prepare paper summaries for analysis
+    paper_summaries = "\n\n".join(
+        [
+            f"Key Findings: {paper['content'][:500]}..."
+            # f"Title: {paper['title']}\nYear: {paper['year']}\nKey Findings: {paper['content'][:500]}..."
+            for paper in papers
+        ]
+    )
+
+    headers = {
+        "Authorization": f"Bearer {perplexity_api_key}",
+        "Content-Type": "application/json",
+    }
+
+    data = {
+        "messages": [
+            {
+                "role": "system",
+                "content": "You are a research analysis expert. Identify specific research gaps and future research directions based on the provided papers. Format your response with clear sections: Current State, Identified Gaps, and Future Directions.",
+            },
+            {
+                "role": "user",
+                "content": f"Analyze these papers and identify research gaps:\n\n{paper_summaries}",
+            },
+        ]
+    }
+
     try:
+        client = OpenAI(
+            api_key=perplexity_api_key, base_url="https://api.perplexity.ai"
+        )
         response = client.chat.completions.create(
+            model="llama-3.1-sonar-small-128k-chat",  # Use the best Perplexity model
+            messages=data["messages"],
         )
         return response.choices[0].message.content
+
     except Exception as e:
+        st.error(f"Failed to analyze research gaps: {str(e)}")
+        return "Error analyzing research gaps"
 
 
+def create_research_paper(gaps, topic, papers):
+    """
+    Create a research paper that addresses the identified gaps using Perplexity API
+    """
+    full_texts = "\n\n".join([paper["content"] for paper in papers])
+    headers = {
+        "Authorization": f"Bearer {perplexity_api_key}",
+        "Content-Type": "application/json",
+    }
+    data = {
+        "messages": [
+            {
+                "role": "system",
+                "content": "You are a research paper generation expert. Create a comprehensive research paper that addresses the identified gaps based on the provided papers. Format your response with clear sections: Introduction, Literature Review, Methodology, Results, Discussion, Conclusion, and References.",
+            },
+            {
+                "role": "user",
+                "content": f"Create a research paper on the topic '{topic}' that addresses the following research gaps:\n\n{gaps}\n\nBased on the following papers:\n\n{full_texts}",
+            },
+        ]
+    }
     try:
+        client = OpenAI(
+            api_key=perplexity_api_key, base_url="https://api.perplexity.ai"
+        )
         response = client.chat.completions.create(
+            model="llama-3.1-sonar-small-128k-chat",  # Use the best Perplexity model
+            messages=data["messages"],
         )
         return response.choices[0].message.content
+
     except Exception as e:
+        st.error(f"Failed to create research paper: {str(e)}")
+        return "Error creating research paper"
+
+
+def cosine_similarity(vec1, vec2):
+    """Calculate the cosine similarity between two vectors"""
+    vec1 = np.array(vec1)
+    vec2 = np.array(vec2)
+    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
+
+
+def calculate_cosine_similarity(vec1: List[float], vec2: List[float]) -> float:
+    """Calculate cosine similarity between two vectors"""
+    return np.dot(vec1, vec2) / (np.linalg.norm(vec1) * np.linalg.norm(vec2))
@@ -55,6 +247,10 @@ def display_research_assistant_dashboard():
     # Initialize session state for recommendations
     if "recommendations" not in st.session_state:
         st.session_state.recommendations = None
+    if "vectors" not in st.session_state:
+        st.session_state.vectors = None
+    if "generated_paper" not in st.session_state:
+        st.session_state.generated_paper = None
 
     # Sidebar
     with st.sidebar:
@@ -67,28 +263,80 @@ def display_research_assistant_dashboard():
     # Main content
     st.title("Research Paper Recommendations")
     search_query = st.text_input("Enter research topic:")
-
     col1, col2 = st.columns(2)
     with col1:
         if st.button("Get Research Papers"):
            if search_query:
                with st.spinner("Fetching recommendations..."):
                    st.session_state.recommendations = get_research_papers(search_query)
-                   [removed: previous code that rendered the recommendations directly]
+                   st.session_state.vectors = [
+                       paper["vector"] for paper in st.session_state.recommendations
+                   ]
+                   st.markdown(
+                       "\n\n".join(
+                           [
+                               f"**{i+1}.**\n{paper['content']}"
+                               # f"**{i+1}. {paper['title']}**\n{paper['content']}"
+                               for i, paper in enumerate(
+                                   st.session_state.recommendations
+                               )
+                           ]
+                       )
+                   )
            else:
                st.warning("Please enter a search query")
-
     with col2:
         if st.button("Analyze Research Gaps"):
            if st.session_state.recommendations:
                with st.spinner("Analyzing research gaps..."):
                    gaps = analyze_research_gaps(st.session_state.recommendations)
+                   st.session_state.generated_paper = create_research_paper(
+                       gaps, search_query, st.session_state.recommendations
+                   )
                st.markdown("### Potential Research Gaps")
                st.markdown(gaps)
            else:
                st.warning("Please get research papers first")
 
-
-
-
+    if st.button("Save and Vectorize"):
+        if st.session_state.generated_paper:
+            try:
+                # Initialize OpenAI client
+
+                # Get embedding for generated paper
+                response = openai.embeddings.create(
+                    model="text-embedding-ada-002",
+                    input=st.session_state.generated_paper,
+                    encoding_format="float",
+                )
+                generated_vector = response.data[0].embedding
+
+                # Calculate similarities with stored vectors
+                similarities = [
+                    calculate_cosine_similarity(generated_vector, paper_vector)
+                    for paper_vector in st.session_state.vectors
+                ]
+
+                # Display results
+                st.markdown("### Generated Research Paper")
+                st.markdown(st.session_state.generated_paper)
+
+                st.markdown("### Cosine Similarities with Original Papers")
+                for i, similarity in enumerate(similarities):
+                    st.metric(
+                        f"Paper {i+1}",
+                        value=f"{similarity:.3f}",
+                        help="Cosine similarity (1.0 = identical, 0.0 = completely different)",
+                    )
+
+            except Exception as e:
+                st.error(f"Error during vectorization: {str(e)}")
+        else:
+            st.warning("Please analyze research gaps first")
+
+
+# Run the dashboard
+if __name__ == "__main__":
+    display_research_assistant_dashboard()
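Since the commit now stores one embedding per paper in db.papers, a small sketch of how those vectors could later be queried is shown below: embed the query with the same model and rank documents with the module's own calculate_cosine_similarity. The function name and the brute-force in-memory scan are assumptions, not part of this commit:

def find_similar_papers(query: str, top_k: int = 5):
    """Rank stored papers by cosine similarity to the query (assumed helper)."""
    # Embed the query with the same model used for the papers
    response = openai.embeddings.create(
        model="text-embedding-ada-002", input=query, encoding_format="float"
    )
    query_vector = response.data[0].embedding

    # Brute-force scan; fine for a handful of stored papers
    scored = []
    for doc in db.papers.find({}, {"content": 1, "vector": 1}):
        score = calculate_cosine_similarity(query_vector, doc["vector"])
        scored.append((score, doc["content"]))

    scored.sort(key=lambda pair: pair[0], reverse=True)
    return scored[:top_k]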
session_page.py
CHANGED
@@ -1,11 +1,9 @@
|
|
1 |
import streamlit as st
|
2 |
from datetime import datetime
|
3 |
-
from utils.sample_data import SAMPLE_CHAT_HISTORY, SAMPLE_STUDENT_PROGRESS
|
4 |
from utils.helpers import display_progress_bar, create_notification, format_datetime
|
5 |
-
from utils.sample_data import SAMPLE_SESSIONS, SAMPLE_COURSES
|
6 |
from file_upload_vectorize import upload_resource, extract_text_from_file, create_vector_store, resources_collection, model, assignment_submit
|
7 |
from db import courses_collection2, chat_history_collection, students_collection, faculty_collection, vectors_collection
|
8 |
-
from chatbot import
|
9 |
from bson import ObjectId
|
10 |
from live_polls import LivePollFeature
|
11 |
import pandas as pd
|
@@ -13,6 +11,7 @@ import plotly.express as px
|
|
13 |
from dotenv import load_dotenv
|
14 |
import os
|
15 |
from pymongo import MongoClient
|
|
|
16 |
|
17 |
load_dotenv()
|
18 |
MONGO_URI = os.getenv('MONGO_URI')
|
@@ -25,146 +24,90 @@ def get_current_user():
|
|
25 |
return None
|
26 |
return students_collection.find_one({"_id": st.session_state.user_id})
|
27 |
|
28 |
-
def display_preclass_content(session, student_id):
|
29 |
"""Display pre-class materials for a session"""
|
30 |
-
|
31 |
-
|
|
|
|
|
|
|
32 |
# Display pre-class materials
|
33 |
-
|
34 |
-
|
35 |
-
materials = resources_collection.find({"session_id": session['session_id']})
|
36 |
-
print(f"materials: {type(materials)}")
|
37 |
-
for material in materials:
|
38 |
-
print(f"material: {type(material)}")
|
39 |
-
with st.expander(f"{material['file_name']} ({material['material_type'].upper()})"):
|
40 |
-
file_type = material.get('file_type', 'unknown')
|
41 |
-
if file_type == 'application/pdf':
|
42 |
-
st.markdown(f"📑 [Open PDF Document]({material['file_name']})")
|
43 |
-
if st.button("View PDF", key=f"view_pdf_{material['file_name']}"):
|
44 |
-
st.text_area("PDF Content", material['text_content'], height=300)
|
45 |
-
if st.button("Download PDF", key=f"download_pdf_{material['file_name']}"):
|
46 |
-
st.download_button(
|
47 |
-
label="Download PDF",
|
48 |
-
data=material['file_content'],
|
49 |
-
file_name=material['file_name'],
|
50 |
-
mime='application/pdf'
|
51 |
-
)
|
52 |
-
if st.button("Mark PDF as Read", key=f"pdf_{material['file_name']}"):
|
53 |
-
create_notification("PDF marked as read!", "success")
|
54 |
-
|
55 |
-
user = get_current_user()
|
56 |
-
print(f"user: {type(user)}")
|
57 |
-
|
58 |
-
user = get_current_user()
|
59 |
|
60 |
-
|
61 |
-
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
st.
|
67 |
-
|
68 |
-
|
69 |
-
|
70 |
-
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
|
75 |
-
# Get document context
|
76 |
-
context = ""
|
77 |
-
materials = resources_collection.find({"session_id": session['session_id']})
|
78 |
-
context = ""
|
79 |
-
vector_data = None
|
80 |
-
|
81 |
-
context = ""
|
82 |
-
for material in materials:
|
83 |
-
resource_id = material['_id']
|
84 |
-
vector_data = vectors_collection.find_one({"resource_id": resource_id})
|
85 |
-
if vector_data and 'text' in vector_data:
|
86 |
-
context += vector_data['text'] + "\n"
|
87 |
-
|
88 |
-
if not vector_data:
|
89 |
-
st.error("No Pre-class materials found for this session.")
|
90 |
-
return
|
91 |
-
|
92 |
-
try:
|
93 |
-
# Generate response using Gemini
|
94 |
-
context_prompt = f"""
|
95 |
-
Based on the following context, answer the user's question:
|
96 |
-
|
97 |
-
Context:
|
98 |
-
{context}
|
99 |
-
|
100 |
-
Question: {prompt}
|
101 |
-
|
102 |
-
Please provide a clear and concise answer based only on the information provided in the context.
|
103 |
-
"""
|
104 |
-
|
105 |
-
response = model.generate_content(context_prompt)
|
106 |
-
if not response or not response.text:
|
107 |
-
st.error("No response received from the model")
|
108 |
-
return
|
109 |
-
|
110 |
-
assistant_response = response.text
|
111 |
-
# Display Assistant Response
|
112 |
-
with st.chat_message("assistant"):
|
113 |
-
st.markdown(assistant_response)
|
114 |
-
|
115 |
-
# Build the message
|
116 |
-
new_message = {
|
117 |
-
"prompt": prompt,
|
118 |
-
"response": assistant_response,
|
119 |
-
"timestamp": datetime.utcnow()
|
120 |
-
}
|
121 |
-
st.session_state.messages.append(new_message)
|
122 |
-
|
123 |
-
# Update database
|
124 |
-
try:
|
125 |
-
chat_history_collection.update_one(
|
126 |
-
{
|
127 |
-
"user_id": student_id,
|
128 |
-
"session_id": session['session_id']
|
129 |
-
},
|
130 |
-
{
|
131 |
-
"$push": {"messages": new_message},
|
132 |
-
"$setOnInsert": {
|
133 |
-
"user_id": student_id,
|
134 |
-
"session_id": session['session_id'],
|
135 |
-
"timestamp": datetime.utcnow()
|
136 |
-
}
|
137 |
-
},
|
138 |
-
upsert=True
|
139 |
)
|
140 |
-
|
141 |
-
|
142 |
-
|
143 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
144 |
|
145 |
-
|
146 |
-
|
147 |
-
|
148 |
-
|
149 |
-
|
150 |
-
"session_id": session['session_id']
|
151 |
-
})
|
152 |
-
if existing_chat and 'messages' in existing_chat:
|
153 |
-
st.session_state.messages = existing_chat['messages']
|
154 |
-
else:
|
155 |
-
st.session_state.messages = []
|
156 |
|
157 |
-
|
158 |
-
|
159 |
-
|
160 |
-
|
161 |
-
|
162 |
-
|
163 |
-
|
164 |
-
|
165 |
-
|
166 |
-
|
167 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
168 |
|
169 |
def display_in_class_content(session, user_type):
|
170 |
# """Display in-class activities and interactions"""
|
@@ -185,6 +128,57 @@ def display_post_class_content(session, student_id, course_id):
|
|
185 |
st.header("Post-class Work")
|
186 |
|
187 |
if st.session_state.user_type == 'faculty':
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
188 |
st.subheader("Add Assignments")
|
189 |
# Add assignment form
|
190 |
with st.form("add_assignment_form"):
|
@@ -310,6 +304,26 @@ def display_preclass_analytics(session, course_id):
|
|
310 |
student_chat = next((chat for chat in chat_data if chat['user_id'] == student_id), None)
|
311 |
|
312 |
if student_chat:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
313 |
message_count = len(student_chat.get('messages', []))
|
314 |
status = "Completed" if message_count >= 20 else "Incomplete"
|
315 |
if status == "Incomplete":
|
@@ -321,6 +335,7 @@ def display_preclass_analytics(session, course_id):
|
|
321 |
else:
|
322 |
message_count = 0
|
323 |
status = "Not Started"
|
|
|
324 |
incomplete_students.append({
|
325 |
'name': student_name,
|
326 |
'sid': student_sid,
|
@@ -331,7 +346,8 @@ def display_preclass_analytics(session, course_id):
|
|
331 |
'Student Name': student_name,
|
332 |
'SID': student_sid,
|
333 |
'Messages': message_count,
|
334 |
-
'Status': status
|
|
|
335 |
})
|
336 |
|
337 |
# Create DataFrame
|
@@ -356,16 +372,58 @@ def display_preclass_analytics(session, course_id):
|
|
356 |
st.markdown("### Overall Completion Rate")
|
357 |
st.progress(completion_rate / 100)
|
358 |
st.markdown(f"**{completion_rate:.1f}%** of students have completed pre-class materials")
|
359 |
-
|
360 |
-
#
|
361 |
-
st.
|
362 |
-
|
363 |
-
|
364 |
-
|
365 |
-
|
366 |
-
|
367 |
-
|
368 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
369 |
|
370 |
# Display students who haven't completed
|
371 |
if incomplete_students:
|
@@ -644,16 +702,69 @@ def upload_preclass_materials(session_id, course_id):
|
|
644 |
Uploaded on: {material['uploaded_at'].strftime('%Y-%m-%d %H:%M')}
|
645 |
""")
|
646 |
|
647 |
-
|
648 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
649 |
|
650 |
def display_session_content(student_id, course_id, session, username, user_type):
|
651 |
st.title(f"Session {session['session_id']}: {session['title']}")
|
652 |
-
# st.markdown(f"**Date:** {format_datetime(session['date'])}")
|
653 |
-
|
654 |
-
# Convert date string to datetime object
|
655 |
-
# session_date = datetime.fromisoformat(session['date'])
|
656 |
-
# st.markdown(f"**Date:** {format_datetime(session_date)}")
|
657 |
|
658 |
# Check if the date is a string or a datetime object
|
659 |
if isinstance(session['date'], str):
|
@@ -672,30 +783,23 @@ def display_session_content(student_id, course_id, session, username, user_type)
|
|
672 |
else:
|
673 |
tabs = (["Pre-class Analytics", "In-class Analytics", "Post-class Analytics"])
|
674 |
|
675 |
-
# Create tabs for different sections
|
676 |
-
# pre_class_tab, in_class_tab, post_class_tab, faculty_tab = st.tabs([
|
677 |
-
# "Pre-class Work",
|
678 |
-
# "In-class Work",
|
679 |
-
# "Post-class Work",
|
680 |
-
# "Faculty Analytics"
|
681 |
-
# ])
|
682 |
-
|
683 |
if st.session_state.user_type == 'student':
|
684 |
-
pre_class_tab, in_class_tab, post_class_tab = st.tabs(["Pre-class Work", "In-class Work", "Post-class Work"])
|
685 |
else:
|
686 |
pre_class_work, in_class_work, post_class_work, preclass_analytics, inclass_analytics, postclass_analytics = st.tabs(["Pre-class Work", "In-class Work", "Post-class Work", "Pre-class Analytics", "In-class Analytics", "Post-class Analytics"])
|
687 |
|
688 |
# Display pre-class materials
|
689 |
if st.session_state.user_type == 'student':
|
690 |
with pre_class_tab:
|
691 |
-
display_preclass_content(session, student_id)
|
692 |
-
|
693 |
with in_class_tab:
|
694 |
display_in_class_content(session, st.session_state.user_type)
|
695 |
|
696 |
# Post-class Content
|
697 |
with post_class_tab:
|
698 |
display_post_class_content(session, student_id, course_id)
|
|
|
|
|
699 |
|
700 |
if st.session_state.user_type == 'faculty':
|
701 |
with pre_class_work:
|
@@ -709,4 +813,4 @@ def display_session_content(student_id, course_id, session, username, user_type)
|
|
709 |
with inclass_analytics:
|
710 |
display_inclass_analytics(session, course_id)
|
711 |
with postclass_analytics:
|
712 |
-
display_postclass_analytics(session, course_id)
|
|
|
1 |
import streamlit as st
|
2 |
from datetime import datetime
|
|
|
3 |
from utils.helpers import display_progress_bar, create_notification, format_datetime
|
|
|
4 |
from file_upload_vectorize import upload_resource, extract_text_from_file, create_vector_store, resources_collection, model, assignment_submit
|
5 |
from db import courses_collection2, chat_history_collection, students_collection, faculty_collection, vectors_collection
|
6 |
+
from chatbot import give_chat_response
|
7 |
from bson import ObjectId
|
8 |
from live_polls import LivePollFeature
|
9 |
import pandas as pd
|
|
|
11 |
from dotenv import load_dotenv
|
12 |
import os
|
13 |
from pymongo import MongoClient
|
14 |
+
from gen_mcqs import generate_mcqs, save_quiz, quizzes_collection, get_student_quiz_score, submit_quiz_answers
|
15 |
|
16 |
load_dotenv()
|
17 |
MONGO_URI = os.getenv('MONGO_URI')
|
|
|
24 |
return None
|
25 |
return students_collection.find_one({"_id": st.session_state.user_id})
|
26 |
|
27 |
+
def display_preclass_content(session, student_id, course_id):
|
28 |
"""Display pre-class materials for a session"""
|
29 |
+
|
30 |
+
# Initialize 'messages' in session_state if it doesn't exist
|
31 |
+
if 'messages' not in st.session_state:
|
32 |
+
st.session_state.messages = []
|
33 |
+
|
34 |
# Display pre-class materials
|
35 |
+
materials = list(resources_collection.find({"course_id": course_id, "session_id": session['session_id']}))
|
36 |
+
st.subheader("Pre-class Materials")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
37 |
|
38 |
+
if materials:
|
39 |
+
for material in materials:
|
40 |
+
with st.expander(f"{material['file_name']} ({material['material_type'].upper()})"):
|
41 |
+
file_type = material.get('file_type', 'unknown')
|
42 |
+
if file_type == 'application/pdf':
|
43 |
+
st.markdown(f"📑 [Open PDF Document]({material['file_name']})")
|
44 |
+
if st.button("View PDF", key=f"view_pdf_{material['file_name']}"):
|
45 |
+
st.text_area("PDF Content", material['text_content'], height=300)
|
46 |
+
if st.button("Download PDF", key=f"download_pdf_{material['file_name']}"):
|
47 |
+
st.download_button(
|
48 |
+
label="Download PDF",
|
49 |
+
data=material['file_content'],
|
50 |
+
file_name=material['file_name'],
|
51 |
+
mime='application/pdf'
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
)
|
53 |
+
if st.button("Mark PDF as Read", key=f"pdf_{material['file_name']}"):
|
54 |
+
create_notification("PDF marked as read!", "success")
|
55 |
+
else:
|
56 |
+
st.info("No pre-class materials uploaded by the faculty.")
|
57 |
+
st.subheader("Upload Pre-class Material")
|
58 |
+
|
59 |
+
# File upload section for students
|
60 |
+
uploaded_file = st.file_uploader("Upload Material", type=['txt', 'pdf', 'docx'])
|
61 |
+
if uploaded_file is not None:
|
62 |
+
with st.spinner("Processing document..."):
|
63 |
+
file_name = uploaded_file.name
|
64 |
+
file_content = extract_text_from_file(uploaded_file)
|
65 |
+
if file_content:
|
66 |
+
material_type = st.selectbox("Select Material Type", ["pdf", "docx", "txt"])
|
67 |
+
if st.button("Upload Material"):
|
68 |
+
upload_resource(course_id, session['session_id'], file_name, uploaded_file, material_type)
|
69 |
+
|
70 |
+
# Search for the newly uploaded resource's _id in resources_collection
|
71 |
+
resource_id = resources_collection.find_one({"file_name": file_name})["_id"]
|
72 |
+
create_vector_store(file_content, resource_id)
|
73 |
+
st.success("Material uploaded successfully!")
|
74 |
+
|
75 |
+
st.subheader("Learn the Topic Using Chatbot")
|
76 |
+
st.write(f"**Session Title:** {session['title']}")
|
77 |
+
st.write(f"**Description:** {session.get('description', 'No description available.')}")
|
78 |
|
79 |
+
# Chatbot interface
|
80 |
+
if prompt := st.chat_input("Ask a question about the session topic"):
|
81 |
+
if len(st.session_state.messages) >= 20:
|
82 |
+
st.warning("Message limit (20) reached for this session.")
|
83 |
+
return
|
|
|
|
|
|
|
|
|
|
|
|
|
84 |
|
85 |
+
st.session_state.messages.append({"role": "user", "content": prompt})
|
86 |
+
|
87 |
+
# Display User Message
|
88 |
+
with st.chat_message("user"):
|
89 |
+
st.markdown(prompt)
|
90 |
+
|
91 |
+
# Get response from chatbot
|
92 |
+
context = ""
|
93 |
+
for material in materials:
|
94 |
+
if 'text_content' in material:
|
95 |
+
context += material['text_content'] + "\n"
|
96 |
+
|
97 |
+
response = give_chat_response(student_id, session['session_id'], prompt, session['title'], session.get('description', ''), context)
|
98 |
+
st.session_state.messages.append({"role": "assistant", "content": response})
|
99 |
+
|
100 |
+
# Display Assistant Response
|
101 |
+
with st.chat_message("assistant"):
|
102 |
+
st.markdown(response)
|
103 |
+
|
104 |
+
st.subheader("Your Chat History")
|
105 |
+
for message in st.session_state.messages:
|
106 |
+
content = message.get("content", "") # Default to an empty string if "content" is not present
|
107 |
+
role = message.get("role", "user") # Default to "user" if "role" is not present
|
108 |
+
with st.chat_message(role):
|
109 |
+
st.markdown(content)
|
110 |
+
user = get_current_user()
|
111 |
|
112 |
def display_in_class_content(session, user_type):
|
113 |
# """Display in-class activities and interactions"""
|
|
|
128 |
st.header("Post-class Work")
|
129 |
|
130 |
if st.session_state.user_type == 'faculty':
|
131 |
+
"""Create quiz section UI for faculty"""
|
132 |
+
st.subheader("Create Quiz")
|
133 |
+
|
134 |
+
questions = []
|
135 |
+
with st.form("create_quiz_form"):
|
136 |
+
quiz_title = st.text_input("Quiz Title")
|
137 |
+
num_questions = st.number_input("Number of Questions", min_value=1, max_value=20, value=5)
|
138 |
+
|
139 |
+
# Option to choose quiz generation method
|
140 |
+
generation_method = st.radio(
|
141 |
+
"Question Generation Method",
|
142 |
+
["Generate from Pre-class Materials", "Generate Random Questions"]
|
143 |
+
)
|
144 |
+
|
145 |
+
submit_quiz = st.form_submit_button("Generate Quiz")
|
146 |
+
if submit_quiz:
|
147 |
+
if generation_method == "Generate from Pre-class Materials":
|
148 |
+
# Get pre-class materials from resources_collection
|
149 |
+
materials = resources_collection.find({"session_id": session['session_id']})
|
150 |
+
context = ""
|
151 |
+
for material in materials:
|
152 |
+
if 'text_content' in material:
|
153 |
+
context += material['text_content'] + "\n"
|
154 |
+
|
155 |
+
if not context:
|
156 |
+
st.error("No pre-class materials found for this session.")
|
157 |
+
return
|
158 |
+
|
159 |
+
# Generate MCQs from context
|
160 |
+
questions = generate_mcqs(context, num_questions, session['title'], session.get('description', ''))
|
161 |
+
else:
|
162 |
+
# Generate random MCQs based on session title and description
|
163 |
+
questions = generate_mcqs(None, num_questions, session['title'], session.get('description', ''))
|
164 |
+
print(questions)
|
165 |
+
|
166 |
+
if questions:
|
167 |
+
# Preview generated questions
|
168 |
+
st.subheader("Preview Generated Questions")
|
169 |
+
for i, q in enumerate(questions, 1):
|
170 |
+
st.markdown(f"**Question {i}:** {q['question']}")
|
171 |
+
for opt in q['options']:
|
172 |
+
st.markdown(f"- {opt}")
|
173 |
+
st.markdown(f"*Correct Answer: {q['correct_option']}*")
|
174 |
+
|
175 |
+
# Save quiz
|
176 |
+
quiz_id = save_quiz(course_id, session['session_id'], quiz_title, questions)
|
177 |
+
if quiz_id:
|
178 |
+
st.success("Quiz saved successfully!")
|
179 |
+
else:
|
180 |
+
st.error("Error saving quiz.")
|
181 |
+
|
182 |
st.subheader("Add Assignments")
|
183 |
# Add assignment form
|
184 |
with st.form("add_assignment_form"):
|
|
|
304 |
        student_chat = next((chat for chat in chat_data if chat['user_id'] == student_id), None)
305 |
 
306 |
        if student_chat:
307 |
+            messages = student_chat.get('messages', [])
308 |
+            message_count = len(messages)
309 |
+            status = "Completed" if message_count >= 20 else "Incomplete"
310 |
+
311 |
+            # Format chat history for display
312 |
+            chat_history = []
313 |
+            for msg in messages:
314 |
+                timestamp_str = msg.get('timestamp', '')
315 |
+                if isinstance(timestamp_str, str):
316 |
+                    timestamp = datetime.fromisoformat(timestamp_str)
317 |
+                else:
318 |
+                    timestamp = timestamp_str
319 |
+                # timestamp = msg.get('timestamp', '').strftime("%Y-%m-%d %H:%M:%S")
320 |
+                chat_history.append({
321 |
+                    # 'timestamp': timestamp,
322 |
+                    'timestamp': timestamp.strftime("%Y-%m-%d %H:%M:%S"),
323 |
+                    'prompt': msg.get('prompt'),
324 |
+                    'response': msg.get('response')
325 |
+                })
326 |
+
327 |
            message_count = len(student_chat.get('messages', []))
328 |
            status = "Completed" if message_count >= 20 else "Incomplete"
329 |
            if status == "Incomplete":
335 |
        else:
336 |
            message_count = 0
337 |
            status = "Not Started"
338 |
+            chat_history = []
339 |
            incomplete_students.append({
340 |
                'name': student_name,
341 |
                'sid': student_sid,
346 |
            'Student Name': student_name,
347 |
            'SID': student_sid,
348 |
            'Messages': message_count,
349 |
+            'Status': status,
350 |
+            'Chat History': chat_history
351 |
        })
352 |
 
353 |
    # Create DataFrame
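In the hunk above each chat message is expected to carry 'prompt', 'response', and a 'timestamp' stored either as an ISO-8601 string or as a datetime object, which is why both branches exist before strftime is called. A small helper in the same spirit (a sketch under that assumption, not code from this commit) would also tolerate empty or malformed values:

from datetime import datetime

def format_timestamp(value):
    # Accept an ISO-8601 string or a datetime; return a display string.
    if isinstance(value, str):
        try:
            value = datetime.fromisoformat(value)
        except ValueError:
            return value  # leave unparseable strings (including '') untouched
    return value.strftime("%Y-%m-%d %H:%M:%S")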
372 |
    st.markdown("### Overall Completion Rate")
373 |
    st.progress(completion_rate / 100)
374 |
    st.markdown(f"**{completion_rate:.1f}%** of students have completed pre-class materials")
375 |
+
376 |
+    # Create tabs for different views
377 |
+    tab1, tab2 = st.tabs(["Student Overview", "Detailed Chat History"])
378 |
+
379 |
+    with tab1:
380 |
+        # Display completion summary table
381 |
+        st.markdown("### Student Completion Details")
382 |
+        summary_df = df[['Student Name', 'SID', 'Messages', 'Status']].copy()
383 |
+        st.dataframe(
384 |
+            summary_df.style.apply(lambda x: ['background-color: #90EE90' if v == 'Completed'
385 |
+                                              else 'background-color: #FFB6C1' if v == 'Incomplete'
386 |
+                                              else 'background-color: #FFE4B5'
387 |
+                                              for v in x],
388 |
+                                   subset=['Status'])
389 |
+        )
390 |
+
391 |
+    with tab2:
392 |
+        # Display detailed chat history
393 |
+        st.markdown("### Student Chat Histories")
394 |
+
395 |
+        # Add student selector
396 |
+        selected_student = st.selectbox(
397 |
+            "Select a student to view chat history:",
398 |
+            options=df['Student Name'].tolist()
399 |
+        )
400 |
+
401 |
+        # Get selected student's data
402 |
+        student_data = df[df['Student Name'] == selected_student].iloc[0]
403 |
+        print(student_data)
404 |
+        chat_history = student_data['Chat History']
405 |
+        # Refresh chat history when a new student is selected
406 |
+        if 'selected_student' not in st.session_state or st.session_state.selected_student != selected_student:
407 |
+            st.session_state.selected_student = selected_student
408 |
+            st.session_state.selected_student_chat_history = chat_history
409 |
+        else:
410 |
+            chat_history = st.session_state.selected_student_chat_history
411 |
+        # Display student info and chat statistics
412 |
+        st.markdown(f"**Student ID:** {student_data['SID']}")
413 |
+        st.markdown(f"**Status:** {student_data['Status']}")
414 |
+        st.markdown(f"**Total Messages:** {student_data['Messages']}")
415 |
+
416 |
+
417 |
+
418 |
+
419 |
+        # Display chat history in a table
420 |
+        if chat_history:
421 |
+            chat_df = pd.DataFrame(chat_history)
422 |
+            st.dataframe(
423 |
+                chat_df.style.apply(lambda x: ['background-color: #E8F0FE' if v == 'response' else 'background-color: #FFFFFF' for v in x], subset=['prompt']), use_container_width=True
424 |
+            )
425 |
+        else:
426 |
+            st.info("No chat history available for this student.")
427 |
 
428 |
    # Display students who haven't completed
429 |
    if incomplete_students:
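The Status colouring above drives pandas' Styler.apply column-wise: the callable receives the Status column as a Series and must return one CSS string per cell. An equivalent standalone version with a named helper (illustrative only, not part of the commit) looks like this:

import pandas as pd

def status_colour(column):
    # Return one CSS declaration per cell of the styled column.
    palette = {
        "Completed": "background-color: #90EE90",
        "Incomplete": "background-color: #FFB6C1",
    }
    return [palette.get(value, "background-color: #FFE4B5") for value in column]

df = pd.DataFrame({
    "Student Name": ["A. Student", "B. Student"],
    "Status": ["Completed", "Not Started"],
})
styled = df.style.apply(status_colour, subset=["Status"])  # axis=0 (column-wise) by default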
702 |
            Uploaded on: {material['uploaded_at'].strftime('%Y-%m-%d %H:%M')}
703 |
            """)
704 |
 
705 |
+def display_quiz_tab(student_id, course_id, session_id):
706 |
+    """Display quizzes for students"""
707 |
+    st.header("Course Quizzes")
708 |
+
709 |
+    # Get available quizzes for this session
710 |
+    quizzes = quizzes_collection.find({
711 |
+        "course_id": course_id,
712 |
+        "session_id": session_id,
713 |
+        "status": "active"
714 |
+    })
715 |
+
716 |
+    quizzes = list(quizzes)
717 |
+    if not quizzes:
718 |
+        st.info("No quizzes available for this session.")
719 |
+        return
720 |
+
721 |
+    for quiz in quizzes:
722 |
+        with st.expander(f"📝 {quiz['title']}", expanded=True):
723 |
+            # Check if student has already taken this quiz
724 |
+            existing_score = get_student_quiz_score(quiz['_id'], student_id)
725 |
+
726 |
+            if existing_score is not None:
727 |
+                st.success(f"Quiz completed! Your score: {existing_score:.1f}%")
728 |
+
729 |
+                # Display correct answers after submission
730 |
+                st.subheader("Quiz Review")
731 |
+                for i, question in enumerate(quiz['questions']):
732 |
+                    st.markdown(f"**Question {i+1}:** {question['question']}")
733 |
+                    for opt in question['options']:
734 |
+                        if opt.startswith(question['correct_option']):
735 |
+                            st.markdown(f"✅ {opt}")
736 |
+                        else:
737 |
+                            st.markdown(f"- {opt}")
738 |
+
739 |
+            else:
740 |
+                # Display quiz questions
741 |
+                st.write("Please select your answers:")
742 |
+
743 |
+                # Create a form for quiz submission
744 |
+                with st.form(f"quiz_form_{quiz['_id']}"):
745 |
+                    student_answers = {}
746 |
+
747 |
+                    for i, question in enumerate(quiz['questions']):
748 |
+                        st.markdown(f"**Question {i+1}:** {question['question']}")
749 |
+                        options = [opt for opt in question['options']]
750 |
+                        student_answers[str(i)] = st.radio(
751 |
+                            f"Select answer for question {i+1}:",
752 |
+                            options=options,
753 |
+                            key=f"q_{quiz['_id']}_{i}"
754 |
+                        )
755 |
+
756 |
+                    # Submit button
757 |
+                    if st.form_submit_button("Submit Quiz"):
758 |
+                        print(student_answers)
759 |
+                        score = submit_quiz_answers(quiz['_id'], student_id, student_answers)
760 |
+                        if score is not None:
761 |
+                            st.success(f"Quiz submitted successfully! Your score: {score:.1f}%")
762 |
+                            st.rerun()  # Refresh to show results
763 |
+                        else:
764 |
+                            st.error("Error submitting quiz. Please try again.")
765 |
 
def display_session_content(student_id, course_id, session, username, user_type):
|
767 |
st.title(f"Session {session['session_id']}: {session['title']}")
|
|
|
|
|
|
|
|
|
|
|
768 |
|
769 |
# Check if the date is a string or a datetime object
|
770 |
if isinstance(session['date'], str):
|
|
|
783 |
else:
|
784 |
tabs = (["Pre-class Analytics", "In-class Analytics", "Post-class Analytics"])
|
785 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
786 |
if st.session_state.user_type == 'student':
|
787 |
+
pre_class_tab, in_class_tab, post_class_tab, quiz_tab = st.tabs(["Pre-class Work", "In-class Work", "Post-class Work", "Quizzes"])
|
788 |
else:
|
789 |
pre_class_work, in_class_work, post_class_work, preclass_analytics, inclass_analytics, postclass_analytics = st.tabs(["Pre-class Work", "In-class Work", "Post-class Work", "Pre-class Analytics", "In-class Analytics", "Post-class Analytics"])
|
790 |
|
791 |
# Display pre-class materials
|
792 |
if st.session_state.user_type == 'student':
|
793 |
with pre_class_tab:
|
794 |
+
display_preclass_content(session, student_id, course_id)
|
|
|
795 |
with in_class_tab:
|
796 |
display_in_class_content(session, st.session_state.user_type)
|
797 |
|
798 |
# Post-class Content
|
799 |
with post_class_tab:
|
800 |
display_post_class_content(session, student_id, course_id)
|
801 |
+
with quiz_tab:
|
802 |
+
display_quiz_tab(student_id, course_id, session['session_id'])
|
803 |
|
804 |
if st.session_state.user_type == 'faculty':
|
805 |
with pre_class_work:
|
|
|
813 |
with inclass_analytics:
|
814 |
display_inclass_analytics(session, course_id)
|
815 |
with postclass_analytics:
|
816 |
+
display_postclass_analytics(session, course_id)
|
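The student and faculty branches above hang their content off st.tabs, which returns one context manager per label; anything rendered inside the corresponding with block lands on that tab. A minimal standalone illustration of the pattern (not part of the commit):

import streamlit as st

# st.tabs returns one tab object per label, in order.
overview_tab, detail_tab = st.tabs(["Overview", "Details"])
with overview_tab:
    st.write("Summary content renders on the first tab")
with detail_tab:
    st.write("Detail content renders on the second tab")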