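# NOTE: The block below is the previous version of this app (sidebar-based UI plus
# an answer-evaluation step via evaluate_answers), kept commented out for reference.
# The active implementation starts after it.
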
# import os
# from dotenv import load_dotenv
# from groq import Groq
# import streamlit as st

# # Load environment variables
# load_dotenv()

# # Initialize the Groq client with API key
# client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# # Function to generate MCQs based on the input text
# def generate_mcqs_from_text(user_text):
#     prompt = f"""
#     Based on the following text, generate between 30 and 50 multiple-choice questions.
#     Each question should have four answer options, with one correct answer and three distractors. 
#     Make sure the questions are clear and test the user's understanding of the content. 
#     Provide the questions in the following format:
    
#     Question: [Your question here]  
#     A. [Option 1]  
#     B. [Option 2]  
#     C. [Option 3]  
#     D. [Option 4]  
#     Correct Answer: [Correct Option]
    
#     Text: {user_text}
#     """
#     chat_completion = client.chat.completions.create(
#         messages=[{"role": "user", "content": prompt}],
#         model="gemma2-9b-it",  # Use a valid model that supports chat completions
#     )
#     return chat_completion.choices[0].message.content

# # Function to evaluate user answers
# def evaluate_answers(mcqs, user_answers):
#     prompt = f"""
#     Here are the user's answers to the following multiple-choice questions. 
#     Please evaluate them as an experienced teacher, providing feedback on correctness
#     and explaining why each answer is right or wrong.
    
#     MCQs: {mcqs}
#     User Answers: {user_answers}
#     """
#     chat_completion = client.chat.completions.create(
#         messages=[{"role": "user", "content": prompt}],
#         model="gemma2-9b-it",  # Use a valid model
#     )
#     return chat_completion.choices[0].message.content

# # Streamlit App
# st.title("Study Assistant - MCQ Generator & Evaluator")
# st.sidebar.header("Input Settings")

# # Input text from user
# user_text = st.sidebar.text_area("Enter the text for generating MCQs:", height=200)

# if st.sidebar.button("Generate MCQs"):
#     if user_text.strip():
#         with st.spinner("Generating MCQs..."):
#             mcqs = generate_mcqs_from_text(user_text)
#         st.subheader("Generated MCQs:")
#         st.text_area("MCQs", value=mcqs, height=400)
#         st.session_state["mcqs"] = mcqs  # Store MCQs for evaluation
#     else:
#         st.error("Please enter text for generating MCQs.")

# # Input user answers for evaluation
# if "mcqs" in st.session_state:
#     user_answers = st.text_area("Enter your answers (e.g., A, B, C, D for each question):", height=100)
#     if st.button("Evaluate Answers"):
#         if user_answers.strip():
#             with st.spinner("Evaluating answers..."):
#                 evaluation_result = evaluate_answers(st.session_state["mcqs"], user_answers)
#             st.subheader("Evaluation Result:")
#             st.text_area("Evaluation", value=evaluation_result, height=400)
#         else:
#             st.error("Please enter your answers for evaluation.")


import os
from dotenv import load_dotenv
from groq import Groq
import streamlit as st

# Load environment variables
load_dotenv()
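
# Optional guard (a sketch, assuming the key is supplied via a .env file or the
# shell environment): stop with a clear message if it is missing, instead of
# letting the Groq client raise during construction below.
if not os.getenv("GROQ_API_KEY"):
    st.error("GROQ_API_KEY is not set. Add it to a .env file or your environment.")
    st.stop()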

# Initialize the Groq client with API key
client = Groq(api_key=os.getenv("GROQ_API_KEY"))

# Generate MCQs from the input text, prompting the model to act as an experienced educator
def generate_mcqs_from_text(user_text):
    prompt = f"""
    You are an educator with 35 years of experience crafting challenging, insightful MCQs.
    Based on the following text, generate between 30 and 50 multiple-choice questions (MCQs).
    Each question should:
    1. Test critical thinking and understanding of the content.
    2. Include four options (A, B, C, D), with one correct answer and three well-designed distractors.
    3. Use clear, concise language and avoid ambiguity.
    4. State the correct answer with each MCQ.

    Format the output as:
    Question: [Your question here]  
    A. [Option 1]  
    B. [Option 2]  
    C. [Option 3]  
    D. [Option 4]  
    Correct Answer: [Correct Option]
    
    Text: {user_text}
    """
    chat_completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="gemma2-9b-it",  # Use a valid Groq-supported model
    )
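    # Note: create() follows the OpenAI chat-completions interface, so parameters
    # such as temperature could be passed here to tune the output; the defaults
    # are kept as-is.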
    return chat_completion.choices[0].message.content

# Streamlit App
st.title("MCQ Generator with Correct Answers")
st.write("Paste your text below, and the app will generate MCQs with the correct answers.")

# Input text from user
user_text = st.text_area("Enter the text for generating MCQs:", height=200)

if st.button("Generate MCQs"):
    if user_text.strip():
        with st.spinner("Generating MCQs..."):
            mcqs = generate_mcqs_from_text(user_text)
        st.subheader("Generated MCQs with Correct Answers:")
        st.text_area("MCQs", value=mcqs, height=600)
    else:
        st.error("Please enter text for generating MCQs.")