# NOTE(review): removed scraped page metadata (file size, git blame hashes,
# line-number gutter) that was not Python source and broke parsing.
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
# Load the OpenAI API key from a local .env file when one exists; otherwise
# rely on OPENAI_API_KEY already being set in the process environment
# (e.g. by the hosting platform).
if os.path.exists(".env"):
    load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")  # may be None — requests will fail later if unset
client = OpenAI(api_key=OPENAI_API_KEY)  # single shared client for all API calls
# Define the function to call GPT model
def gpt_call(history, user_message, model="gpt-4o-mini", max_tokens=512, temperature=0.7, top_p=0.95):
    """
    Send the running conversation plus the newest user message to the
    OpenAI Chat Completions API and return the assistant's reply text.

    Args:
        history: Prior turns as [(user_text, assistant_text), ...]; either
            element of a pair may be empty/falsy and is then skipped.
        user_message: The latest message from the user.
        model, max_tokens, temperature, top_p: Passed through to the API.

    Returns:
        str: The content of the first completion choice.
    """
    # System prompt first, then the transcript replayed in order.
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    for turn in history:
        for role, text in zip(("user", "assistant"), turn):
            if text:  # skip empty placeholder slots
                messages.append({"role": role, "content": text})
    messages.append({"role": "user", "content": user_message})

    # OpenAI API Call
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return completion.choices[0].message.content
# Reflection steps appear one-by-one.
# Script for the guided-reflection flow. Each entry is one step with:
#   "title"     - short label for the step
#   "question"  - prompt shown to the user when the step begins
#   "follow_up" - acknowledgment appended after the user answers; most
#                 contain a {response} placeholder filled with the user's
#                 own words via str.format (see respond())
#   "next_step" - title of the following step ("End" marks the last one)
REFLECTION_STEPS = [
    {
        "title": "Pre-Video Reflection",
        "question": "Before watching the video, how did you approach solving the task? What strategies did you use?",
        "follow_up": "You used **{response}**—interesting! Why do you think this strategy is effective for solving proportional reasoning problems?",
        "next_step": "Watch the Video"
    },
    {
        "title": "Watch the Video",
        "question": "Now, please watch the video at the provided link. Let me know when you're done watching.",
        # NOTE: this follow_up intentionally has no {response} placeholder.
        "follow_up": "Great! Now that you've watched the video, let's reflect on key aspects of the lesson.",
        "next_step": "Observing Creativity-Directed Practices"
    },
    {
        "title": "Observing Creativity-Directed Practices",
        "question": "Let's start with **Creativity-Directed Practices**. What stood out to you about how the teacher encouraged student creativity?",
        "follow_up": "You mentioned **{response}**. Can you explain how that supported students' creative problem-solving?",
        "next_step": "Small Group Interactions"
    },
    {
        "title": "Small Group Interactions",
        "question": "Now, let's reflect on **Small Group Interactions**. What did you notice about how the teacher guided student discussions?",
        "follow_up": "Interesting! You noted **{response}**. How do you think that helped students deepen their understanding?",
        "next_step": "Student Reasoning and Connections"
    },
    {
        "title": "Student Reasoning and Connections",
        "question": "Next, let’s discuss **Student Reasoning and Connections**. How did students reason through the task?",
        "follow_up": "That’s a great point about **{response}**. Can you explain why this was significant in their problem-solving?",
        "next_step": "Common Core Practice Standards"
    },
    {
        "title": "Common Core Practice Standards",
        "question": "Now, let’s reflect on **Common Core Practice Standards**. Which ones do you think were emphasized in the lesson?",
        "follow_up": "You mentioned **{response}**. How do you see this practice supporting students' proportional reasoning?",
        "next_step": "Problem Posing Activity"
    },
    {
        "title": "Problem Posing Activity",
        "question": "Let’s engage in a **Problem-Posing Activity**. Pose a problem that encourages students to use visuals and proportional reasoning.",
        # NOTE: this follow_up also has no {response} placeholder.
        "follow_up": "That's an interesting problem! Does it allow for multiple solution paths? How does it connect to Common Core practices we discussed?",
        "next_step": "Final Reflection"
    },
    {
        "title": "Final Reflection",
        "question": "📚 **Final Reflection**\n\nWhat’s one change you will make in your own teaching based on this module?",
        "follow_up": "That’s a great insight! How do you think implementing **{response}** will impact student learning?",
        "next_step": "End" # Final step
    }
]
def respond(user_message, history):
    """
    Advance the guided reflection by one turn.

    Args:
        user_message: Text the user just submitted (may be empty).
        history: Transcript as [(user_text, assistant_text), ...]; mutated
            in place and also returned.

    Returns:
        tuple[str, list]: ("", updated history) — the empty string clears
        the input textbox in the Gradio UI.
    """
    if not user_message:
        return "", history

    # Progress is tracked by counting how many assistant messages already
    # contain the literal marker "Reflection Step" (appended below).
    # Guard h[1] before the `in` test: Gradio history entries can carry
    # None in the assistant slot, and `"x" in None` raises TypeError.
    completed_steps = [h for h in history if h[1] and "Reflection Step" in h[1]]
    reflection_index = len(completed_steps)

    if reflection_index < len(REFLECTION_STEPS):
        current_step = REFLECTION_STEPS[reflection_index]
        next_reflection = current_step["question"]
    else:
        # All scripted steps are done: let the user close out or keep chatting.
        if user_message.strip().lower() in ["no", "no thanks", "i'm done"]:
            assistant_reply = "Thank you for engaging in this reflection! If you ever have more thoughts or questions, feel free to return. Happy teaching! 🎉"
            history.append((user_message, assistant_reply))
            return "", history
        else:
            next_reflection = "You've completed the reflections. Would you like to discuss anything further?"

    assistant_reply = gpt_call(history, user_message)

    # After the first step, prepend the scripted follow-up that acknowledges
    # the answer to the PREVIOUS question before posing the next one.
    if 0 < reflection_index < len(REFLECTION_STEPS):
        follow_up_prompt = REFLECTION_STEPS[reflection_index - 1]["follow_up"].format(response=user_message)
        assistant_reply += f"\n\n{follow_up_prompt}"

    # Record this exchange, then issue the next numbered reflection prompt
    # (the marker text is what the counter above searches for).
    history.append((user_message, assistant_reply))
    history.append(("", f"**Reflection Step {reflection_index + 1}:** {next_reflection}"))
    return "", history
# --- Gradio UI wiring ---
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")
    # Chat display plus a parallel State copy of the transcript; both are
    # seeded with the initial prompt so respond() sees it in its history.
    chatbot = gr.Chatbot(value=[("", INITIAL_PROMPT)], height=600)
    state_history = gr.State([("", INITIAL_PROMPT)])
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")
    # On submit: respond() clears the textbox and refreshes the chat display;
    # the chained .then() then mirrors the updated chatbot value back into
    # state_history so the next turn starts from the full transcript.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot],
    ).then(fn=lambda _, h: h, inputs=[user_input, chatbot], outputs=[state_history])
if __name__ == "__main__":
    # Bind to all interfaces for container/hosted deployment; share=True
    # additionally requests a public Gradio tunnel link.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)