import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
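
# INITIAL_PROMPT greets the teacher in the chat window; MAIN_PROMPT is the system prompt for every GPT call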

# Load OpenAI API Key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)

# Define pre-video and post-video reflection steps
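# Each step is a dict with:
#   "title"     - step label
#   "question"  - prompt shown to the teacher when the step begins
#   "follow_up" - probe sent after the teacher responds ("{response}" is filled with their answer)
#   "next_step" - title of the following step, or None when the flow ends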
REFLECTION_STEPS = [
    {
        "title": "Pre-Video Reflection",
        "question": "Before watching the video, let's reflect on your approach to the problem.\n\nHow did you solve the task? What strategies did you use?",
        "follow_up": "You used **{response}**—interesting! Why do you think this strategy is effective for solving proportional reasoning problems?",
        "next_step": "Watch the Video"
    },
    {
        "title": "Watch the Video",
        "question": "Now, please watch the video at the provided link and observe how the teacher facilitates problem-solving. Let me know when you're done watching.",
        "follow_up": "Great! Now that you've watched the video, let's reflect on key aspects of the lesson.",
        "next_step": "Post-Video Reflection - Observing Creativity-Directed Practices"
    },
    {
        "title": "Post-Video Reflection - Observing Creativity-Directed Practices",
        "question": "Let's start with **Observing Creativity-Directed Practices.**\n\nWhat stood out to you the most about how the teacher encouraged student creativity?",
        "follow_up": "You mentioned **{response}**. Can you explain how that supported students' creative problem-solving?",
        "next_step": "Post-Video Reflection - Small Group Interactions"
    },
    {
        "title": "Post-Video Reflection - Small Group Interactions",
        "question": "Now, let's reflect on **Small Group Interactions.**\n\nWhat did you notice about how the teacher guided student discussions?",
        "follow_up": "Interesting! You noted **{response}**. How do you think that helped students deepen their understanding?",
        "next_step": "Post-Video Reflection - Student Reasoning and Connections"
    },
    {
        "title": "Post-Video Reflection - Student Reasoning and Connections",
        "question": "Next, let’s discuss **Student Reasoning and Connections.**\n\nHow did students reason through the task? What connections did they make between percent relationships and fractions?",
        "follow_up": "That’s a great point about **{response}**. Can you explain why this was significant in their problem-solving?",
        "next_step": "Post-Video Reflection - Common Core Practice Standards"
    },
    {
        "title": "Post-Video Reflection - Common Core Practice Standards",
        "question": "Now, let’s reflect on **Common Core Practice Standards.**\n\nWhich Common Core practice standards do you think the teacher emphasized during the lesson?",
        "follow_up": "You mentioned **{response}**. How do you see this practice supporting students' proportional reasoning?",
        "next_step": "Problem Posing Activity"
    },
    {
        "title": "Problem Posing Activity",
        "question": "Let’s engage in a **Problem-Posing Activity.**\n\nBased on what you observed, pose a problem that encourages students to use visuals and proportional reasoning.",
        "follow_up": "That's an interesting problem! Does it allow for multiple solution paths? How does it connect to the Common Core practices we discussed?",
        "next_step": "Final Reflection"
    },
    {
        "title": "Final Reflection",
        "question": "📚 **Final Reflection**\n\nWhat’s one change you will make in your own teaching based on this module?",
        "follow_up": "That’s a great insight! How do you think implementing **{response}** will impact student learning?",
        "next_step": None  # End of reflections
    }
]

def gpt_call(history, user_message, model="gpt-4o-mini", max_tokens=1024, temperature=0.7, top_p=0.95):
    """Build the message list from the chat history and return a reply from the OpenAI chat API."""
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    # Replay prior (user, assistant) turns so the model sees the full conversation
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})
    messages.append({"role": "user", "content": user_message})
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )
    return completion.choices[0].message.content

def respond(user_message, history):
    if not user_message:
        return "", history

    # Count how many reflection steps have already been introduced
    completed_steps = [h for h in history if h[1] and "Reflection Step" in h[1]]
    reflection_index = len(completed_steps)
    if reflection_index < len(REFLECTION_STEPS):
        next_reflection = REFLECTION_STEPS[reflection_index]["question"]
    else:
        next_reflection = "You've completed the reflections. Would you like to discuss anything further?"

    assistant_reply = gpt_call(history, user_message)

    # Attach the previous step's follow-up question before moving on
    if reflection_index > 0:
        follow_up_prompt = REFLECTION_STEPS[reflection_index - 1]["follow_up"].format(response=user_message)
        assistant_reply += f"\n\n{follow_up_prompt}"

    # Append the assistant's response and introduce the next reflection question
    history.append((user_message, assistant_reply))
    history.append(("", f"**Reflection Step {reflection_index + 1}:** {next_reflection}"))
    return "", history
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")
    chatbot = gr.Chatbot(value=[("", INITIAL_PROMPT)], height=600)
    state_history = gr.State([("", INITIAL_PROMPT)])
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    # On submit, run respond(), then copy the updated transcript from the chatbot back into the session state
    user_input.submit(
        respond, inputs=[user_input, state_history], outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h, inputs=[user_input, chatbot], outputs=[state_history]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)