import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
# Load OpenAI API Key from .env file
if os.path.exists(".env"):
load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
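
# Note: python-dotenv reads simple KEY=VALUE lines, so .env would contain a line
# like `OPENAI_API_KEY=your-key-here` (placeholder value, not a real key).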


def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=1024,
             temperature=0.7,
             top_p=0.95):
    """
    Calls the OpenAI Chat API to generate responses.
    - history: [(user_text, assistant_text), ...]
    - user_message: latest message from the user
    """
messages = [{"role": "system", "content": MAIN_PROMPT}]
# Add conversation history
for user_text, assistant_text in history:
if user_text:
messages.append({"role": "user", "content": user_text})
if assistant_text:
messages.append({"role": "assistant", "content": assistant_text})
messages.append({"role": "user", "content": user_message})
    # OpenAI API call
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )
    response = completion.choices[0].message.content

    # Encourage teachers to explain their reasoning before providing guidance
    if "solve" in user_message.lower() or "explain" in user_message.lower():
        response = "Great! Before we move forward, can you explain your reasoning? Why do you think this is the right approach? Once you share your thoughts, I'll guide you further.\n\n" + response

    # Encourage problem posing
    if "pose a problem" in user_message.lower():
        response += "\n\nNow that you've explored this concept, try creating your own problem related to it. How would you challenge your students?"

    # Cover Common Core practice standards
    if "common core" in user_message.lower():
        response += "\n\nHow do you see this aligning with Common Core practice standards? Can you identify any specific standards this connects to?"

    # Encourage creativity-directed practices
    if "creativity" in user_message.lower():
        response += "\n\nHow did creativity play a role in this problem-solving process? Did you find any opportunities to think differently?"

    # Provide a structured summary
    if "summary" in user_message.lower():
        response += "\n\nSummary: Today, we explored problem-solving strategies, reflected on reasoning, and connected ideas to teaching practices. We examined key characteristics of proportional and non-proportional relationships, explored their graphical representations, and considered pedagogical approaches. Keep thinking about how these concepts can be applied in your own classroom!"

    return response
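

# Illustrative usage sketch (hypothetical values, not part of the app flow):
#   history = [("What is a proportional relationship?",
#               "One where two quantities keep a constant ratio.")]
#   reply = gpt_call(history, "Can you pose a problem about ratios?")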


def respond(user_message, history):
    """
    Handles user input and chatbot responses.
    """
    if not user_message:
        return "", history

    assistant_reply = gpt_call(history, user_message)
    history.append((user_message, assistant_reply))
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
gr.Markdown("## AI-Guided Math PD Chatbot")
chatbot = gr.Chatbot(
value=[("", INITIAL_PROMPT)],
height=600
)
state_history = gr.State([("", INITIAL_PROMPT)])
user_input = gr.Textbox(
placeholder="Type your message here...",
label="Your Input"
)
user_input.submit(
respond,
inputs=[user_input, state_history],
outputs=[user_input, chatbot]
).then(
fn=lambda _, h: h,
inputs=[user_input, chatbot],
outputs=[state_history]
)
if __name__ == "__main__":
demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
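
# To run locally (assuming this file is saved as app.py and OPENAI_API_KEY is set in .env):
#   python app.py
# then open http://localhost:7860 in a browser; share=True also requests a
# temporary public Gradio link.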