import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
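# Prompt templates: INITIAL_PROMPT seeds the chat window on load,
# MAIN_PROMPT is used as the system message for every API call.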
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT
# Load OpenAI API Key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
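
# Optional sanity check (an addition, not required by the original flow):
# surface a clear error early if no API key was found.
if not OPENAI_API_KEY:
    raise RuntimeError("OPENAI_API_KEY is not set; add it to .env or the environment.")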
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=1024,
             temperature=0.7,
             top_p=0.95):
    """
    Calls the OpenAI Chat Completions API to generate a response.
    - history: list of (user_text, assistant_text) tuples
    - user_message: latest message from the user
    """
messages = [{"role": "system", "content": MAIN_PROMPT}]
# Add conversation history
for user_text, assistant_text in history:
if user_text:
messages.append({"role": "user", "content": user_text})
if assistant_text:
messages.append({"role": "assistant", "content": assistant_text})
messages.append({"role": "user", "content": user_message})
# OpenAI API Call
completion = client.chat.completions.create(
model=model,
messages=messages,
max_tokens=max_tokens,
temperature=temperature,
top_p=top_p
)
response = completion.choices[0].message.content
# Ensure AI always asks for reasoning first before answering
if any(keyword in user_message.lower() for keyword in ["problem 2", "problem 3"]):
response = "Interesting! Before we move on, what do you think about this problem? Is it proportional? Why or why not? Let's explore your reasoning first.\n\n" + response
# Push for deeper explanations—even if the answer is correct
if any(keyword in user_message.lower() for keyword in ["correct", "right", "exactly"]):
response = "That’s a great insight! But let’s push further—can you explain it another way? Could someone misunderstand this concept? Let’s explore that.\n\n" + response
# Ensure the AI always asks a follow-up before moving to the next question
if any(keyword in user_message.lower() for keyword in ["move on", "next question"]):
response = "Before we continue, let’s reflect for a moment—what was the biggest takeaway from this problem? Could we change something and still get a non-proportional relationship?\n\n" + response
# Make the Problem-Posing Activity more interactive
if "pose a problem" in user_message.lower():
response += "\n\nThat's a great start! But let's refine it—does your problem truly show a non-proportional relationship? What would happen if we removed the fixed cost? Try adjusting it and see if it still works!"
return response
def respond(user_message, history):
    """
    Handles a user turn: calls gpt_call, appends the new (user, assistant)
    pair to the history, and returns a cleared textbox plus the updated chat.
    """
    if not user_message:
        return "", history

    assistant_reply = gpt_call(history, user_message)
    history.append((user_message, assistant_reply))
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    # Chat window, seeded with the initial prompt as the first assistant message
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=600
    )

    # Session state holding the running (user, assistant) history
    state_history = gr.State([("", INITIAL_PROMPT)])

    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: respond clears the textbox and updates the chat display,
    # then the follow-up step mirrors the chat display back into the session state
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)