# module2/app.py
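"""Gradio chatbot for AI-guided teacher professional development (PD).

Seeds the chat with INITIAL_PROMPT, steers the model with TASK_PROMPT as the
system message, and generates replies through the OpenAI Chat Completions API,
with a few keyword-triggered canned follow-up questions.

Requires the gradio, openai (>=1.0), and python-dotenv packages.
"""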
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import TASK_PROMPT
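# The prompts package (not shown here) is expected to define the string
# constants INITIAL_PROMPT (the opening task message shown in the chat) and
# TASK_PROMPT (the system prompt); their exact contents live in prompts/*.py.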
# Load the OpenAI API key from a .env file, if one is present
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=512,
             temperature=0.7,
             top_p=0.95):
    """
    Calls the OpenAI Chat Completions API to generate a response.
    - history: [(user_text, assistant_text), ...]
    - user_message: the user's latest input
    """
    # System message (TASK_PROMPT) at the beginning
    messages = [{"role": "system", "content": TASK_PROMPT}]

    # Convert history into OpenAI message format
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    # Add the latest user input
    messages.append({"role": "user", "content": user_message})

    # AI-controlled gradual guidance: canned follow-up questions for known strategies
    if "bar model" in user_message.lower():
        return "Great! You've started using a bar model. Can you explain how you divided it? What does each section represent?"
    elif "double number line" in user_message.lower():
        return "Nice! How does your number line show the relationship between time and distance? Did you mark the correct intervals?"
    elif "ratio table" in user_message.lower():
        return "Good choice! Before I check, how did you determine the ratio for 1 hour?"
    elif "graph" in user_message.lower():
        return "Graphs are powerful! What key points did you plot, and why?"
    else:
        # Fall back to the OpenAI API for everything else
        completion = client.chat.completions.create(
            model=model,
            messages=messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p
        )
        return completion.choices[0].message.content
def respond(user_message, history):
    """
    Handles user input and the chatbot response in Gradio.
    - user_message: the latest input from the user
    - history: a list of (user, assistant) message pairs
    """
    if not user_message:
        return "", history

    # Generate AI response
    assistant_reply = gpt_call(history, user_message)

    # Append to history
    history.append((user_message, assistant_reply))

    # Return the updated history and clear the input box
    return "", history
##############################
# Gradio Chatbot UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Teacher PD Chatbot")

    # Initial chatbot message (starts with the task)
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=500
    )

    # Chat history state
    state_history = gr.State([("", INITIAL_PROMPT)])

    # User input box
    user_input = gr.Textbox(
        placeholder="Type your response here...",
        label="Your Input"
    )

    # When the user submits input → respond() updates the chatbot,
    # then the chat history state is synced from the chatbot value
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )
# Launch the chatbot
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
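
# Usage (assumed local run): `python app.py`, then open http://localhost:7860.
# share=True additionally requests a temporary public Gradio share link.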