# Module3/app.py
import os
import gradio as gr
from openai import OpenAI
from prompts.main_prompt import MAIN_PROMPT
from prompts.initial_prompt import INITIAL_PROMPT
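# MAIN_PROMPT is sent as the system message on every completion call below;
# INITIAL_PROMPT seeds the chatbot with its opening message.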
# ✅ Load API Key
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("⚠️ Missing OpenAI API Key! Set it in Hugging Face 'Settings' → 'Secrets'.")
client = OpenAI(api_key=OPENAI_API_KEY)
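# On Hugging Face Spaces, a secret added under Settings → Secrets is exposed to the
# app as an environment variable of the same name, which is how OPENAI_API_KEY
# above is expected to arrive.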
# ✅ Ensure AI asks users first & only helps if needed
def respond(user_message, history):
    if not user_message:
        return "", history

    # ✅ AI waits for user answers before solving:
    # rebuild the full conversation so the model sees every prior turn.
    messages = [{"role": "system", "content": MAIN_PROMPT}]
    for user_msg, assistant_msg in history:
        if user_msg:  # skip the empty user slot of the seeded greeting turn
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": user_message})

    try:
        assistant_reply = client.chat.completions.create(
            model="gpt-4o",
            messages=messages,
            max_tokens=300,  # ✅ Prevents cutting off messages
            temperature=0.7,
        ).choices[0].message.content
    except Exception as e:
        assistant_reply = f"⚠️ Error: {str(e)}"

    history.append((user_message, assistant_reply))
    return "", history
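
# Shape of the history respond() consumes and returns (a sketch, not executed here):
#   history = [("", INITIAL_PROMPT)]                  # seeded greeting turn
#   _, history = respond("How should I begin the task?", history)
#   history[-1]  # -> ("How should I begin the task?", "<assistant reply or error text>")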
# ✅ Gradio UI, seeded so the chat starts with the introduction message
with gr.Blocks() as demo:
    gr.Markdown("# **AI-Guided Math PD Chatbot**")

    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],  # ✅ Starts with an introduction message
        height=500,
    )
    state_history = gr.State([("", INITIAL_PROMPT)])  # ✅ Ensures step-by-step history
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot],
    ).then(
        fn=lambda _, h: h,  # mirror the updated chat transcript back into the state
        inputs=[user_input, chatbot],
        outputs=[state_history],
    )
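
# Local usage (a sketch): set OPENAI_API_KEY in your environment, then run
#   python app.py
# demo.launch() serves the Gradio UI, by default at http://127.0.0.1:7860.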
if __name__ == "__main__":
    demo.launch()