import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.main_prompt import MAIN_PROMPT
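# Expected local setup (assumption): a .env file next to app.py containing a line
# such as OPENAI_API_KEY=sk-..., or OPENAI_API_KEY exported directly in the environment.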
# Load API key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("OpenAI API key is missing! Set it in the .env file.")

client = OpenAI(api_key=OPENAI_API_KEY)
# Chatbot Response Function
def respond(user_message, history):
    """Send the running conversation to the model and append its reply to history."""
    if not user_message:
        return "", history

    try:
        # history is a list of (user, assistant) tuples; flatten each pair into
        # explicit user/assistant messages, skipping empty placeholder entries.
        past_messages = []
        for u, a in history:
            if u:
                past_messages.append({"role": "user", "content": u})
            if a:
                past_messages.append({"role": "assistant", "content": a})

        assistant_reply = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": MAIN_PROMPT},
                *past_messages,
                {"role": "user", "content": user_message},
            ],
            max_tokens=512,
            temperature=0.7,
        ).choices[0].message.content

        history.append((user_message, assistant_reply))
        return "", history
    except Exception as e:
        return f"An error occurred: {str(e)}", history
# Gradio UI Setup
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    chatbot = gr.Chatbot(value=[(MAIN_PROMPT, "")], height=500)
    state_history = gr.State([(MAIN_PROMPT, "")])
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    # On submit: clear the textbox and refresh the chatbot, then mirror the
    # chatbot's value back into the session state so both stay in sync.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot],
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history],
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
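# Note: with the flags above, the app is served locally on port 7860; share=True
# additionally asks Gradio to print a temporary public link in the console.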