import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.main_prompt import MAIN_PROMPT # Ensure this file exists and is fixed
# Load API key from .env file
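# Expected .env entry (example): OPENAI_API_KEY=sk-...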
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise ValueError("OpenAI API key is missing! Set it in the .env file.")

client = OpenAI(api_key=OPENAI_API_KEY)
# Chatbot Response Function
def respond(user_message, history):
    if not user_message:
        return "", history

    try:
        assistant_reply = client.chat.completions.create(
            model="gpt-4o",
            messages=[
                {"role": "system", "content": MAIN_PROMPT},
                # Replay prior turns; each history entry is a (user, assistant) pair
                *[
                    msg
                    for u, a in history
                    for msg in (
                        {"role": "user", "content": u},
                        {"role": "assistant", "content": a},
                    )
                    if msg["content"]
                ],
                {"role": "user", "content": user_message},
            ],
            max_tokens=512,
            temperature=0.7,
        ).choices[0].message.content

        history.append((user_message, assistant_reply))
        return "", history
    except Exception as e:
        return f"An error occurred: {str(e)}", history
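
# Quick sanity check outside Gradio (a sketch; assumes a valid API key and that
# prompts/main_prompt.py defines MAIN_PROMPT):
#   _, updated = respond("How should students compare 2/3 and 3/4?", [])
#   print(updated[-1][1])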
# Gradio UI Setup
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")
    chatbot = gr.Chatbot(value=[(MAIN_PROMPT, "")], height=500)
    state_history = gr.State([(MAIN_PROMPT, "")])
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot],
    ).then(
        # Keep the stored history in sync with what the Chatbot now displays
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history],
    )
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)