import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT, PROBLEM_SOLUTIONS_PROMPT
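
# Third-party dependencies: gradio, openai (v1-style client API), and
# python-dotenv; the prompts package is local to this repo.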
# Load the API key from the .env file if available
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)
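# Note: the OpenAI client also reads OPENAI_API_KEY from the environment by
# default, so the explicit argument mainly makes the dependency visible.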
def gpt_call(history, user_message,
             model="gpt-4o",
             max_tokens=1500,  # generous ceiling so long replies are not truncated
             temperature=0.7,
             top_p=0.95):
    """
    Calls the OpenAI API to generate a response.
    - history: [(user_text, assistant_text), ...]
    - user_message: the latest user message
    """
    # 1) Start with the system message (MAIN_PROMPT) for context
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    # 2) Append the conversation history as alternating user/assistant turns
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    # 3) Add the user's new message
    messages.append({"role": "user", "content": user_message})

    # 4) Call the OpenAI API
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )

    # 5) Only one choice is requested, so return its content directly
    return completion.choices[0].message.content.strip()
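
# Example (hypothetical values) of the history shape gpt_call expects:
#   gpt_call([("Hi", "Hello! How can I help?")], "What is 7 x 8?")
# sends four messages: the system prompt, "Hi", "Hello! How can I help?",
# and "What is 7 x 8?".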
def respond(user_message, history):
    """
    Handles user input and returns the GPT-generated response.
    - user_message: the message from the user
    - history: list of (user, assistant) conversation turns
    """
    if not user_message:
        return "", history

    # If the user asks for a solution, send PROBLEM_SOLUTIONS_PROMPT instead
    # of the raw message so the model switches into solution mode
    if "solution" in user_message.lower():
        assistant_reply = gpt_call(history, PROBLEM_SOLUTIONS_PROMPT)
    else:
        assistant_reply = gpt_call(history, user_message)

    # Record the turn under the user's original wording
    history.append((user_message, assistant_reply))
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    # Chatbot seeded with the assistant's opening message
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=500
    )

    # Stores the chat history across turns
    state_history = gr.State([("", INITIAL_PROMPT)])

    # User input field
    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: clear the textbox and update the chatbot, then copy the
    # chatbot value back into the state so the next turn sees the full history
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )
# Run the Gradio app
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
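
# To run locally: `python app.py`, then open http://localhost:7860.
# share=True additionally creates a temporary public Gradio link.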