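"""Gradio app for the AI-Guided Math PD Chatbot.

Loads an OpenAI API key from .env, lets the user pick a representation method
(bar model, double number line, or equation), and returns method-specific
prompts and feedback via helpers imported from prompts/main_prompt.py.
"""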
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT, get_prompt_for_method, get_feedback_for_method

# ✅ Load API key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# ✅ Ensure the API key is available
if not OPENAI_API_KEY:
    raise ValueError("🚨 OpenAI API key is missing! Set it in the .env file.")

client = OpenAI(api_key=OPENAI_API_KEY)


# ✅ Chatbot response function with full debug logging
def respond(user_message, history, selected_method):
    # Empty input: keep the chat as-is (gr.Chatbot expects the full message list, not a bare string).
    if not user_message:
        return history, history, selected_method

    user_message = user_message.strip().lower()  # Normalize input
    valid_methods = ["bar model", "double number line", "equation"]

    # ✅ Ensure history is a list of (user, bot) tuples
    if not isinstance(history, list):
        history = []
    history = [(str(h[0]), str(h[1])) for h in history if isinstance(h, tuple) and len(h) == 2]

    # ✅ Debug logs
    print("\nDEBUG: Incoming User Message:", user_message)
    print("DEBUG: Current History:", history)
    print("DEBUG: Selected Method Before Processing:", selected_method)

    # ✅ If the user selects a method, store it and provide the method-specific prompt
    if user_message in valid_methods:
        selected_method = user_message  # Store the method
        method_prompt = get_prompt_for_method(user_message)
        history.append((user_message, method_prompt))  # Store a correctly formatted tuple
        print("DEBUG: Method Selected:", selected_method)
        print("DEBUG: Sending Prompt for Method:", method_prompt)
        # Return the full history for the Chatbot component, not just the new message.
        return history, history, selected_method

    # ✅ If a method has already been selected, provide feedback
    if selected_method:
        feedback = get_feedback_for_method(selected_method, user_message)
        history.append((user_message, feedback))  # Store a correctly formatted tuple
        print("DEBUG: Feedback Given:", feedback)
        print("DEBUG: Updated History:", history)
        return history, history, selected_method

    # ✅ No method selected yet: ask the user to pick one
    error_msg = "⚠️ Please select a method first (Bar Model, Double Number Line, or Equation)."
    history.append((user_message, error_msg))  # Store a correctly formatted tuple
    print("DEBUG: Error Triggered, No Method Selected")
    return history, history, selected_method
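
# NOTE: `client` is created above but never called in this file; the imported
# prompt helpers presumably handle any model calls elsewhere. As a minimal
# sketch (an assumption, not part of the current app flow), a model-backed
# reply could be generated with the openai v1 client like this:
#
#     completion = client.chat.completions.create(
#         model="gpt-4o-mini",  # hypothetical model choice
#         messages=[
#             {"role": "system", "content": MAIN_PROMPT},
#             {"role": "user", "content": user_message},
#         ],
#     )
#     reply = completion.choices[0].message.content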


# ✅ Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 AI-Guided Math PD Chatbot")
    chatbot = gr.Chatbot(value=[(INITIAL_PROMPT, "Hello! Please select a method to begin.")], height=500)
    state_history = gr.State([(INITIAL_PROMPT, "Hello! Please select a method to begin.")])
    state_selected_method = gr.State(None)  # ✅ Tracks the currently selected method
    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    # ✅ Handle user input and route it to the response logic
    user_input.submit(
        respond,
        inputs=[user_input, state_history, state_selected_method],
        outputs=[chatbot, state_history, state_selected_method],
    )
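
# Running this module directly serves the UI on port 7860; share=True also
# requests a temporary public Gradio link.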
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)