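"""AI-Guided Math PD Chatbot.

A small Gradio app: the user picks a solution method (bar model, double number
line, or equation), and the bot replies with the method-specific prompt and
feedback supplied by the `prompts` package.
"""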
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT, get_prompt_for_method, get_feedback_for_method

# ✅ Load API key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")
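    # The .env file is assumed to hold the key on a single line, e.g.:
    #   OPENAI_API_KEY=<your-openai-api-key>   (placeholder, not a real key)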

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# ✅ Ensure API key is available
if not OPENAI_API_KEY:
    raise ValueError("🚨 OpenAI API key is missing! Set it in the .env file.")

client = OpenAI(api_key=OPENAI_API_KEY)
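# NOTE: nothing in this module calls `client` directly; the bot's replies come
# from the prompt helpers imported above. It is left in place on the assumption
# that a fuller version of respond() will make OpenAI API calls with it.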

# ✅ Chatbot response function with debugging
def respond(user_message, history, selected_method):
    # Normalize input so method names match regardless of case or extra spaces
    user_message = (user_message or "").strip().lower()
    if not user_message:
        return history, history, selected_method

    valid_methods = ["bar model", "double number line", "equation"]

    # ✅ Ensure history is a list of strictly two-element tuples
    if not isinstance(history, list):
        history = []

    # ✅ Keep only well-formed entries, coerced to (str, str) tuples
    history = [(str(h[0]), str(h[1])) for h in history if isinstance(h, tuple) and len(h) == 2]

    print("\nDEBUG: Current History:", history)  # 🛠 Debugging step

    # ✅ If the user selects a method, store it and provide the method-specific prompt
    if user_message in valid_methods:
        selected_method = user_message  # Store the chosen method
        method_prompt = get_prompt_for_method(user_message)
        history.append((user_message, method_prompt))  # Keep the (user, bot) tuple format
        print("\nDEBUG: Method Selected:", selected_method)  # 🛠 Debugging
        return history, history, selected_method

    # ✅ If a method has already been selected, give feedback on the user's response
    if selected_method:
        feedback = get_feedback_for_method(selected_method, user_message)
        history.append((user_message, feedback))  # Keep the (user, bot) tuple format
        print("\nDEBUG: Providing Feedback:", feedback)  # 🛠 Debugging
        return history, history, selected_method

    # ✅ Otherwise ask the user to pick a method; the full history is returned so the
    # Chatbot component always receives a list of (user, bot) tuples
    error_msg = "❌ Please select a method first (Bar Model, Double Number Line, or Equation)."
    history.append((user_message, error_msg))  # Keep the (user, bot) tuple format
    print("\nDEBUG: Error Message Triggered")  # 🛠 Debugging
    return history, history, selected_method

# ✅ Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("## 🤖 AI-Guided Math PD Chatbot")

    chatbot = gr.Chatbot(value=[(INITIAL_PROMPT, "Hello! Please select a method to begin.")], height=500)
    state_history = gr.State([(INITIAL_PROMPT, "Hello! Please select a method to begin.")])
    state_selected_method = gr.State(None)  # ✅ New state to track selected method

    user_input = gr.Textbox(placeholder="Type your message here...", label="Your Input")

    # ✅ Handling user input and response logic
    user_input.submit(
        respond,
        inputs=[user_input, state_history, state_selected_method],
        outputs=[chatbot, state_history, state_selected_method]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)
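
# To run locally (assuming gradio, openai, and python-dotenv are installed and
# this file is saved as app.py):
#   python app.py
# The UI is served on port 7860; share=True additionally requests a temporary
# public Gradio link.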