File size: 4,166 Bytes
ce5b5d6
 
 
 
 
4ff02b9
ce5b5d6
4ff02b9
ce5b5d6
 
 
 
 
 
4ff02b9
 
 
 
 
ce5b5d6
cf70031
ce5b5d6
cf70031
ce5b5d6
 
4ff02b9
 
ce5b5d6
 
 
 
 
 
 
4ff02b9
 
ce5b5d6
 
 
 
 
 
 
4ff02b9
2c5504c
 
357e545
 
 
 
 
2cdba6f
357e545
4ff02b9
4d842e1
2cdba6f
4d842e1
2cdba6f
4d842e1
4ff02b9
4d842e1
4ff02b9
4d842e1
2cdba6f
4d842e1
4ff02b9
2c5504c
ce5b5d6
 
 
cf70031
ce5b5d6
 
 
 
4ff02b9
cf70031
ce5b5d6
 
 
4ff02b9
ce5b5d6
 
e35958e
ce5b5d6
 
4ff02b9
 
ce5b5d6
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4ff02b9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
import os
import re

import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI

from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT

# Load OpenAI API Key from .env file when one is present; otherwise the key
# is expected to come from the process environment.
if os.path.exists(".env"):
    load_dotenv(".env")

# NOTE(review): OPENAI_API_KEY may be None if the variable is unset — the
# OpenAI client will then fail at request time rather than at startup.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)  # shared client, reused by gpt_call

def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=1024,
             temperature=0.7,
             top_p=0.95):
    """
    Call the OpenAI Chat API and post-process the reply with coaching text.

    Args:
        history: Prior turns as [(user_text, assistant_text), ...]; empty
            strings on either side of a pair are skipped.
        user_message: Latest message from the user.
        model, max_tokens, temperature, top_p: Forwarded unchanged to the
            Chat Completions API.

    Returns:
        The assistant reply, possibly with pedagogy prompts prepended or
        appended depending on keywords in the user's message.
    """
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    # Replay the conversation so the model sees the full context.
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": user_message})

    # OpenAI API Call
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )

    response = completion.choices[0].message.content

    # Lowercase once, and match single-word triggers against whole words.
    # The previous raw-substring tests made short keywords fire spuriously:
    # "no" matched "know"/"note"/"cannot", "yes" matched "eyes", and "help"
    # matched "helpful". Multi-word phrases remain substring checks.
    lowered = user_message.lower()
    words = set(re.findall(r"[a-z']+", lowered))

    # Ensure AI always demands detailed explanations first
    if words & {"yes", "no"} or "next question" in lowered:
        response = "Before we move forward, let's take a step back. What do you think about the problem? Why do you believe your answer or method is correct? Can you explain your reasoning step by step? Let's make sure we fully understand before proceeding.\n\n" + response

    # Ensure AI always asks for reasoning
    if words & {"solve", "explain", "why", "reasoning"}:
        response = "Great thinking! Now, explain your reasoning step by step. What patterns do you notice? Let's walk through it together.\n\n" + response

    # Provide step-by-step hints instead of full solutions immediately
    if words & {"hint", "stuck", "help"}:
        response = "Here's a hint: What key properties or relationships can help you solve this? Try breaking it down further. What happens if we adjust one of the variables?\n\n" + response

    # Encourage teachers to create their own problems at the end
    if "pose a problem" in lowered:
        response += "\n\nNow that you've explored this concept, try creating a problem similar to the ones we discussed. What elements would you include to ensure it is non-proportional?"

    # Ask about Common Core practice standards and creativity-directed practices in the summary
    if "summary" in words:
        response += "\n\nReflection time! Which Common Core practice standards did we apply? How did creativity shape your approach to solving this problem? What strategies would help students build deeper mathematical reasoning?"

    return response

def respond(user_message, history):
    """
    Gradio submit handler: generate a reply and extend the chat history.

    An empty submission is a no-op; otherwise the model's reply is appended
    to ``history`` as a (user, assistant) pair. Returns ("", history) so the
    textbox is cleared after every turn.
    """
    if user_message:
        reply = gpt_call(history, user_message)
        history.append((user_message, reply))
    return "", history

##############################
#  Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    # Chat window, pre-seeded with the assistant's opening prompt.
    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=600
    )

    # Server-side conversation state, seeded identically to the widget so
    # the first call to respond() already sees the opening prompt.
    state_history = gr.State([("", INITIAL_PROMPT)])

    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: respond() clears the textbox and refreshes the chat widget;
    # the chained .then() step then mirrors the widget's value back into
    # state_history so the next respond() call receives the full history.
    # NOTE(review): the lambda ignores its first input (the just-cleared
    # textbox) and passes the chatbot value through unchanged.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )

if __name__ == "__main__":
    # share=True publishes a temporary public Gradio link; binding to
    # 0.0.0.0 makes the server reachable from outside the container.
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)