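"""
AI-Guided Math PD Chatbot: a Gradio chat UI that wraps the OpenAI Chat Completions
API with the project's INITIAL_PROMPT / MAIN_PROMPT for math-teacher PD sessions.
"""
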
import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT

# Load API key from .env file
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
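
# Note: if no key is found (via .env or the hosting environment, e.g. a Space secret),
# constructing the OpenAI client below will fail before the app starts serving.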

client = OpenAI(api_key=OPENAI_API_KEY)

def gpt_call(history, user_message, model="gpt-4o-mini", max_tokens=512, temperature=0.7, top_p=0.95):
    """
    Calls OpenAI API to generate a response based on conversation history.
    - history: [(user_text, assistant_text), ...]
    - user_message: The latest user input
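    - Returns the assistant's reply as a plain string,
      e.g. gpt_call([("Hi", "Hello! Ready to start?")], "Pose a fraction problem") -> "..."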
    """
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": user_message})

    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )
    return completion.choices[0].message.content

def respond(user_message, history):
    """
    Handles chatbot responses.
    - Ensures that teachers explain their reasoning before the AI provides hints or feedback.
    - Guides the conversation to include CCSS practice standards, problem-posing, creativity-directed practices, and a summary.
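    - Returns ("", updated_history): the empty string clears the input box and the
      updated history refreshes the chat display.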
    """
    if not user_message:
        return "", history

    # Look at the bot's last reply to see whether it asked the teacher to solve a problem
    last_bot_message = history[-1][1] if history else ""

    if "problem" in last_bot_message.lower() and "solve" in last_bot_message.lower():
        # The bot was expecting an explanation, so acknowledge it and move forward
        history.append((user_message, "Thanks for sharing your reasoning! Let's analyze your response."))
    else:
        # Regular OpenAI GPT response
        assistant_reply = gpt_call(history, user_message)
        history.append((user_message, assistant_reply))

    return "", history

##############################
#  Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=500
    )
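
    # Parallel session copy of the conversation; respond() reads and extends this list,
    # and the .then() step below keeps it in sync with the visible chat.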

    state_history = gr.State([("", INITIAL_PROMPT)])

    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )
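
    # Submitting the textbox runs respond() to clear the input box and update the chat
    # display, then the chained .then() step mirrors the chatbot value back into state_history.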

    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda h: h,
        inputs=[chatbot],
        outputs=[state_history]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)