import os
import gradio as gr
from dotenv import load_dotenv
from openai import OpenAI
from prompts.initial_prompt import INITIAL_PROMPT
from prompts.main_prompt import MAIN_PROMPT

# Load the OpenAI API key from a .env file, if present
if os.path.exists(".env"):
    load_dotenv(".env")

OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
client = OpenAI(api_key=OPENAI_API_KEY)

def gpt_call(history, user_message,
             model="gpt-4o-mini",
             max_tokens=1024,
             temperature=0.7,
             top_p=0.95):
    """
    Calls the OpenAI Chat API to generate responses.
    - history: [(user_text, assistant_text), ...]
    - user_message: latest message from the user
    """
    messages = [{"role": "system", "content": MAIN_PROMPT}]

    # Add conversation history
    for user_text, assistant_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if assistant_text:
            messages.append({"role": "assistant", "content": assistant_text})

    messages.append({"role": "user", "content": user_message})

    # OpenAI API call
    completion = client.chat.completions.create(
        model=model,
        messages=messages,
        max_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p
    )
    response = completion.choices[0].message.content

    # Ensure the AI is conversational and interactive
    if any(keyword in user_message.lower() for keyword in ["solve", "explain", "why", "reasoning"]):
        response = "Great thinking! Now, explain your reasoning step by step. What patterns or relationships do you notice? Let's walk through it together.\n\n" + response

    # Provide guidance instead of full solutions immediately
    if any(keyword in user_message.lower() for keyword in ["hint", "stuck", "help"]):
        response = "Here's a hint: What key properties or relationships can help you solve this? Try breaking it down further.\n\n" + response

    # Encourage problem posing at the end of each module
    if "pose a problem" in user_message.lower():
        response += "\n\nNow that you've explored this concept, can you create your own problem? How would you challenge your students with a similar situation?"

    # Ask about Common Core practice standards and creativity-directed practices at the end
    if "summary" in user_message.lower():
        response += "\n\nReflection time! Which Common Core practice standards did we apply? How did creativity shape your approach to solving this problem?"

    # Step-by-step solutions instead of immediate answers
    if any(keyword in user_message.lower() for keyword in ["solution", "answer"]):
        response = "Let's take this step by step. What information do we have? How can we use it to set up an equation or method?\n\n" + response

    # Provide illustrations where relevant
    if any(keyword in user_message.lower() for keyword in ["visualize", "graph", "draw", "picture", "illustration"]):
        response += "\n\nLet me generate an illustration to help you visualize this concept. It will be an approximation to support your understanding."

    return response
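
# Example (a sketch only, not executed on import): calling gpt_call directly
# with a short tuple-style history. The sample messages below are illustrative
# and assume OPENAI_API_KEY is set.
#
#   history = [("Hi", "Hello! Let's explore fraction multiplication today.")]
#   reply = gpt_call(history, "Can you give me a hint?")
#   print(reply)
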
def respond(user_message, history):
    """
    Handles user input and chatbot responses.
    """
    if not user_message:
        return "", history

    assistant_reply = gpt_call(history, user_message)
    history.append((user_message, assistant_reply))
    return "", history
##############################
# Gradio Blocks UI
##############################
with gr.Blocks() as demo:
    gr.Markdown("## AI-Guided Math PD Chatbot")

    chatbot = gr.Chatbot(
        value=[("", INITIAL_PROMPT)],
        height=600
    )
    state_history = gr.State([("", INITIAL_PROMPT)])

    user_input = gr.Textbox(
        placeholder="Type your message here...",
        label="Your Input"
    )

    # On submit: clear the textbox and show the reply in the chatbot, then
    # copy the updated chat history from the chatbot back into session state.
    user_input.submit(
        respond,
        inputs=[user_input, state_history],
        outputs=[user_input, chatbot]
    ).then(
        fn=lambda _, h: h,
        inputs=[user_input, chatbot],
        outputs=[state_history]
    )
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=True)