from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import torch
import spaces
# Initialize the model and tokenizer
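# Qwen2.5-Math-1.5B-Instruct is a small math-specialized instruct model;
# any causal LM with a chat template should work as a drop-in replacement.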
model_name = "Qwen/Qwen2.5-Math-1.5B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
    device_map="auto" if device == "cuda" else None,
)
tokenizer = AutoTokenizer.from_pretrained(model_name)
# System instruction
SYSTEM_INSTRUCTION = (
    "You are a helpful and patient math tutor tasked with providing step-by-step hints and guidance for solving math problems. "
    "Your primary role is to assist learners in understanding how to approach and solve problems without revealing the final answer, even if explicitly requested. "
    "Always encourage the learner to solve the problem themselves by offering incremental hints and explanations. "
    "Under no circumstances should you provide the complete solution or final answer."
)
def apply_chat_template(messages):
    """
    Prepares the messages for the model using the tokenizer's chat template.
    """
    return tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True,
    )
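# @spaces.GPU requests a GPU for the duration of the decorated call when the
# app runs on Hugging Face ZeroGPU Spaces; elsewhere it should be a no-op.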
@spaces.GPU
def generate_response(chat_history, user_input):
    """
    Generates a response from the model based on the chat history and user input.
    """
    # Append the user input to the chat history, with a reminder that only
    # hints and guidance may be given, never the final answer
    chat_history.append((
        "User",
        user_input + "\n\n(It is strictly prohibited to reveal the answer; provide only hints and guidance for solving this.)",
    ))
    # Convert the (speaker, text) history into the role-based message format
    messages = [{"role": "system", "content": SYSTEM_INSTRUCTION}] + [
        {"role": "user", "content": content} if speaker == "User" else {"role": "assistant", "content": content}
        for speaker, content in chat_history
    ]
    # Tokenize the input for the model
    text = apply_chat_template(messages)
    model_inputs = tokenizer([text], return_tensors="pt").to(device)
    # Generate the model's response
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
    )
    # Strip the prompt tokens so only the newly generated tokens are decoded
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    # Append the AI response to the chat history
    chat_history.append(("MathTutor", response))
    # Pair consecutive (user, assistant) turns into the tuple format gr.Chatbot expects
    return [
        (chat_history[i][1], chat_history[i + 1][1])
        for i in range(0, len(chat_history) - 1, 2)
    ]
def format_chat_history(history):
    """
    Formats the (speaker, text) conversation history for a user-friendly
    Markdown display. (Helper; not wired into the UI below, where gr.Chatbot
    renders the tuple pairs directly.)
    """
    chat_display = ""
    for speaker, content in history:
        if speaker == "User":
            chat_display += f"**User:** {content}\n\n"
        else:
            chat_display += f"**MathTutor:** {content}\n\n"
    return chat_display
# Gradio chat interface
def create_chat_interface():
    """
    Creates the Gradio interface for the chat application.
    """
    with gr.Blocks() as chat_app:
        gr.HTML("""
        <!-- Include KaTeX CSS and JS -->
        <link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css">
        <script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.js"></script>
        <script defer src="https://cdn.jsdelivr.net/npm/[email protected]/dist/contrib/auto-render.min.js"></script>
        <script>
            // Preprocess LaTeX content
            function preprocessLatex(text) {
                // Convert block math `[ ... ]` to `\\[ ... \\]`
                text = text.replace(/\[([^\[\]]+)\]/g, '\\[$1\\]');
                // Convert inline math `( ... )` to `\\( ... \\)`
                text = text.replace(/\(([^\(\)]+)\)/g, '\\($1\\)');
                return text;
            }
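            // Caveat: these regexes wrap *any* bracketed or parenthesized text,
            // so non-math uses of [] and () in a message will also be handed to
            // KaTeX; a stricter pattern may be needed for mixed prose.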
            // Render LaTeX only for elements requiring math
            function renderChatLatex(mutationsList) {
                for (const mutation of mutationsList) {
                    if (mutation.type === "childList") {
                        mutation.addedNodes.forEach((node) => {
                            if (node.nodeType === 1) { // Ensure it's an element node
                                // Check if the content needs LaTeX rendering
                                if (node.innerHTML.match(/\\\(|\\\[|\$|\[|\(/)) {
                                    node.innerHTML = preprocessLatex(node.innerHTML);
                                    renderMathInElement(node, {
                                        delimiters: [
                                            { left: "\\(", right: "\\)", display: false },
                                            { left: "\\[", right: "\\]", display: true },
                                            { left: "$$", right: "$$", display: true },
                                            { left: "$", right: "$", display: false }
                                        ]
                                    });
                                }
                            }
                        });
                    }
                }
            }

            // Setup MutationObserver to re-render math as new chat messages arrive
            document.addEventListener("DOMContentLoaded", () => {
                const chatContainer = document.querySelector("#chat-container");
                const observer = new MutationObserver(renderChatLatex);
                observer.observe(chatContainer, { childList: true, subtree: true });
            });
        </script>
        """)
gr.Markdown("## Math Hint Chat")
gr.Markdown(
"This chatbot provides hints and step-by-step guidance for solving math problems. "
)
chatbot = gr.Chatbot(label="Math Tutor Chat",, elem_id="chat-container")
user_input = gr.Textbox(
placeholder="Ask your math question here (e.g., Solve for x: 4x + 5 = 6x + 7)",
label="Your Query"
)
send_button = gr.Button("Send")
# Hidden state for managing chat history
chat_history = gr.State([])
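        # Note: generate_response mutates this per-session list in place, so
        # the history persists across clicks without being returned as an output.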
        # Button interaction for chat
        send_button.click(
            fn=generate_response,
            inputs=[chat_history, user_input],
            outputs=[chatbot],
        )

    return chat_app
app = create_chat_interface()
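# debug=True surfaces errors in the console/logs while developing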
app.launch(debug=True)