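"""Gradio Space: answer math questions with a CTranslate2-converted DeepSeek-Math / NuminaMath
model and report a majority-voted answer over multiple generations."""
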
import gradio as gr
import ctranslate2
from transformers import AutoTokenizer
from huggingface_hub import snapshot_download
from codeexecutor import postprocess_completion, get_majority_vote

# Prompt prefix prepended to every question
model_prompt = "Solve the following mathematical problem: "

# Load the tokenizer and the CTranslate2 model (int8, CPU)
tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")

# Number of generations sampled per question for majority voting
iterations = 10


# Function to generate a prediction using the model
def get_prediction(question):
    input_text = model_prompt + question
    input_tokens = tokenizer.tokenize(input_text)
    results = generator.generate_batch([input_tokens])
    output_tokens = results[0].sequences[0]
    predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
    return predicted_answer


# Function to perform majority voting across multiple predictions
def majority_vote(question, num_iterations=10):
    all_predictions = []
    all_answers = []
    for _ in range(num_iterations):
        prediction = get_prediction(question)
        answer = postprocess_completion(prediction, True, True)
        all_predictions.append(prediction)
        all_answers.append(answer)
    # Most frequent full completion and majority-voted extracted answer
    majority_voted_pred = max(set(all_predictions), key=all_predictions.count)
    majority_voted_ans = get_majority_vote(all_answers)
    return majority_voted_pred, all_predictions, majority_voted_ans


# Gradio interface for user input and output
def gradio_interface(question, correct_answer):
    final_prediction, all_predictions, final_answer = majority_vote(question, iterations)
    return {
        "Question": question,
        "Generated Answers (10 iterations)": all_predictions,
        "Majority-Voted Prediction": final_prediction,
        "Correct solution": correct_answer,
        "Majority answer": final_answer,
    }


# Custom CSS for enhanced design
custom_css = """
body {
    background-color: #fafafa;
    font-family: 'Open Sans', sans-serif;
}

.gradio-container {
    background-color: #ffffff;
    border: 3px solid #007acc;
    border-radius: 15px;
    padding: 20px;
    box-shadow: 0 8px 20px rgba(0, 0, 0, 0.15);
    max-width: 800px;
    margin: 50px auto;
}

h1 {
    font-family: 'Poppins', sans-serif;
    color: #007acc;
    font-weight: bold;
    font-size: 32px;
    text-align: center;
    margin-bottom: 20px;
}

p {
    font-family: 'Roboto', sans-serif;
    font-size: 18px;
    color: #333;
    text-align: center;
    margin-bottom: 15px;
}

input, textarea {
    font-family: 'Montserrat', sans-serif;
    font-size: 16px;
    padding: 10px;
    border: 2px solid #007acc;
    border-radius: 10px;
    background-color: #f1f8ff;
    margin-bottom: 15px;
}
"""


# Gradio app setup
interface = gr.Interface(
    fn=gradio_interface,
    inputs=[
        gr.Textbox(label="Math Question", placeholder="Enter your math question here...", elem_id="math_question"),
        gr.Textbox(label="Correct Answer", placeholder="Enter the correct answer here...", elem_id="correct_answer"),
    ],
    outputs=[
        gr.JSON(label="Results"),  # Display the results in JSON format
    ],
    title="Math Question Solver",
    description="Enter a math question to get the model prediction and see all generated answers.",
    css=custom_css,  # Apply the custom CSS
)
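
# Quick local check with hypothetical example values, bypassing the web UI:
#   result = gradio_interface("What is 7 * 6?", "42")
#   print(result["Majority answer"])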

if __name__ == "__main__":
    interface.launch()