Makima57 committed
Commit 8a529be · verified · 1 Parent(s): 9510484

Update app.py

Files changed (1)
  1. app.py +17 -59
app.py CHANGED
@@ -1,15 +1,15 @@
-import gradio as gr
-import ctranslate2
-from transformers import AutoTokenizer
-from huggingface_hub import snapshot_download
-from codeexecutor import postprocess_completion, get_majority_vote
+import gradio as gr
+import ctranslate2
+from transformers import AutoTokenizer
+from huggingface_hub import snapshot_download
+from codeexecutor import postprocess_completion,get_majority_vote
 
 # Define the model and tokenizer loading
 model_prompt = "Solve the following mathematical problem: "
 tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
 model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
 generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
-iterations = 10
+iterations=10
 
 # Function to generate predictions using the model
 def get_prediction(question):
@@ -18,24 +18,24 @@ def get_prediction(question):
     results = generator.generate_batch([input_tokens])
     output_tokens = results[0].sequences[0]
     predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
-    return predicted_answer
+    return predicted_answer
 
 # Function to perform majority voting across multiple predictions
 def majority_vote(question, num_iterations=10):
     all_predictions = []
-    all_answer = []
+    all_answer=[]
     for _ in range(num_iterations):
         prediction = get_prediction(question)
-        answer = postprocess_completion(prediction, True, True)
+        answer=postprocess_completion(prediction,True,True)
         all_predictions.append(prediction)
         all_answer.append(answer)
     majority_voted_pred = max(set(all_predictions), key=all_predictions.count)
-    majority_voted_ans = get_majority_vote(all_answer)
-    return majority_voted_pred, all_predictions, majority_voted_ans
+    majority_voted_ans=get_majority_vote(all_answer)
+    return majority_voted_pred, all_predictions,majority_voted_ans
 
 # Gradio interface for user input and output
 def gradio_interface(question, correct_answer):
-    final_prediction, all_predictions, final_answer = majority_vote(question, iterations)
+    final_prediction, all_predictions,final_answer = majority_vote(question, iterations)
     return {
         "Question": question,
         "Generated Answers (10 iterations)": all_predictions,
@@ -44,61 +44,19 @@ def gradio_interface(question, correct_answer):
         "Majority answer": final_answer
     }
 
-# Custom CSS styling for a better look and feel
-custom_css = """
-body {
-    background-color: #f7f9fc;
-    font-family: 'Arial', sans-serif;
-}
-.gradio-container {
-    border-radius: 10px;
-    border: 1px solid #e0e0e0;
-    box-shadow: 0 4px 8px rgba(0, 0, 0, 0.1);
-    padding: 20px;
-    background-color: white;
-}
-h1, h2, p {
-    text-align: center;
-    color: #333;
-}
-input, textarea {
-    border-radius: 5px;
-    border: 1px solid #ccc;
-    padding: 10px;
-}
-.gr-button {
-    background-color: #4CAF50;
-    color: white;
-    border-radius: 5px;
-    padding: 10px 20px;
-    font-size: 16px;
-}
-.gr-button:hover {
-    background-color: #45a049;
-}
-.gr-output {
-    background-color: #f1f1f1;
-    border-radius: 5px;
-    padding: 20px;
-    font-size: 14px;
-}
-"""
-
 # Gradio app setup
-demo = gr.Interface(
+interface = gr.Interface(
     fn=gradio_interface,
     inputs=[
-        gr.Textbox(label="Math Question", placeholder="Enter your math question here...", elem_id="math_question"),
-        gr.Textbox(label="Correct Answer", placeholder="Enter the correct answer here...", elem_id="correct_answer"),
+        gr.Textbox(label="Math Question"),
+        gr.Textbox(label="Correct Answer"),
     ],
     outputs=[
-        gr.JSON(label="Results", elem_id="results"), # Display the results in a JSON format
+        gr.JSON(label="Results"), # Display the results in a JSON format
     ],
     title="Math Question Solver",
     description="Enter a math question to get the model prediction and see all generated answers.",
-    live=True,
-    css=custom_css, # Apply custom CSS
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    interface.launch()
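For context on the voting logic that is unchanged by this commit: majority_vote collects every raw completion and picks the most frequent one with max(set(all_predictions), key=all_predictions.count). Below is a minimal standalone sketch of that selection rule; the sample predictions are hypothetical stand-ins for what get_prediction would return over `iterations` runs, and collections.Counter is shown only as an equivalent alternative, not as code taken from the app.

from collections import Counter

# Hypothetical completions standing in for repeated get_prediction() calls.
all_predictions = ["x = 42", "x = 41", "x = 42", "x = 42", "x = 7"]

# Same selection rule as in app.py: the most frequent string wins.
majority_voted_pred = max(set(all_predictions), key=all_predictions.count)

# Equivalent result via collections.Counter (linear time rather than quadratic).
assert majority_voted_pred == Counter(all_predictions).most_common(1)[0][0]
print(majority_voted_pred)  # -> "x = 42"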