Makima57 committed (verified)
Commit 0302f9b · Parent(s): fd8428b

Update app.py

majority with steps 1

Files changed (1)
  1. app.py  +49 -20
app.py CHANGED
@@ -2,10 +2,11 @@ import gradio as gr
 import ctranslate2
 from transformers import AutoTokenizer
 from huggingface_hub import snapshot_download
-from codeexecutor import postprocess_completion, get_majority_vote
+from codeexecutor import get_majority_vote
+import re

 # Define the model and tokenizer loading
-model_prompt = "Solve the following mathematical problem: "
+model_prompt = "Explain and solve the following mathematical problem step by step, showing all work: "
 tokenizer = AutoTokenizer.from_pretrained("AI-MO/NuminaMath-7B-TIR")
 model_path = snapshot_download(repo_id="Makima57/deepseek-math-Numina")
 generator = ctranslate2.Generator(model_path, device="cpu", compute_type="int8")
@@ -15,31 +16,59 @@ iterations = 10
 def get_prediction(question):
     input_text = model_prompt + question
     input_tokens = tokenizer.tokenize(input_text)
-    results = generator.generate_batch([input_tokens])
+    results = generator.generate_batch(
+        [input_tokens],
+        max_length=512,
+        sampling_temperature=0.7,
+        sampling_topk=40,
+    )
     output_tokens = results[0].sequences[0]
     predicted_answer = tokenizer.convert_tokens_to_string(output_tokens)
     return predicted_answer

-# Function to perform majority voting and solve the problem with steps
+# Function to parse the prediction to extract the answer and steps
+def parse_prediction(prediction):
+    lines = prediction.strip().split('\n')
+    answer = None
+    steps = []
+    for line in lines:
+        # Check for "Answer:" or "answer:"
+        match = re.match(r'^\s*(?:Answer|answer)\s*[:=]\s*(.*)', line)
+        if match:
+            answer = match.group(1).strip()
+        else:
+            steps.append(line)
+    if answer is None:
+        # If no "Answer:" found, assume last line is the answer
+        answer = lines[-1].strip()
+        steps = lines[:-1]
+    steps_text = '\n'.join(steps).strip()
+    return answer, steps_text
+
+# Function to perform majority voting and get steps
 def majority_vote_with_steps(question, num_iterations=10):
     all_predictions = []
-    all_answer = []
-    steps_to_solve = []
-
+    all_answers = []
+    steps_list = []
+
     for _ in range(num_iterations):
         prediction = get_prediction(question)
-        # Process prediction to get steps and answer
-        answer, success = postprocess_completion(prediction, True, True)
+        answer, steps = parse_prediction(prediction)
         all_predictions.append(prediction)
-        all_answer.append(answer)
-        if success:
-            steps_to_solve.append(answer)  # Add the steps if code executes successfully
-
-    majority_voted_ans = get_majority_vote(all_answer)
-
-    # If steps to solve exist, return them, else fallback to "No steps found"
-    steps_solution = steps_to_solve[0] if steps_to_solve else "No steps found"
-
+        all_answers.append(answer)
+        steps_list.append(steps)
+
+    # Get the majority voted answer
+    majority_voted_ans = get_majority_vote(all_answers)
+
+    # Find the steps corresponding to the majority voted answer
+    for i, ans in enumerate(all_answers):
+        if ans == majority_voted_ans:
+            steps_solution = steps_list[i]
+            break
+    else:
+        steps_solution = "No steps found"
+
     return majority_voted_ans, steps_solution

 # Gradio interface for user input and output
@@ -52,7 +81,7 @@ def gradio_interface(question, correct_answer):
         "Correct Solution": correct_answer
     }

-# Custom CSS for enhanced design
+# Custom CSS for enhanced design (unchanged)
 custom_css = """
 body {
     background-color: #fafafa;
@@ -94,7 +123,7 @@ custom_css = """
 #math_question, #correct_answer {
     font-size: 20px;
     font-family: 'Poppins', sans-serif;
-    font-weight: 500px; /* Apply bold */
+    font-weight: 500px;
     color: #007acc;
     margin-bottom: 5px;
     display: inline-block;
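
For readers skimming the diff: the new sampling_temperature/sampling_topk arguments make the repeated generations diverse, which is what gives the majority vote different answers to choose between, and parse_prediction splits each generation into worked steps plus a final "Answer:" line. get_majority_vote still comes from the repo's codeexecutor module, which is not shown in this commit, so the stand-in below is only a guess at its behavior. A minimal sketch, assuming it simply returns the most frequent answer string; the sample generations are invented for illustration:

from collections import Counter
import re

def get_majority_vote_sketch(answers):
    # Hypothetical stand-in for codeexecutor.get_majority_vote:
    # return the most frequent answer string.
    return Counter(answers).most_common(1)[0][0]

def parse_prediction(prediction):
    # Same parsing logic as in the diff: collect an "Answer:" line,
    # treat every other line as part of the worked steps.
    lines = prediction.strip().split('\n')
    answer, steps = None, []
    for line in lines:
        match = re.match(r'^\s*(?:Answer|answer)\s*[:=]\s*(.*)', line)
        if match:
            answer = match.group(1).strip()
        else:
            steps.append(line)
    if answer is None:
        # No "Answer:" line found: fall back to the last line.
        answer = lines[-1].strip()
        steps = lines[:-1]
    return answer, '\n'.join(steps).strip()

# Invented sample outputs standing in for the ten model generations:
samples = [
    "Step 1: 2 + 2 = 4\nAnswer: 4",
    "Step 1: add the numbers\nAnswer: 4",
    "Step 1: 2 * 2 = 4\nAnswer: 5",
]
answers = [parse_prediction(s)[0] for s in samples]
print(get_majority_vote_sketch(answers))  # -> "4"

Under that assumption, the steps returned by majority_vote_with_steps are simply the worked solution from the first generation whose parsed answer matches the winning answer.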