openfree committed on
Commit
1794ce2
·
verified ·
1 Parent(s): 6d70605

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -20
app.py CHANGED
@@ -8,7 +8,7 @@ phi4_model_path = "microsoft/Phi-4-reasoning-plus"
8
 
9
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
10
 
11
- phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, torch_dtype="auto").to(device)
12
  phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
13
 
14
  @spaces.GPU(duration=60)
@@ -24,7 +24,7 @@ def generate_response(user_message, max_tokens, temperature, top_k, top_p, repet
24
  end_tag = "<|im_end|>"
25
 
26
  # Recommended prompt settings by Microsoft
27
- system_message = "You are a friendly and knowledgeable assistant, here to help with any questions or tasks."
28
  prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
29
  for message in history_state:
30
  if message["role"] == "user":
@@ -44,10 +44,10 @@ def generate_response(user_message, max_tokens, temperature, top_k, top_p, repet
44
  "input_ids": inputs["input_ids"],
45
  "attention_mask": inputs["attention_mask"],
46
  "max_new_tokens": int(max_tokens),
47
- "do_sample": do_sample,
48
- "temperature": temperature,
49
  "top_k": int(top_k),
50
- "top_p": top_p,
51
  "repetition_penalty": repetition_penalty,
52
  "streamer": streamer,
53
  }
@@ -70,16 +70,20 @@ def generate_response(user_message, max_tokens, temperature, top_k, top_p, repet
70
  yield new_history, new_history
71
 
72
  example_messages = {
73
- "Learn about physics": "Explain Newton's laws of motion.",
74
- "Discover space facts": "What are some interesting facts about black holes?",
75
- "Write a factorial function": "Write a Python function to calculate the factorial of a number."
76
  }
77
 
78
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
79
  gr.Markdown(
80
  """
81
  # Phi-4-reasoning-plus Chatbot
82
- Welcome to the Phi-4 Chatbot! You can chat with Microsoft's Phi-4-reasoning-plus model. Adjust the settings on the left to customize the model's responses.
 
 
 
 
83
  """
84
  )
85
 
@@ -90,16 +94,16 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
90
  gr.Markdown("### Settings")
91
  max_tokens_slider = gr.Slider(
92
  minimum=64,
93
- maximum=4096,
94
- step=50,
95
- value=512,
96
  label="Max Tokens"
97
  )
98
  with gr.Accordion("Advanced Settings", open=False):
99
  temperature_slider = gr.Slider(
100
  minimum=0.1,
101
  maximum=2.0,
102
- value=1.0,
103
  label="Temperature"
104
  )
105
  top_k_slider = gr.Slider(
@@ -112,7 +116,7 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
112
  top_p_slider = gr.Slider(
113
  minimum=0.1,
114
  maximum=1.0,
115
- value=0.9,
116
  label="Top-p"
117
  )
118
  repetition_penalty_slider = gr.Slider(
@@ -134,9 +138,9 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
134
  clear_button = gr.Button("Clear", scale=1)
135
  gr.Markdown("**Try these examples:**")
136
  with gr.Row():
137
- example1_button = gr.Button("Learn about physics")
138
- example2_button = gr.Button("Discover space facts")
139
- example3_button = gr.Button("Write a factorial function")
140
 
141
  submit_button.click(
142
  fn=generate_response,
@@ -155,17 +159,17 @@ with gr.Blocks(theme=gr.themes.Soft()) as demo:
155
  )
156
 
157
  example1_button.click(
158
- fn=lambda: gr.update(value=example_messages["Learn about physics"]),
159
  inputs=None,
160
  outputs=user_input
161
  )
162
  example2_button.click(
163
- fn=lambda: gr.update(value=example_messages["Discover space facts"]),
164
  inputs=None,
165
  outputs=user_input
166
  )
167
  example3_button.click(
168
- fn=lambda: gr.update(value=example_messages["Write a factorial function"]),
169
  inputs=None,
170
  outputs=user_input
171
  )
 
8
 
9
  device = "cuda:0" if torch.cuda.is_available() else "cpu"
10
 
11
+ phi4_model = AutoModelForCausalLM.from_pretrained(phi4_model_path, device_map="auto", torch_dtype="auto")
12
  phi4_tokenizer = AutoTokenizer.from_pretrained(phi4_model_path)
13
 
14
  @spaces.GPU(duration=60)
 
24
  end_tag = "<|im_end|>"
25
 
26
  # Recommended prompt settings by Microsoft
27
+ system_message = "Your role as an assistant involves thoroughly exploring questions through a systematic thinking process before providing the final precise and accurate solutions. This requires engaging in a comprehensive cycle of analysis, summarizing, exploration, reassessment, reflection, backtracing, and iteration to develop well-considered thinking process. Please structure your response into two main sections: Thought and Solution using the specified format: <think> {Thought section} </think> {Solution section}. In the Thought section, detail your reasoning process in steps. Each step should include detailed considerations such as analysing questions, summarizing relevant findings, brainstorming new ideas, verifying the accuracy of the current steps, refining any errors, and revisiting previous steps. In the Solution section, based on various attempts, explorations, and reflections from the Thought section, systematically present the final solution that you deem correct. The Solution section should be logical, accurate, and concise and detail necessary steps needed to reach the conclusion. Now, try to solve the following question through the above guidelines:"
28
  prompt = f"{start_tag}system{sep_tag}{system_message}{end_tag}"
29
  for message in history_state:
30
  if message["role"] == "user":
 
44
  "input_ids": inputs["input_ids"],
45
  "attention_mask": inputs["attention_mask"],
46
  "max_new_tokens": int(max_tokens),
47
+ "do_sample": True,
48
+ "temperature": 0.8,
49
  "top_k": int(top_k),
50
+ "top_p": 0.95,
51
  "repetition_penalty": repetition_penalty,
52
  "streamer": streamer,
53
  }
 
70
  yield new_history, new_history
71
 
72
  example_messages = {
73
+ "Math problem": "Solve for x: 3x^2 + 6x - 9 = 0",
74
+ "Algorithmic task": "Write a Python function to find the longest common subsequence of two strings.",
75
+ "Reasoning puzzle": "There are 5 houses in a row, each with a different color. The person in each house has a different nationality, pet, drink, and cigarette brand. Given that: The Brit lives in the red house. The Swede keeps dogs. The Dane drinks tea. The green house is on the left of the white house. The green house owner drinks coffee. The person who smokes Pall Mall keeps birds. The owner of the yellow house smokes Dunhill. The man living in the center house drinks milk. The Norwegian lives in the first house. The man who smokes Blend lives next to the one who keeps cats. The man who keeps horses lives next to the man who smokes Dunhill. The owner who smokes Blue Master drinks beer. The German smokes Prince. The Norwegian lives next to the blue house. The man who smokes Blend has a neighbor who drinks water. Who owns the fish?"
76
  }
77
 
78
  with gr.Blocks(theme=gr.themes.Soft()) as demo:
79
  gr.Markdown(
80
  """
81
  # Phi-4-reasoning-plus Chatbot
82
+ Welcome to the Phi-4-reasoning-plus Chatbot! This model is designed for advanced reasoning tasks and structured thinking. The model will provide responses with two sections:
83
+ 1. **Thought section**: A detailed reasoning chain showing step-by-step analysis
84
+ 2. **Solution section**: A concise, accurate final answer
85
+
86
+ Adjust the settings on the left to customize the model's responses. For complex queries, consider increasing the max tokens.
87
  """
88
  )
89
 
 
94
  gr.Markdown("### Settings")
95
  max_tokens_slider = gr.Slider(
96
  minimum=64,
97
+ maximum=32768,
98
+ step=1024,
99
+ value=4096,
100
  label="Max Tokens"
101
  )
102
  with gr.Accordion("Advanced Settings", open=False):
103
  temperature_slider = gr.Slider(
104
  minimum=0.1,
105
  maximum=2.0,
106
+ value=0.8,
107
  label="Temperature"
108
  )
109
  top_k_slider = gr.Slider(
 
116
  top_p_slider = gr.Slider(
117
  minimum=0.1,
118
  maximum=1.0,
119
+ value=0.95,
120
  label="Top-p"
121
  )
122
  repetition_penalty_slider = gr.Slider(
 
138
  clear_button = gr.Button("Clear", scale=1)
139
  gr.Markdown("**Try these examples:**")
140
  with gr.Row():
141
+ example1_button = gr.Button("Math problem")
142
+ example2_button = gr.Button("Algorithmic task")
143
+ example3_button = gr.Button("Reasoning puzzle")
144
 
145
  submit_button.click(
146
  fn=generate_response,
 
159
  )
160
 
161
  example1_button.click(
162
+ fn=lambda: gr.update(value=example_messages["Math problem"]),
163
  inputs=None,
164
  outputs=user_input
165
  )
166
  example2_button.click(
167
+ fn=lambda: gr.update(value=example_messages["Algorithmic task"]),
168
  inputs=None,
169
  outputs=user_input
170
  )
171
  example3_button.click(
172
+ fn=lambda: gr.update(value=example_messages["Reasoning puzzle"]),
173
  inputs=None,
174
  outputs=user_input
175
  )