praeclarumjj3 committed
Commit dc7f71c
1 Parent(s): 03dacef

Update app.py

Files changed (1)
  1. app.py +25 -29
app.py CHANGED
@@ -80,51 +80,48 @@ def flag_last_response(state, model_selector, request: gr.Request):
     vote_last_response(state, "flag", model_selector, request)
     return ("",) + (disable_btn,) * 3
 
-def regenerate(state, image_process_mode, seg_process_mode, depth_process_mode):
+def regenerate(state, image_process_mode, seg_process_mode):
     state.messages[-1][-1] = None
     prev_human_msg = state.messages[-2]
     if type(prev_human_msg[1]) in (tuple, list):
-        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode, prev_human_msg[1][3], seg_process_mode, prev_human_msg[1][5], depth_process_mode)
+        prev_human_msg[1] = (*prev_human_msg[1][:2], image_process_mode, prev_human_msg[1][3], seg_process_mode, None, None)
     state.skip_next = False
-    return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (disable_btn,) * 5
+    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
 
 
 def clear_history(request: gr.Request):
     state = default_conversation.copy()
-    return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (disable_btn,) * 5
+    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
 
 
-def add_text(state, text, image, image_process_mode, seg, seg_process_mode, depth, depth_process_mode, request: gr.Request):
+def add_text(state, text, image, image_process_mode, seg, seg_process_mode, request: gr.Request):
     logger.info(f"add_text. len: {len(text)}")
     if len(text) <= 0 and image is None:
         state.skip_next = True
-        return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (no_change_btn,) * 5
+        return (state, state.to_gradio_chatbot(), "", None, None) + (no_change_btn,) * 5
     if args.moderate:
         flagged = violates_moderation(text)
         if flagged:
             state.skip_next = True
-            return (state, state.to_gradio_chatbot(), moderation_msg, None, None, None, None) + (
+            return (state, state.to_gradio_chatbot(), moderation_msg, None, None) + (
                 no_change_btn,) * 5
 
-    text = text[:1200]  # Hard cut-off
+    text = text[:1576]  # Hard cut-off
     if image is not None:
-        text = text[:864]  # Hard cut-off for images
+        text = text[:1200]  # Hard cut-off for images
         if '<image>' not in text:
             text = '<image>\n' + text
     if seg is not None:
         if '<seg>' not in text:
             text = '<seg>\n' + text
-    if depth is not None:
-        if '<depth>' not in text:
-            text = '<depth>\n' + text
 
-    text = (text, image, image_process_mode, seg, seg_process_mode, depth, depth_process_mode)
+    text = (text, image, image_process_mode, seg, seg_process_mode, None, None)
     if len(state.get_images(return_pil=True)) > 0:
         state = default_conversation.copy()
     state.append_message(state.roles[0], text)
     state.append_message(state.roles[1], None)
     state.skip_next = False
-    return (state, state.to_gradio_chatbot(), "", None, None, None, None) + (disable_btn,) * 5
+    return (state, state.to_gradio_chatbot(), "", None, None) + (disable_btn,) * 5
 
 
 def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request: gr.Request):
@@ -158,13 +155,11 @@ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request:
         "stop": state.sep if state.sep_style in [SeparatorStyle.SINGLE, SeparatorStyle.MPT] else state.sep2,
         "images": f'List of {len(state.get_images())}',
         "segs": f'List of {len(state.get_segs())}',
-        "depths": f'List of {len(state.get_depths())}',
     }
     logger.info(f"==== request ====\n{pload}")
 
     pload['images'] = state.get_images()
     pload['segs'] = state.get_segs()
-    pload['depths'] = state.get_depths()
 
     state.messages[-1][-1] = "▌"
     yield (state, state.to_gradio_chatbot()) + (disable_btn,) * 5
@@ -194,6 +189,8 @@ def http_bot(state, model_selector, temperature, top_p, max_new_tokens, request:
 
     state.messages[-1][-1] = state.messages[-1][-1][:-1]
     yield (state, state.to_gradio_chatbot()) + (enable_btn,) * 5
+
+    finish_tstamp = time.time()
     logger.info(f"{output}")
 
 
@@ -255,12 +252,6 @@ def build_demo(embed_mode):
                     ["Crop", "Resize", "Pad", "Default"],
                     value="Default",
                     label="Preprocess for non-square Seg Map", visible=False)
-
-                depthbox = gr.Image(type="pil", label="Depth Map")
-                depth_process_mode = gr.Radio(
-                    ["Crop", "Resize", "Pad", "Default"],
-                    value="Default",
-                    label="Preprocess for non-square Depth Map", visible=False)
 
                 with gr.Accordion("Parameters", open=False) as parameter_row:
                     temperature = gr.Slider(minimum=0.0, maximum=1.0, value=0.8, step=0.1, interactive=True, label="Temperature",)
@@ -284,8 +275,13 @@ def build_demo(embed_mode):
 
                 cur_dir = os.path.dirname(os.path.abspath(__file__))
                 gr.Examples(examples=[
-                    [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_pan.png", f"{cur_dir}/examples/suits_depth.jpeg", "Can you describe the depth order of the objects in this image, from closest to farthest?", "0.5", "0.5"],
-                ], inputs=[imagebox, segbox, depthbox, textbox, temperature, top_p])
+                    [f"{cur_dir}/examples/people.jpg", f"{cur_dir}/examples/people_pan.png", "What objects can be seen in the image?", "0.9", "1.0"],
+                    [f"{cur_dir}/examples/corgi.jpg", f"{cur_dir}/examples/corgi_pan.png", "What objects can be seen in the image?", "0.6", "0.7"],
+                    [f"{cur_dir}/examples/friends.jpg", f"{cur_dir}/examples/friends_pan.png", "Can you count the number of people in the image?", "0.8", "0.9"],
+                    [f"{cur_dir}/examples/friends.jpg", f"{cur_dir}/examples/friends_pan.png", "What is happening in the image?", "0.8", "0.9"],
+                    [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_pan.png", "What objects can be seen in the image?", "0.5", "0.5"],
+                    [f"{cur_dir}/examples/suits.jpg", f"{cur_dir}/examples/suits_ins.png", "What objects can be seen in the image?", "0.5", "0.5"],
+                ], inputs=[imagebox, segbox, textbox, temperature, top_p])
 
         if not embed_mode:
             gr.Markdown(tos_markdown)
@@ -299,16 +295,16 @@ def build_demo(embed_mode):
             [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
         flag_btn.click(flag_last_response,
             [state, model_selector], [textbox, upvote_btn, downvote_btn, flag_btn])
-        regenerate_btn.click(regenerate, [state, image_process_mode, seg_process_mode, depth_process_mode],
-            [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list).then(
+        regenerate_btn.click(regenerate, [state, image_process_mode, seg_process_mode],
+            [state, chatbot, textbox, imagebox, segbox] + btn_list).then(
             http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
             [state, chatbot] + btn_list)
-        clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list)
+        clear_btn.click(clear_history, None, [state, chatbot, textbox, imagebox, segbox] + btn_list)
 
-        textbox.submit(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode, depthbox, depth_process_mode], [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list
+        textbox.submit(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode], [state, chatbot, textbox, imagebox, segbox] + btn_list
            ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
             [state, chatbot] + btn_list)
-        submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode, depthbox, depth_process_mode], [state, chatbot, textbox, imagebox, segbox, depthbox] + btn_list
+        submit_btn.click(add_text, [state, textbox, imagebox, image_process_mode, segbox, seg_process_mode], [state, chatbot, textbox, imagebox, segbox] + btn_list
            ).then(http_bot, [state, model_selector, temperature, top_p, max_output_tokens],
             [state, chatbot] + btn_list)
 