JasperHaozhe committed on
Commit
ba19818
·
verified ·
1 Parent(s): f4bd3e6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -2
app.py CHANGED
@@ -184,7 +184,8 @@ def model_inference(input_dict, history):
184
  ).to("cuda")
185
 
186
  streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)
187
- generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, temperature=0.1, top_p=0.95, top_k=50)
 
188
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
189
  thread.start()
190
 
@@ -242,7 +243,6 @@ def model_inference(input_dict, history):
242
  # complete_assistant_response_for_gradio += f"\n<b>Analyzing Operation Result ...</b> @region(size={proc_img.size[0]}x{proc_img.size[1]})\n\n"
243
  complete_assistant_response_for_gradio += [f"\n<b>Analyzing Operation Result ...</b> @region(size={proc_img.size[0]}x{proc_img.size[1]})\n\n"]
244
  yield complete_assistant_response_for_gradio # Update Gradio display
245
- # all_images.append(proc_img)
246
 
247
 
248
  else:
 
184
  ).to("cuda")
185
 
186
  streamer = TextIteratorStreamer(processor, skip_prompt=True, skip_special_tokens=False)
187
+ # generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, temperature=0.1, top_p=0.95, top_k=50)
188
+ generation_kwargs = dict(inputs, streamer=streamer, max_new_tokens=1024, do_sample=False, num_beams=1)
189
  thread = Thread(target=model.generate, kwargs=generation_kwargs)
190
  thread.start()
191
 
 
243
  # complete_assistant_response_for_gradio += f"\n<b>Analyzing Operation Result ...</b> @region(size={proc_img.size[0]}x{proc_img.size[1]})\n\n"
244
  complete_assistant_response_for_gradio += [f"\n<b>Analyzing Operation Result ...</b> @region(size={proc_img.size[0]}x{proc_img.size[1]})\n\n"]
245
  yield complete_assistant_response_for_gradio # Update Gradio display
 
246
 
247
 
248
  else: