chendl committed on
Commit
ca42c83
·
1 Parent(s): 7bf1d4b

update cap

Browse files
app.py CHANGED
@@ -244,7 +244,7 @@ def gradio_ask(user_message, chatbot, chat_state,radio):
244
 
245
  chat.ask(user_message, chat_state,radio)
246
  chatbot = chatbot + [[user_message, None]]
247
- return '', chatbot, chat_state
248
 
249
 
250
  def gradio_answer(chatbot, chat_state, img_list, radio, text,num_beams, temperature):
@@ -320,7 +320,7 @@ with gr.Blocks() as demo:
320
  upload_button.click(upload_img, [image, text_input, chat_state,chatbot],
321
  [image, text_input, upload_button, chat_state, img_list,chatbot])
322
 
323
- text_input.submit(gradio_ask, [text_input, chatbot, chat_state,radio], [text_input, chatbot, chat_state]).then(
324
  gradio_answer, [chatbot, chat_state, img_list, radio, text_input,num_beams, temperature], [text_input,chatbot, chat_state, img_list]
325
  )
326
  clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list],
 
244
 
245
  chat.ask(user_message, chat_state,radio)
246
  chatbot = chatbot + [[user_message, None]]
247
+ return chatbot, chat_state
248
 
249
 
250
  def gradio_answer(chatbot, chat_state, img_list, radio, text,num_beams, temperature):
 
320
  upload_button.click(upload_img, [image, text_input, chat_state,chatbot],
321
  [image, text_input, upload_button, chat_state, img_list,chatbot])
322
 
323
+ text_input.submit(gradio_ask, [text_input, chatbot, chat_state,radio], [chatbot, chat_state]).then(
324
  gradio_answer, [chatbot, chat_state, img_list, radio, text_input,num_beams, temperature], [text_input,chatbot, chat_state, img_list]
325
  )
326
  clear.click(gradio_reset, [chat_state, img_list], [chatbot, image, text_input, upload_button, chat_state, img_list],
multimodal/open_flamingo/chat/conversation.py CHANGED
@@ -457,7 +457,9 @@ class Chat:
457
  # print(
458
  # f"### Assistant: {tokenizer.decode(outputs[0, input_ids.shape[1]:], skip_special_tokens=True).strip()}")
459
  output_text = self.tokenizer.decode(text_outputs[0])
460
- output_text = re.findall(r'Assistant:(.+)', output_text)[-1]
 
 
461
 
462
  return output_text, out_image
463
 
 
457
  # print(
458
  # f"### Assistant: {tokenizer.decode(outputs[0, input_ids.shape[1]:], skip_special_tokens=True).strip()}")
459
  output_text = self.tokenizer.decode(text_outputs[0])
460
+ print(output_text)
461
+ output_text = re.findall(r'Assistant:(.+)', output_text)
462
+ print(output_text)
463
 
464
  return output_text, out_image
465
 
multimodal/open_flamingo/eval/task/caption_chat.py CHANGED
@@ -171,7 +171,7 @@ def captioner(
171
  else:
172
  break
173
  outputs = outputs[:, ori_prompt_length:]
174
- outputs = postprocess_captioning_generation(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]).replace('"', "")
175
  # new_predictions = [
176
  # postprocess_captioning_generation(out).replace('"', "")
177
  # for out in tokenizer.batch_decode(outputs, skip_special_tokens=True)
 
171
  else:
172
  break
173
  outputs = outputs[:, ori_prompt_length:]
174
+ outputs = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0].replace('"', "")
175
  # new_predictions = [
176
  # postprocess_captioning_generation(out).replace('"', "")
177
  # for out in tokenizer.batch_decode(outputs, skip_special_tokens=True)