update cap
multimodal/open_flamingo/chat/conversation.py
CHANGED
@@ -425,6 +425,7 @@ class Chat:
         if radio in ["Cap"]:
             output_text, out_image = captioner(self.model, self.tokenizer, image_ori, batch_images, input_ids,
                                                attention_mask, image_start_index_list, image_nums, added_bbox_list)
+
         else:
             with torch.inference_mode():
                 text_outputs = self.model.generate(
@@ -477,6 +478,7 @@ class Chat:
             print(output_text)
             output_text = re.findall(r'Assistant:(.+)', output_text)[-1]
             print(output_text)
+            print(output_text)
 
         return output_text, out_image
 
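For context, the first hunk sits on the branch that routes the "Cap" radio option to a dedicated captioner helper instead of the usual self.model.generate path. The sketch below is a minimal, self-contained illustration of that dispatch pattern; the captioner stub, the Chat fields, and the simplified argument list are assumptions inferred from the diff, not the repository's actual API.

    from typing import Any, Optional, Tuple

    def captioner(model: Any, tokenizer: Any, image: Any) -> Tuple[str, Optional[Any]]:
        # Hypothetical stand-in for the real captioner(...) helper, which takes many
        # more arguments (batch_images, input_ids, attention_mask, ...) in the diff above.
        return "a short caption for the image", image

    class Chat:
        def __init__(self, model: Any, tokenizer: Any) -> None:
            self.model = model
            self.tokenizer = tokenizer

        def answer(self, radio: str, image: Any, prompt: str) -> Tuple[str, Optional[Any]]:
            if radio in ["Cap"]:
                # "Cap" mode: delegate to the captioner instead of free-form generation.
                output_text, out_image = captioner(self.model, self.tokenizer, image)
            else:
                # The real code wraps this call in torch.inference_mode(); the stub
                # model used here has no gradients, so a plain call is enough.
                output_text = self.model.generate(prompt)
                out_image = None
            return output_text, out_image

    class _StubModel:
        def generate(self, prompt: str) -> str:
            return f"Assistant: echo of {prompt!r}"

    if __name__ == "__main__":
        chat = Chat(_StubModel(), tokenizer=None)
        print(chat.answer("Cap", image="cat.png", prompt=""))
        print(chat.answer("Chat", image=None, prompt="hello"))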
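The second hunk sits next to the post-processing step that keeps only the text after the last "Assistant:" marker. A quick, self-contained illustration of that regex on an invented transcript:

    import re

    # Invented example transcript with one turn per line.
    output_text = "User: what is in the image?\nAssistant: a cat.\nUser: where?\nAssistant: on a sofa."
    # '.' does not match newlines, so findall yields one capture per "Assistant:" line;
    # [-1] keeps only the latest assistant reply.
    output_text = re.findall(r'Assistant:(.+)', output_text)[-1]
    print(output_text)  # " on a sofa."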