import gradio as gr
import torch
from lavis.models import load_model_and_preprocess
from PIL import Image

# Run on the GPU when one is available; LAVIS otherwise loads to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the pretrained BLIP captioning model along with its image preprocessors.
model, vis_processors, _ = load_model_and_preprocess(
    name="blip_caption", model_type="base_coco", is_eval=True, device=device
)
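
# Note: LAVIS also ships a larger checkpoint for this architecture
# (model_type="large_coco"), if caption quality matters more than load time.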


def generate_caption(image_file):
    # Gradio's "image" input delivers the upload as a NumPy array; convert it
    # to an RGB PIL image, which is what the preprocessor expects.
    image = Image.fromarray(image_file).convert("RGB")

    # Apply the model's eval-time transforms, add a batch dimension, and move
    # the tensor to the same device as the model.
    image = vis_processors["eval"](image).unsqueeze(0).to(device)

    # Sample five captions with nucleus sampling for diversity.
    captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)

    # Capitalize each caption and return them one per line.
    beautified_captions = [caption.capitalize() for caption in captions]
    return "\n".join(beautified_captions)
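
# Note: with use_nucleus_sampling=False, model.generate falls back to LAVIS's
# default beam search, which trades caption diversity for determinism.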


# Wire the captioner into a simple Gradio app: an image upload in, plain text out.
interface = gr.Interface(fn=generate_caption, inputs="image", outputs="text", title="Blip-Caption")

interface.launch()
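
# Optional: interface.launch(share=True) also serves the demo through a
# temporary public Gradio URL, convenient for quick sharing.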