import gradio as gr
import torch
from PIL import Image
from lavis.models import load_model_and_preprocess
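# Dependencies (assumed package names): pip install torch pillow gradio salesforce-lavis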
# Load the Blip-Caption model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, vis_processors, _ = load_model_and_preprocess(name="blip_caption", model_type="base_coco", is_eval=True, device=device)
# Define the captioning function for the Gradio interface
def generate_caption(image_file):
    # Load the image from the file path and force RGB mode
    raw_image = Image.open(image_file).convert("RGB")
    # Preprocess the image using the Blip-Caption model's visual processors
    image = vis_processors["eval"](raw_image).unsqueeze(0).to(device)
    # Generate five candidate captions with nucleus sampling
    captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)
    # Join the captions into a single comma-separated string
    return ", ".join(captions)
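# Quick sanity check outside Gradio (assumes a local file "example.jpg" exists):
# print(generate_caption("example.jpg"))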
# Set up the Gradio interface
inputs = gr.Image(type="filepath", label="Image")
outputs = gr.Textbox(label="Captions")
interface = gr.Interface(fn=generate_caption, inputs=inputs, outputs=outputs, title="Blip-Caption")
# Launch the interface
interface.launch(share=True)
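# Running `python app.py` serves the demo locally; share=True also prints a temporary public URL.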