import gradio as gr
from transformers import BlipForConditionalGeneration, AutoProcessor
from PIL import Image
import torch
# Load model and processor
processor = AutoProcessor.from_pretrained("blip-fine-tuned/")
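# Pad text sequences on the left, the usual setting for autoregressive generation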
processor.tokenizer.padding_side = 'left'
model = BlipForConditionalGeneration.from_pretrained("blip-fine-tuned/")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
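
# Generate a caption for a single input image (called by the Gradio interface below)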
def predict(image):
    # Preprocess the image into pixel values for the model
    inputs = processor(images=image, return_tensors="pt").to(device)
    pixel_values = inputs.pixel_values

    # Generate token IDs and decode them into a caption
    with torch.no_grad():
        generated_ids = model.generate(pixel_values=pixel_values, max_length=100)
    generated_caption = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return generated_caption
interface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="pil"),
    outputs="text",
    title="BLIP Image Caption Generator",
    description="Upload an image or select a sample to generate a descriptive caption.",
)
interface.launch()
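
# A minimal sketch for testing predict() without launching the UI
# (assumes a local image at "sample.jpg"; the path is illustrative only):
#
#     from PIL import Image
#     print(predict(Image.open("sample.jpg").convert("RGB")))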