Commit 8a2b261
Parent(s): aa90529
Create app.py
app.py ADDED
@@ -0,0 +1,32 @@
import gradio as gr
import torch
from PIL import Image
from lavis.models import load_model_and_preprocess

# Load the BLIP captioning model from LAVIS
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model, vis_processors, _ = load_model_and_preprocess(name="blip_caption", model_type="base_coco", is_eval=True, device=device)

# Define the captioning function for Gradio
def generate_caption(image):
    # Gradio passes a PIL image directly when the input type is "pil"
    raw_image = image.convert("RGB")

    # Preprocess the image using the model's visual processors
    image_tensor = vis_processors["eval"](raw_image).unsqueeze(0).to(device)

    # Generate five candidate captions with nucleus sampling
    captions = model.generate({"image": image_tensor}, use_nucleus_sampling=True, num_captions=5)
    return ", ".join(captions)

# Set up the Gradio interface
inputs = gr.Image(type="pil", label="Image")
outputs = gr.Textbox(label="Captions")
interface = gr.Interface(fn=generate_caption, inputs=inputs, outputs=outputs, title="Blip-Caption")

# Launch the interface
if __name__ == "__main__":
    interface.launch(share=True)
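As a quick sanity check outside the web UI, the captioning function can also be called directly. The following is a minimal sketch, assuming LAVIS and Gradio are installed (e.g. via pip install salesforce-lavis gradio) and that a local sample image exists; the file name test.jpg is illustrative, not part of the commit.

# Hypothetical smoke test for generate_caption; assumes app.py is in the
# current directory and a sample image "test.jpg" is available.
from PIL import Image
from app import generate_caption

sample = Image.open("test.jpg")
print(generate_caption(sample))  # prints five sampled captions, comma-separated

Because the model is loaded at module level, importing app will download and initialize BLIP before the call returns; the __main__ guard above keeps the import from also launching the Gradio interface.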