Commit 088da54
Parent(s): 9646a58
Update app.py

app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import torch
 import cv2
 from lavis.models import load_model_and_preprocess
-
+from PIL import Image
 
 # Load the Blip-Caption model
 
@@ -10,10 +10,10 @@ model, vis_processors, _ = load_model_and_preprocess(name="blip_caption", model_
 
 # Define the input and output functions for Gradio
 def generate_caption(image_file):
-
+    image = Image.fromarray(image_file).convert('RGB')
 
     # Preprocess the image using the Blip-Caption model's visual processors
-    image = vis_processors["eval"](
+    image = vis_processors["eval"](image).unsqueeze(0)
 
     # Generate captions using the Blip-Caption model
     captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)
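For context, here is a sketch of what the full app.py plausibly looks like after this commit. The diff only shows lines 2-19, so the model_type value, the function's return statement, and the Gradio interface wiring below are assumptions, not part of the commit. The fix itself addresses the fact that Gradio's Image component passes the input to the handler as a numpy array: Image.fromarray converts it to a PIL image before the LAVIS visual processor is applied, and unsqueeze(0) adds the batch dimension that model.generate expects.

```python
import gradio as gr
import torch
import cv2  # imported in the original file, though unused in this snippet
from lavis.models import load_model_and_preprocess
from PIL import Image

# Load the Blip-Caption model. model_type is an assumption (the diff truncates
# this line); load_model_and_preprocess defaults to CPU, which is consistent
# with the committed code never moving the preprocessed tensor to a device.
model, vis_processors, _ = load_model_and_preprocess(
    name="blip_caption", model_type="base_coco", is_eval=True
)

# Define the input and output functions for Gradio
def generate_caption(image_file):
    # Gradio's Image component delivers a numpy array; convert it to RGB PIL
    image = Image.fromarray(image_file).convert('RGB')

    # Preprocess the image using the Blip-Caption model's visual processors
    image = vis_processors["eval"](image).unsqueeze(0)

    # Generate captions using the Blip-Caption model
    captions = model.generate({"image": image}, use_nucleus_sampling=True, num_captions=5)
    return "\n".join(captions)  # assumed output formatting, not shown in the diff

# Interface wiring is not shown in the diff; a minimal version might be:
demo = gr.Interface(fn=generate_caption, inputs=gr.Image(), outputs="text")
demo.launch()
```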